From 8eaf56c107f86e9d9db7771f0dbe1878990293b8 Mon Sep 17 00:00:00 2001 From: Frank Lai Date: Sun, 22 Mar 2026 15:10:17 -0700 Subject: [PATCH 01/20] Add LEAPP export integration for manager-based RSL-RL environments --- scripts/reinforcement_learning/deploy.py | 72 ++ .../rsl_rl/LEAPP_annotations_for_isaac_lab.md | 158 ++++ .../reinforcement_learning/rsl_rl/export.py | 295 +++++++ .../export_annotator.py | 759 ++++++++++++++++++ .../managed_environment_annotator.py/proxy.py | 346 ++++++++ .../managed_environment_annotator.py/utils.py | 4 + .../isaaclab/envs/direct_deployment_env.py | 397 +++++++++ .../envs/mdp/commands/pose_2d_command.py | 3 + .../envs/mdp/commands/pose_command.py | 3 + .../envs/mdp/commands/velocity_command.py | 3 + .../isaaclab/managers/manager_term_cfg.py | 3 + .../contact_sensor/contact_sensor_data.py | 2 + .../frame_transformer_data.py | 2 + .../isaaclab/isaaclab/sensors/imu/imu_data.py | 2 + .../multi_mesh_ray_caster_camera.py | 4 +- .../isaaclab/sensors/ray_caster/ray_caster.py | 14 +- .../sensors/ray_caster/ray_caster_data.py | 23 +- .../isaaclab/utils/buffers/circular_buffer.py | 57 +- .../isaaclab/utils/leapp_semantics.py | 125 +++ .../test/test_rsl_rl_export_flow.py | 144 ++++ .../classic/humanoid/mdp/observations.py | 4 +- .../manipulation/deploy/mdp/observations.py | 6 +- .../dexsuite/mdp/commands/pose_commands.py | 3 + .../mdp/commands/orientation_command.py | 3 + 24 files changed, 2385 insertions(+), 47 deletions(-) create mode 100644 scripts/reinforcement_learning/deploy.py create mode 100644 scripts/reinforcement_learning/rsl_rl/LEAPP_annotations_for_isaac_lab.md create mode 100644 scripts/reinforcement_learning/rsl_rl/export.py create mode 100644 scripts/reinforcement_learning/rsl_rl/managed_environment_annotator.py/export_annotator.py create mode 100644 scripts/reinforcement_learning/rsl_rl/managed_environment_annotator.py/proxy.py create mode 100644 scripts/reinforcement_learning/rsl_rl/managed_environment_annotator.py/utils.py 
create mode 100644 source/isaaclab/isaaclab/envs/direct_deployment_env.py create mode 100644 source/isaaclab/isaaclab/utils/leapp_semantics.py create mode 100644 source/isaaclab_rl/test/test_rsl_rl_export_flow.py diff --git a/scripts/reinforcement_learning/deploy.py b/scripts/reinforcement_learning/deploy.py new file mode 100644 index 000000000000..cfa05bd549c8 --- /dev/null +++ b/scripts/reinforcement_learning/deploy.py @@ -0,0 +1,72 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Deploy a LEAPP-exported policy in an Isaac Lab simulation. + +Usage:: + + ./isaaclab.sh -p scripts/reinforcement_learning/deploy.py \ + --task Isaac-Velocity-Flat-Anymal-B-v0 \ + --leapp_model .pretrained_checkpoints/rsl_rl/Isaac-Velocity-Flat-Anymal-B-v0/Isaac-Velocity-Flat-Anymal-B-v0/Isaac-Velocity-Flat-Anymal-B-v0.yaml \ + --headless +""" + +"""Launch Isaac Sim Simulator first.""" + +import argparse +import sys + +from isaaclab.app import AppLauncher + +parser = argparse.ArgumentParser(description="Deploy a LEAPP-exported policy in simulation.") +parser.add_argument("--task", type=str, required=True, help="Name of the registered Isaac Lab task.") +parser.add_argument("--leapp_model", type=str, required=True, help="Path to the LEAPP .yaml pipeline description.") +parser.add_argument("--seed", type=int, default=None, help="Seed for the environment.") +AppLauncher.add_app_launcher_args(parser) +args_cli, hydra_args = parser.parse_known_args() + +sys.argv = [sys.argv[0]] + hydra_args + +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import torch + +from isaaclab.envs.direct_deployment_env import DirectDeploymentEnv + +import isaaclab_tasks # noqa: F401 +from isaaclab_tasks.utils.parse_cfg import load_cfg_from_registry + + +def main(): + # ── Load env config from gym 
registry ───────────────────────── + task_name = args_cli.task.split(":")[-1] + env_cfg = load_cfg_from_registry(task_name, "env_cfg_entry_point") + + if args_cli.seed is not None: + env_cfg.seed = args_cli.seed + if args_cli.device is not None: + env_cfg.sim.device = args_cli.device + + # ── Create deploy env ───────────────────────────────────────── + env = DirectDeploymentEnv(env_cfg, args_cli.leapp_model) + + print(f"[INFO]: Deploying task '{task_name}' with LEAPP model: {args_cli.leapp_model}") + print(f"[INFO]: Num envs: {env.num_envs}, decimation: {env.cfg.decimation}, step_dt: {env.step_dt:.4f}s") + + # ── Run loop ────────────────────────────────────────────────── + env.reset() + with torch.inference_mode(): + while simulation_app.is_running(): + env.step() + + env.close() + + +if __name__ == "__main__": + main() + simulation_app.close() diff --git a/scripts/reinforcement_learning/rsl_rl/LEAPP_annotations_for_isaac_lab.md b/scripts/reinforcement_learning/rsl_rl/LEAPP_annotations_for_isaac_lab.md new file mode 100644 index 000000000000..d8ae03f5a6dd --- /dev/null +++ b/scripts/reinforcement_learning/rsl_rl/LEAPP_annotations_for_isaac_lab.md @@ -0,0 +1,158 @@ +# LEAPP Export for Isaac Lab + +Export RSL-RL reinforcement learning pipelines as portable processing graphs using [LEAPP](https://gitlab-master.nvidia.com/Isaac/leapp). + +## Exported Artifacts + +| File | Description | +|------|-------------| +| `.onnx` | Policy network (ONNX) | +| `.yaml` | Pipeline configuration and metadata | +| `.png` | Visualization of the processing graph | + +The YAML file includes semantic metadata (joint names, units, etc.) extracted from IO descriptors. For details on the YAML format, see the [LEAPP documentation](https://gitlab-master.nvidia.com/Isaac/leapp/-/blob/main/docs/0_getting_started.md). + +## Usage + +### 1. Install LEAPP + +```bash +git clone ssh://git@gitlab-master.nvidia.com:12051/Isaac/leapp.git +cd leapp +git checkout develop +pip install -e . +``` + +### 2. 
Export a Policy + +```bash +./isaaclab.sh -p scripts/reinforcement_learning/rsl_rl/export.py \ + --task Isaac-Reach-Franka-v0 \ + --use_pretrained_checkpoint \ + --headless +``` + +> **Note:** Export runs with a single environment instance. + +### 3. View Results + +Artifacts are saved to `.//`. + + + +sample exported `Isaac-Reach-Franka-v0.yaml`: + +```yaml +models: + Isaac-Reach-Franka-v0: + inputs: + - name: joint_pos + dtype: float32 + shape: [1, 9] + type: tensor + - name: joint_vel + dtype: float32 + shape: [1, 9] + type: tensor + - name: ee_pose + dtype: float32 + shape: [1, 7] + type: tensor + - name: last_actions + dtype: float32 + shape: [1, 7] + type: tensor + outputs: + - name: arm_action + dtype: float32 + shape: [1, 7] + type: tensor + - name: last_action + dtype: float32 + shape: [1, 7] + type: tensor + - name: arm_action_kp_gains + dtype: float32 + shape: [1, 7] + type: tensor + - name: arm_action_kd_gains + dtype: float32 + shape: [1, 7] + type: tensor + parameters: + model_path: Isaac-Reach-Franka-v0.onnx + md5sum: 38ee55fa7828b5068b86024206bd5ddb + sha256sum: c605a7076fde5c0d03a36f548d458d24bd543df67aac7675d463d29f870a7eb3 + device: cuda + backend: onnx + +pipeline: + data_flow: {} + feedback_flow: + Isaac-Reach-Franka-v0/last_action: [Isaac-Reach-Franka-v0/last_actions] + inputs: + Isaac-Reach-Franka-v0: [joint_pos, joint_vel, ee_pose] + outputs: + Isaac-Reach-Franka-v0: [arm_action, arm_action_kp_gains, arm_action_kd_gains] + +system information: + cuda version: '12.8' + leapp version: 0.3.0 + os: Linux + python version: 3.11.14 + torch version: 2.7.0+cu128 + +semantic: + actions: + - joint_names: + - panda_joint1 + - panda_joint2 + - panda_joint3 + - panda_joint4 + - panda_joint5 + - panda_joint6 + - panda_joint7 + leapp_mapping: + - arm_action + name: joint_position_action + observations: + - joint_names: + - panda_joint1 + - panda_joint2 + - panda_joint3 + - panda_joint4 + - panda_joint5 + - panda_joint6 + - panda_joint7 + - 
panda_finger_joint1 + - panda_finger_joint2 + leapp_mapping: + - joint_pos + name: joint_pos_rel + units: rad + - joint_names: + - panda_joint1 + - panda_joint2 + - panda_joint3 + - panda_joint4 + - panda_joint5 + - panda_joint6 + - panda_joint7 + - panda_finger_joint1 + - panda_finger_joint2 + leapp_mapping: + - joint_vel + name: joint_vel_rel + units: rad/s + - leapp_mapping: + - ee_pose + name: generated_commands + - leapp_mapping: + - last_actions + name: last_action + scene: + decimation: 2 + dt: 0.03333333333333333 + physics_dt: 0.016666666666666666 + +``` diff --git a/scripts/reinforcement_learning/rsl_rl/export.py b/scripts/reinforcement_learning/rsl_rl/export.py new file mode 100644 index 000000000000..be24c64209d3 --- /dev/null +++ b/scripts/reinforcement_learning/rsl_rl/export.py @@ -0,0 +1,295 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# ruff: noqa: E402 + +"""Script to export a checkpoint if an RL agent from RSL-RL.""" + +"""Launch Isaac Sim Simulator first.""" + +import argparse +import sys +import time +import torch +from collections.abc import Mapping + +import leapp +from leapp import annotate + +# Disable TorchScript before importing task/environment modules so any +# @torch.jit.script helpers resolve to plain Python functions during export. +torch.jit._state.disable() + +from isaaclab.app import AppLauncher + +# local imports +import cli_args # isort: skip + + +# add argparse arguments +parser = argparse.ArgumentParser(description="Train an RL agent with RSL-RL.") +parser.add_argument( + "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations." 
+) +parser.add_argument("--task", type=str, default=None, help="Name of the task.") +parser.add_argument( + "--agent", type=str, default="rsl_rl_cfg_entry_point", help="Name of the RL agent configuration entry point." +) +parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment") +parser.add_argument( + "--use_pretrained_checkpoint", + action="store_true", + help="Use the pre-trained checkpoint from Nucleus.", +) + +# LEAPP arguments +parser.add_argument( + "--export_task_name", + type=str, + default=None, + help="Name of the exported task", +) +parser.add_argument( + "--export_method", + type=str, + default="onnx-dynamo", + choices=["onnx-dynamo", "onnx-torchscript", "jit-script", "jit-trace"], + help="Method to export the policy", +) +parser.add_argument( + "--export_save_path", + type=str, + default=None, + help="Path to save the exported model", +) +parser.add_argument( + "--validation_steps", + type=int, + default=5, + help="Number of steps to validate the exported model", +) +parser.add_argument( + "--disable_graph_visualization", + action="store_true", + default=False, + help="Disable LEAPP graph visualization during compile_graph().", +) +parser.add_argument( + "--disable_automatic_module_annotation", + action="store_true", + default=False, + help="Disables automatic detection and annotation of modules that have internal states", +) + +# append RSL-RL cli arguments +cli_args.add_rsl_rl_args(parser) +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args_cli, hydra_args = parser.parse_known_args() +args_cli.headless = True + +# clear out sys.argv for Hydra +sys.argv = [sys.argv[0]] + hydra_args + +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import gymnasium as gym +import os + +_ANNOTATOR_DIR = os.path.join( + os.path.dirname(__file__), + "managed_environment_annotator.py", +) +if 
_ANNOTATOR_DIR not in sys.path: + sys.path.insert(0, _ANNOTATOR_DIR) + +from export_annotator import patch_env_for_export +from rsl_rl.runners import DistillationRunner, OnPolicyRunner + +from isaaclab.envs import ManagerBasedRLEnv, ManagerBasedRLEnvCfg +from isaaclab.utils.assets import retrieve_file_path + +from isaaclab_rl.rsl_rl import RslRlBaseRunnerCfg, RslRlVecEnvWrapper +from isaaclab_rl.utils.pretrained_checkpoint import get_published_pretrained_checkpoint + +import isaaclab_tasks # noqa: F401 +from isaaclab_tasks.utils import get_checkpoint_path +from isaaclab_tasks.utils.hydra import hydra_task_config + + +def _get_actor_memory_module(policy_nn): + if hasattr(policy_nn, "memory_a"): + return policy_nn.memory_a + if hasattr(policy_nn, "memory_s"): + return policy_nn.memory_s + return None + + +def _ensure_actor_hidden_state_initialized(policy_nn, batch_size: int, device: torch.device, dtype: torch.dtype): + actor_state, _ = policy_nn.get_hidden_states() + if actor_state is not None: + return actor_state + + memory = _get_actor_memory_module(policy_nn) + if memory is None or not hasattr(memory, "rnn"): + return None + + num_layers = memory.rnn.num_layers + hidden_size = memory.rnn.hidden_size + zeros = torch.zeros(num_layers, batch_size, hidden_size, device=device, dtype=dtype) + if isinstance(memory.rnn, torch.nn.LSTM): + actor_state = (zeros.clone(), zeros.clone()) + else: + actor_state = zeros + memory.hidden_state = actor_state + return actor_state + + +def _state_dict_from_actor_hidden(actor_hidden): + if actor_hidden is None: + return {} + if isinstance(actor_hidden, tuple): + return {f"actor_state_{idx}": tensor for idx, tensor in enumerate(actor_hidden)} + return {"actor_state": actor_hidden} + + +def _actor_hidden_from_registered(registered_state, original_hidden): + if isinstance(original_hidden, tuple): + if isinstance(registered_state, tuple): + return registered_state + return (registered_state,) + return registered_state + + 
+@hydra_task_config(args_cli.task, args_cli.agent) +def main(env_cfg: ManagerBasedRLEnvCfg, agent_cfg: RslRlBaseRunnerCfg): + """Export a RSL-RL agent.""" + # grab task name for checkpoint path + task_name = args_cli.task.split(":")[-1] + train_task_name = task_name.replace("-Play", "") + + # override configurations with non-hydra CLI arguments + agent_cfg: RslRlBaseRunnerCfg = cli_args.update_rsl_rl_cfg(agent_cfg, args_cli) + env_cfg.scene.num_envs = 1 + + # set the environment seed + # note: certain randomizations occur in the environment initialization so we set the seed here + env_cfg.seed = agent_cfg.seed + env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device + + # specify directory for logging experiments + log_root_path = os.path.join("logs", "rsl_rl", agent_cfg.experiment_name) + log_root_path = os.path.abspath(log_root_path) + print(f"[INFO] Loading experiment from directory: {log_root_path}") + if args_cli.use_pretrained_checkpoint: + resume_path = get_published_pretrained_checkpoint("rsl_rl", train_task_name) + if not resume_path: + print("[INFO] Unfortunately a pre-trained checkpoint is currently unavailable for this task.") + return + elif args_cli.checkpoint: + resume_path = retrieve_file_path(args_cli.checkpoint) + else: + resume_path = get_checkpoint_path(log_root_path, agent_cfg.load_run, agent_cfg.load_checkpoint) + + log_dir = os.path.dirname(resume_path) + + # set the log directory for the environment (works for all environment types) + env_cfg.log_dir = log_dir + + # create isaac environment + # Note: observation functions are already patched at module level (before isaaclab_tasks import) + env = gym.make(args_cli.task, cfg=env_cfg, render_mode=None) + if not isinstance(env.unwrapped, ManagerBasedRLEnv): + raise NotImplementedError( + "Export currently supports only manager-based environments. " + f"Task '{args_cli.task}' created env type '{type(env.unwrapped).__name__}'." 
+ ) + export_task_name = args_cli.export_task_name if args_cli.export_task_name is not None else task_name + + # required + obs_groups_cfg = getattr(agent_cfg, "obs_groups", None) + if isinstance(obs_groups_cfg, Mapping): + required_obs_groups = set(obs_groups_cfg.get("policy", ["policy"])) + else: + required_obs_groups = {"policy"} + patch_env_for_export( + env, + task_name=export_task_name, + export_method=args_cli.export_method, + required_obs_groups=required_obs_groups, + ) + + # wrap around environment for rsl-rl + env = RslRlVecEnvWrapper(env, clip_actions=agent_cfg.clip_actions) + + print(f"[INFO]: Loading model checkpoint from: {resume_path}") + # load previously trained model + if agent_cfg.class_name == "OnPolicyRunner": + runner = OnPolicyRunner(env, agent_cfg.to_dict(), log_dir=None, device=agent_cfg.device) + elif agent_cfg.class_name == "DistillationRunner": + runner = DistillationRunner(env, agent_cfg.to_dict(), log_dir=None, device=agent_cfg.device) + else: + raise ValueError(f"Unsupported runner class: {agent_cfg.class_name}") + runner.load(resume_path) + + # obtain the trained policy for inference + policy = runner.get_inference_policy(device=env.unwrapped.device) + policy_nn = getattr(policy, "__self__", None) + + # start annotation tracing + # Note: all patching is done at module/class level before isaaclab_tasks import + save_path = args_cli.export_save_path if args_cli.export_save_path is not None else log_dir + leapp.start(export_task_name, save_path=save_path, max_cached_io=max(args_cli.validation_steps, 2)) + # obs = env.get_observations() + obs = env.reset()[0] + # simulate environment + while not simulation_app.is_running(): + time.sleep(0.5) + + for _ in range(max(args_cli.validation_steps, 2)): + # run everything in inference mode + with torch.inference_mode(): + if policy_nn is not None and getattr(policy_nn, "is_recurrent", False): + actor_hidden = _ensure_actor_hidden_state_initialized( + policy_nn, + batch_size=env.num_envs, + 
device=env.unwrapped.device,
+                    dtype=next(policy_nn.parameters()).dtype,
+                )
+                registered_state = annotate.state_tensors(
+                    export_task_name,
+                    _state_dict_from_actor_hidden(actor_hidden),
+                )
+                actor_memory = _get_actor_memory_module(policy_nn)
+                if actor_memory is not None:
+                    actor_memory.hidden_state = _actor_hidden_from_registered(registered_state, actor_hidden)
+            actions = policy(obs)
+            if policy_nn is not None and getattr(policy_nn, "is_recurrent", False):
+                actor_hidden_after = policy_nn.get_hidden_states()[0]
+                annotate.update_state(
+                    export_task_name,
+                    _state_dict_from_actor_hidden(actor_hidden_after),
+                )
+            # env stepping
+            obs, _, _, _ = env.step(actions)
+
+    leapp.stop()
+    validate = args_cli.validation_steps > 0
+    leapp.compile_graph(visualize=not args_cli.disable_graph_visualization, validate=validate)
+
+    # close the simulator
+    env.close()
+
+
+if __name__ == "__main__":
+    # run the main function
+    main()
+    # close sim app
+    simulation_app.close()
diff --git a/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator.py/export_annotator.py b/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator.py/export_annotator.py
new file mode 100644
index 000000000000..98e78cf7224f
--- /dev/null
+++ b/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator.py/export_annotator.py
@@ -0,0 +1,759 @@
+# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+"""Export annotations for Isaac Lab policies using proxy-based patching.
+
+Observation and action annotation share a single set of annotating getters
+and a unified dedup cache so that a state property (e.g. ``joint_pos``)
+read by both an observation term and an action term resolves to one LEAPP
+input edge.
+
+- Observation term functions see an _EnvProxy whose scene returns
+  _ArticulationProxy objects with annotating data getters.
+ +- Action terms have their ``_asset`` attribute replaced with an + _ArticulationWriteProxy that intercepts ``_leapp_semantics``-decorated + write methods **and** routes ``.data`` reads through the same annotating + data proxy used by observations. + +Cache lifecycle (assuming single-env play-mode export): + + compute_group() clear cache → obs terms populate cache + policy inference TracedTensors propagate through NN + process_action() register_buffer for raw_actions + apply_action() [tracing] reuse cached TracedTensors for state reads, + capture write outputs, call output_tensors(), + then clear cache + apply_action() [decim.] clear cache → fresh reads for simulation + ... + compute_group() clear cache → fresh reads for next obs +""" + +from __future__ import annotations + +import inspect +import logging +import torch +from contextlib import suppress +from typing import TYPE_CHECKING, Any + +from leapp import annotate +from leapp.utils.tensor_description import TensorSemantics +from proxy import _ArticulationWriteProxy, _DataProxy, _EnvProxy, _ManagerTermProxy, _SceneProxy + +from isaaclab.assets.articulation.articulation import Articulation +from isaaclab.managers import ManagerTermBase +from isaaclab.utils.leapp_semantics import resolve_leapp_element_names + +if TYPE_CHECKING: + from isaaclab.envs import ManagerBasedEnv + + +# Reuse the generic joint-name resolver for kp/kd outputs by providing the +# same ``element_names_source`` contract as articulation getters/writers. +_GAIN_JOINT_SEMANTICS = type( + "GainJointSemantics", + (), + {"element_names": None, "element_names_source": "joint_names"}, +)() + + +def _discover_data_classes(scene) -> set[type]: + """Discover all data class types from entities present in the scene. + + Iterates over all dict-valued instance attributes on the scene object + (which is how ``InteractiveScene`` stores its entity families internally) + and collects ``type(entity.data)`` for any value that exposes a ``.data`` + attribute. 
(``.data``) is a formal contract in the asset base class. + getting data from any data class @property will be automatically traced. + """ + classes: set[type] = set() + for attr_value in vars(scene).values(): + if not isinstance(attr_value, dict): + continue + for entity in attr_value.values(): + data = getattr(entity, "data", None) + if data is not None: + classes.add(type(data)) + return classes + + +# ══════════════════════════════════════════════════════════════════ +# ExportPatcher +# ══════════════════════════════════════════════════════════════════ + + +class ExportPatcher: + """Unified patcher that annotates observation inputs and action outputs for LEAPP export. + + At setup time the patcher discovers all data classes present in the scene + (``ArticulationData``, ``RigidObjectData``, sensor data classes, …) and + builds annotating getters for **every** ``@property`` on those classes. + + - Properties carrying ``_leapp_semantics`` produce rich annotations + (kind, element_names) used by the downstream deployment resolver. + - Properties **without** ``_leapp_semantics`` are still traced so that + no tensor is silently baked as a constant during export. + + The getters and a shared dedup cache are wired into both: + + - The observation proxy chain (``_EnvProxy`` → ``_SceneProxy`` → + ``_EntityProxy`` → ``_DataProxy``) for state reads + by observation term functions. + - The ``_ArticulationWriteProxy`` on each action term, which intercepts + target writes **and** routes ``.data`` reads through the same + ``_DataProxy`` / cache. + + This ensures that a property like ``joint_pos`` read by both an + observation term and ``RelativeJointPositionAction.apply_actions()`` + resolves to a single LEAPP input edge rather than being silently baked + in as a constant. 
+ """ + + def __init__(self, task_name: str, export_method: str, required_obs_groups: set[str] | None = None): + self.task_name = task_name + self.export_method = export_method + self.required_obs_groups = required_obs_groups + self._annotated_tensor_cache: dict[tuple[int, str], torch.Tensor] = {} + self._action_output_cache: list[TensorSemantics] = [] + self._captured_write_term_names: set[str] = set() + self._fallback_term_names: set[str] = set() + self._pending_action_output_export: bool = False + self._uses_last_action_state: bool = False + self._patched_history_state_names: dict[int, str] = {} + + def setup(self, env): + """Patch observation and action managers on the unwrapped env.""" + unwrapped = env.env.unwrapped + + annotating_getters = self._build_annotating_getters(unwrapped.scene) + annotating_write_methods = self._build_annotating_write_methods() + cache = self._annotated_tensor_cache + + scene_proxy = _SceneProxy(unwrapped.scene, annotating_getters, cache) + proxy_env = _EnvProxy(unwrapped, scene_proxy) + + self._disable_training_managers(unwrapped) + self._patch_observation_manager(unwrapped.observation_manager, proxy_env) + self._patch_history_buffers(unwrapped.observation_manager) + self._patch_action_manager( + unwrapped.action_manager, + annotating_getters, + cache, + annotating_write_methods, + ) + + # ── Disable training-only managers ───────────────────────────── + + @staticmethod + def _disable_training_managers(unwrapped): + """Replace training-only manager methods with no-ops. + + During export the curriculum, reward, termination, and recorder + managers serve no purpose. Disabling them avoids side-effect + crashes (e.g. ADR curriculum terms accessing nullified noise + configs) and removes unnecessary computation. 
+ """ + num_envs = unwrapped.num_envs + device = unwrapped.device + + if hasattr(unwrapped, "curriculum_manager"): + unwrapped.curriculum_manager.compute = lambda env_ids=None: None + + if hasattr(unwrapped, "reward_manager"): + _zero_reward = torch.zeros(num_envs, device=device) + unwrapped.reward_manager.compute = lambda dt: _zero_reward + + if hasattr(unwrapped, "termination_manager"): + _no_termination = torch.zeros(num_envs, dtype=torch.bool, device=device) + unwrapped.termination_manager.compute = lambda: _no_termination + + if hasattr(unwrapped, "recorder_manager"): + rm = unwrapped.recorder_manager + + def _noop(*args, **kwargs): + return None + + rm.record_pre_step = _noop + rm.record_post_step = _noop + rm.record_pre_reset = _noop + rm.record_post_reset = _noop + rm.record_post_physics_decimation_step = _noop + + # ── Scanning ────────────────────────────────────────────────── + + def _build_annotating_getters(self, scene) -> dict[type, dict[str, callable]]: + """Discover data classes from the scene and build annotating getters for all properties. + + This method introspects the actual scene to find every data class in use. + For each class it registers getters for **all** public ``@property`` + descriptors — not just those decorated with ``_leapp_semantics``. Properties + without semantics are still traced (with ``kind=None``) so that no tensor + read is silently baked as a constant during export. + + Returns a dict mapping data class type to a dict of + ``property_name -> callable(data_self, input_name) -> annotated_tensor``. 
+ """ + data_classes = _discover_data_classes(scene) + getters: dict[type, dict[str, callable]] = {} + for data_cls in data_classes: + class_getters: dict[str, callable] = {} + for prop_name in dir(data_cls): + if prop_name.startswith("_"): # ignore all private properties + continue + prop = getattr(data_cls, prop_name, None) + if isinstance(prop, property) and prop.fget: # only consider properties with a getter + class_getters[prop_name] = self._make_annotating_getter(prop.fget, prop_name) + if class_getters: + getters[data_cls] = class_getters + return getters + + def _make_annotating_getter(self, original_fget, prop_name: str): + """Create an annotating getter callable for a single annotated data property. + + The returned callable invokes the real getter, then registers the result + as a LEAPP input tensor with the property's semantic metadata and the + caller-supplied public input name. + """ + task_name = self.task_name + + def getter(data_self, input_name: str): + result = original_fget(data_self) + if not isinstance(result, torch.Tensor): + return result + semantics_meta = getattr(original_fget, "_leapp_semantics", None) + sem = TensorSemantics( + name=input_name, + ref=result, + kind=semantics_meta.kind if semantics_meta else None, + element_names=resolve_leapp_element_names(semantics_meta, data_self), + ) + return annotate.input_tensors(task_name, sem) + + return getter + + def _build_annotating_write_methods(self) -> dict[str, callable]: + """Scan Articulation for ``_leapp_semantics`` methods and build interceptors. + + Returns a dict mapping method name to a factory callable. The factory takes + ``(real_asset, original_bound_method, term_name, output_cache)`` and returns + a callable that the proxy returns in ``__getattr__``. 
+ """ + methods: dict[str, callable] = {} + for method_name in dir(Articulation): + method = getattr(Articulation, method_name, None) + if callable(method) and hasattr(method, "_leapp_semantics"): + methods[method_name] = self._make_write_interceptor_factory(method, method_name) + return methods + + def _make_write_interceptor_factory(self, original_unbound, method_name: str): + """Create a factory that produces bound annotating wrappers for a single write method. + + The factory is called by ``_ArticulationWriteProxy.__getattr__`` each time the + method is accessed. It returns a callable that: + + 1. Calls the real method on the real asset. + 2. Inspects the ``target`` argument. + 3. Records a ``TensorSemantics`` entry in the shared output cache. + 4. Records the term name in ``_captured_write_term_names`` so that + the fallback path knows this term produced write outputs. + """ + signature = inspect.signature(original_unbound) + semantics = getattr(original_unbound, "_leapp_semantics", None) + captured_write_term_names = self._captured_write_term_names + + def factory(real_asset: Articulation, original_bound, term_name: str, output_cache: list): + + def interceptor(*args, **kwargs): + result = original_bound(*args, **kwargs) + bound_args = signature.bind_partial(real_asset, *args, **kwargs) + target = bound_args.arguments.get("target") + + if isinstance(target, torch.Tensor): + tensor_target: torch.Tensor = target + output_name = _unique_output_name(term_name, method_name, output_cache) + joint_ids = bound_args.arguments.get("joint_ids") + output_cache.append( + TensorSemantics( + name=output_name, + ref=tensor_target.clone(), + kind=semantics.kind if semantics is not None else None, + element_names=resolve_leapp_element_names( + semantics, + _JointNameContext(real_asset.joint_names, joint_ids), + ), + ) + ) + captured_write_term_names.add(term_name) + + return result + + return interceptor + + return factory + + @staticmethod + def 
_resolve_scene_entity_key(scene, entity: Any) -> str | None: + """Return the scene dictionary key for a given entity, if present.""" + for attr_value in vars(scene).values(): + if not isinstance(attr_value, dict): + continue + for key, candidate in attr_value.items(): + if candidate is entity: + return key + return None + + # ── Observation manager patches ─────────────────────────────── + + def _patch_history_buffers(self, obs_manager): + """Patch history-enabled observation buffers to export as LEAPP state.""" + history_buffers = getattr(obs_manager, "_group_obs_term_history_buffer", {}) + term_names_by_group = getattr(obs_manager, "_group_obs_term_names", {}) + + for group_name, term_cfgs in obs_manager._group_obs_term_cfgs.items(): + if self.required_obs_groups is not None and group_name not in self.required_obs_groups: + continue + group_buffers = history_buffers.get(group_name, {}) + group_term_names = term_names_by_group.get(group_name, []) + + for index, term_cfg in enumerate(term_cfgs): + history_length = getattr(term_cfg, "history_length", 0) or 0 + if history_length <= 0: + continue + + if index >= len(group_term_names): + continue + + term_name = group_term_names[index] + circular_buffer = group_buffers.get(term_name) + if circular_buffer is None: + continue + + state_name = f"h_{group_name}_{term_name}" + self._patch_history_buffer_append(circular_buffer, state_name) + + def _patch_history_buffer_append(self, circular_buffer, state_name: str): + """Wrap ``_append`` so history buffers become explicit LEAPP state.""" + if hasattr(circular_buffer, "_leapp_original_append"): + return + + task_name = self.task_name + original_append = circular_buffer._append + + def patched_append(data: torch.Tensor): + if circular_buffer._buffer is not None: + circular_buffer._buffer = annotate.state_tensors(task_name, {state_name: circular_buffer._buffer}) + + original_append(data) + + if circular_buffer._buffer is not None: + circular_buffer._buffer = 
annotate.update_state(task_name, {state_name: circular_buffer._buffer}) + + circular_buffer._leapp_original_append = original_append + circular_buffer._append = patched_append + self._patched_history_state_names[id(circular_buffer)] = state_name + + def _patch_observation_manager(self, obs_manager, proxy_env): + """Patch observation terms to use annotating proxies and disable noise.""" + for group_name, term_cfgs in obs_manager._group_obs_term_cfgs.items(): + if self.required_obs_groups is not None and group_name not in self.required_obs_groups: + continue + for term_cfg in term_cfgs: + original_func = term_cfg.func + func_name = getattr(original_func, "__name__", None) + + if func_name == "last_action": + self._uses_last_action_state = True + term_cfg.func = self._wrap_last_action(original_func) + elif func_name == "generated_commands": + term_cfg.func = self._wrap_generated_commands(original_func, term_cfg) + else: + term_cfg.func = self._wrap_with_proxy(original_func, proxy_env) + + term_cfg.noise = None + + original_compute = obs_manager.compute + original_compute_group = obs_manager.compute_group + cache = self._annotated_tensor_cache + + def patched_compute(*args, **kwargs): + """Clear the tensor dedup cache once per full observation pass.""" + cache.clear() + return original_compute(*args, **kwargs) + + def patched_compute_group(*args, **kwargs): + """Run the real compute_group using the current observation-pass cache.""" + return original_compute_group(*args, **kwargs) + + obs_manager.compute = patched_compute + obs_manager.compute_group = patched_compute_group + + # ── Action manager patches ──────────────────────────────────── + + def _patch_action_manager(self, action_manager, annotating_getters, cache, annotating_write_methods): + """Patch action terms with write+read proxies and patch manager methods.""" + scene = action_manager._env.scene + for term_name, term in action_manager._terms.items(): + asset = getattr(term, "_asset", None) + if 
isinstance(asset, Articulation): + real_asset: Articulation = asset + scene_key = self._resolve_scene_entity_key(scene, real_asset) or "ego" + data_proxy = _DataProxy( + real_asset.data, + annotating_getters, + cache, + input_name_resolver=lambda prop_name, k=scene_key: f"{k}_{prop_name}", + ) + term._asset = _ArticulationWriteProxy( + real_asset=real_asset, + term_name=term_name, + output_cache=self._action_output_cache, + annotating_methods=annotating_write_methods, + data_proxy=data_proxy, + ) + + self._patch_action_manager_methods(action_manager) + + def _patch_action_manager_methods(self, action_manager): + """Patch ``process_action`` and ``apply_action`` on the action manager instance. + + ``process_action`` registers raw_action buffers for LEAPP tracing and + preserves the action tensor clone. + + ``apply_action`` coordinates the cache and output lifecycle: + + - **Tracing pass** (first ``apply_action`` after ``process_action``): + The cache still holds TracedTensors populated by ``compute_group``. + Action terms that read state (e.g. ``RelativeJointPositionAction`` + reading ``joint_pos``) get those TracedTensors from the cache, + keeping the LEAPP graph connected. After ``output_tensors()`` the + cache is cleared so subsequent decimation sub-steps read fresh values. + + - **Non-tracing passes** (remaining decimation sub-steps and all + subsequent iterations): The cache is cleared **before** running + action terms so every ``.data`` read returns the current simulator + value, preserving simulation correctness. 
+ """ + original_process = action_manager.process_action + original_apply = action_manager.apply_action + task_name = self.task_name + cache = self._annotated_tensor_cache + + def patched_process_action(action: torch.Tensor): + """Register raw_action buffers, call real process_action, preserve action clone.""" + for term_name, term in action_manager._terms.items(): + if hasattr(term, "_raw_actions") and term._raw_actions is not None: + term._raw_actions = annotate.register_buffer( + task_name, {f"{term_name}_raw_actions": term._raw_actions} + ) + + original_process(action) + action_manager._action = action.clone() + self._pending_action_output_export = True + + def patched_apply_action(): + """Coordinate cache lifecycle and LEAPP output annotation.""" + if not self._pending_action_output_export: + cache.clear() + return original_apply() + + # Tracing pass: cache still holds TracedTensors from compute_group. + self._action_output_cache.clear() + self._captured_write_term_names.clear() + original_apply() + + self._action_output_cache.extend(self._collect_action_outputs(action_manager)) + self._action_output_cache.extend(self._collect_processed_action_fallbacks(action_manager)) + if self._uses_last_action_state: + annotate.update_state(task_name, {"last_action": action_manager._action}) + fallback_terms = self._fallback_term_names + static_values = self._collect_action_static_outputs(action_manager, fallback_terms) + annotate.output_tensors( + task_name, + self._action_output_cache, + static_outputs=static_values or None, + export_with=self.export_method, + ) + self._pending_action_output_export = False + self._action_output_cache.clear() + cache.clear() + return None + + action_manager.process_action = patched_process_action + action_manager.apply_action = patched_apply_action + + # ── Observation term wrappers ───────────────────────────────── + + @staticmethod + def _wrap_with_proxy(original_func, proxy_env): + """Wrap a term function so it receives the proxy env 
instead of the real env.""" + + if isinstance(original_func, ManagerTermBase): + return _ManagerTermProxy(original_func, proxy_env) + + def wrapped(*args, **kwargs): + if args: + args = (proxy_env, *args[1:]) + else: + args = (proxy_env,) + return original_func(*args, **kwargs) + + wrapped.__name__ = getattr(original_func, "__name__", "unknown") + return wrapped + + def _wrap_last_action(self, original_func): + """Wrap ``last_action`` as a LEAPP state tensor. + + ``last_action`` is feedback state, not a regular dangling input. We + therefore register it through ``annotate.state_tensors(...)`` on the + observation side and update it through ``annotate.update_state(...)`` + after the traced action pass. + """ + task_name = self.task_name + + def wrapped(env, action_name=None, **kwargs): + result = original_func(env, action_name, **kwargs) + return annotate.state_tensors(task_name, {"last_action": result}) + + wrapped.__name__ = original_func.__name__ + return wrapped + + def _wrap_generated_commands(self, original_func, term_cfg): + """Wrap the ``generated_commands`` observation term to annotate its output as a LEAPP input. + + Resolves command semantics (kind, element_names) from the command manager + configuration when available. 
+ """ + task_name = self.task_name + command_name_from_cfg = term_cfg.params.get("command_name") + + def wrapped(env, command_name=None, **kwargs): + result = original_func(env, command_name, **kwargs) + leapp_input_name = command_name or command_name_from_cfg or "commands" + command_cfg = None + with suppress(AttributeError, KeyError): + command_cfg = env.command_manager.get_term(leapp_input_name).cfg + sem = TensorSemantics( + name=leapp_input_name, + ref=result, + kind=getattr(command_cfg, "cmd_hint", None), + element_names=getattr(command_cfg, "element_names", None), + ) + return annotate.input_tensors(task_name, sem) + + wrapped.__name__ = original_func.__name__ + return wrapped + + # ── Output collection ───────────────────────────────────────── + + @staticmethod + def _collect_action_outputs(action_manager) -> list[TensorSemantics]: + """Collect non-writer action tensors that should be exported (e.g. OSC dynamic gains).""" + tensors: list[TensorSemantics] = [] + for term_name, term in action_manager._terms.items(): + osc = getattr(term, "_osc", None) + if osc and hasattr(osc, "cfg") and osc.cfg.impedance_mode in ["variable", "variable_kp"]: + asset = getattr(term, "_asset", None) + real_asset = getattr(asset, "_real_asset", asset) + joint_ids = getattr(term, "_joint_ids", None) + joint_name_context = None + if real_asset is not None and hasattr(real_asset, "joint_names"): + joint_name_context = _JointNameContext(real_asset.joint_names, joint_ids) + tensors.append( + TensorSemantics( + name=f"{term_name}_kp_gains", + ref=torch.diagonal(osc._motion_p_gains_task, dim1=-2, dim2=-1), + kind="kp", + element_names=( + resolve_leapp_element_names( + _GAIN_JOINT_SEMANTICS, + joint_name_context, + ) + if joint_name_context is not None + else None + ), + ) + ) + tensors.append( + TensorSemantics( + name=f"{term_name}_kd_gains", + ref=torch.diagonal(osc._motion_d_gains_task, dim1=-2, dim2=-1), + kind="kd", + element_names=( + resolve_leapp_element_names( + 
_GAIN_JOINT_SEMANTICS, + joint_name_context, + ) + if joint_name_context is not None + else None + ), + ) + ) + return tensors + + def _collect_processed_action_fallbacks(self, action_manager) -> list[TensorSemantics]: + """Fallback: use ``term.processed_actions`` for terms that produced no write outputs. + + When an action term does not call any ``_leapp_semantics``-decorated write method + (e.g. ``PreTrainedPolicyAction`` which delegates writes to a nested sub-policy), + we fall back to capturing ``term.processed_actions`` as the output tensor. + """ + logger = logging.getLogger(__name__) + fallback_terms: set[str] = set() + tensors: list[TensorSemantics] = [] + for term_name, term in action_manager._terms.items(): + if term_name in self._captured_write_term_names: + continue + processed = getattr(term, "processed_actions", None) + if processed is None: + continue + if isinstance(processed, torch.Tensor): + logger.warning( + "Action term '%s' did not write to any asset directly. Falling back to processed_actions as the" + " export output.\nIf you wish to add semantic data to this policy, you need to manually annotate it" + " with output_tensors.", + term_name, + ) + tensors.append( + TensorSemantics( + name=term_name, + ref=processed.clone(), + kind=None, + element_names=None, + ) + ) + fallback_terms.add(term_name) + self._fallback_term_names = fallback_terms + return tensors + + @staticmethod + def _collect_action_static_outputs(action_manager, skip_terms: set[str] | None = None) -> list[TensorSemantics]: + """Collect static kp/kd gain values from action terms for export metadata. + + Terms in ``skip_terms`` are excluded — these are terms that fell back + to ``processed_actions`` and whose static gains (kp/kd) belong to a + lower abstraction level that is not part of the exported policy. 
+ """ + static_values: list[TensorSemantics] = [] + for term_name, term in action_manager._terms.items(): + if skip_terms and term_name in skip_terms: + continue + osc = getattr(term, "_osc", None) + if osc and hasattr(osc, "cfg") and osc.cfg.impedance_mode in ["variable", "variable_kp"]: + continue + asset = getattr(term, "_asset", None) + real_asset = getattr(asset, "_real_asset", asset) + if real_asset and hasattr(real_asset, "data"): + data = real_asset.data + joint_ids = getattr(term, "_joint_ids", None) + joint_name_context = None + if hasattr(real_asset, "joint_names"): + joint_name_context = _JointNameContext(real_asset.joint_names, joint_ids) + if hasattr(data, "default_joint_stiffness") and data.default_joint_stiffness is not None: + gains = data.default_joint_stiffness + static_values.append( + TensorSemantics( + name=f"{term_name}_kp_gains", + ref=gains[:, joint_ids] if joint_ids else gains, + kind="kp", + element_names=( + resolve_leapp_element_names( + _GAIN_JOINT_SEMANTICS, + joint_name_context, + ) + if joint_name_context is not None + else None + ), + ) + ) + if hasattr(data, "default_joint_damping") and data.default_joint_damping is not None: + gains = data.default_joint_damping + static_values.append( + TensorSemantics( + name=f"{term_name}_kd_gains", + ref=gains[:, joint_ids] if joint_ids else gains, + kind="kd", + element_names=( + resolve_leapp_element_names( + _GAIN_JOINT_SEMANTICS, + joint_name_context, + ) + if joint_name_context is not None + else None + ), + ) + ) + return static_values + + +# ══════════════════════════════════════════════════════════════════ +# Helpers +# ══════════════════════════════════════════════════════════════════ + + +class _JointNameContext: + """Lightweight stand-in for resolving runtime joint name subsets in ``resolve_leapp_element_names``.""" + + __slots__ = ("joint_names", "_joint_ids") + + def __init__(self, joint_names: list[str], joint_ids): + self.joint_names = joint_names + self._joint_ids = joint_ids + 
+ +def _unique_output_name(term_name: str, method_name: str, output_cache: list[TensorSemantics]) -> str: + """Return a stable, unique output name for an action write entry. + + Prefers ``term_name``, falls back to ``term_name_method_name``, and appends a + numeric suffix if even that collides. + """ + existing = {t.name for t in output_cache} + candidate = term_name + if candidate in existing: + candidate = f"{term_name}_{method_name}" + suffix = 2 + while candidate in existing: + candidate = f"{term_name}_{method_name}_{suffix}" + suffix += 1 + return candidate + + +# ══════════════════════════════════════════════════════════════════ +# Public entry point +# ══════════════════════════════════════════════════════════════════ + + +def patch_env_for_export( + env: ManagerBasedEnv, + task_name: str, + export_method: str, + required_obs_groups: set[str] | None = None, +) -> None: + """Patch the env's observation and action managers for LEAPP export. + + This is a thin public entry point around ``ExportPatcher``. It mutates + the provided env instance in-place so that: + + - Observation terms route through proxy objects that annotate tensor + reads from **any** scene entity data class (articulations, rigid + objects, sensors, etc.). + - Action terms route through proxy objects that annotate both data + reads **and** ``Articulation`` write methods. + + Data classes are discovered automatically by scanning the scene at + setup time — no hardcoded class list is required. Properties with + ``_leapp_semantics`` produce rich annotations; properties without it + are still traced so that no tensor is silently baked as a constant. + + State reads are deduplicated across observation and action paths via a + shared cache, so a property like ``joint_pos`` that is read by both an + observation term and a relative-position action term appears as a single + LEAPP input edge. 
+ + The underlying env, scene, assets, and tensors remain shared with the rest + of the pipeline; only the manager call paths are redirected. + """ + patcher = ExportPatcher(task_name, export_method, required_obs_groups=required_obs_groups) + patcher.setup(env) diff --git a/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator.py/proxy.py b/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator.py/proxy.py new file mode 100644 index 000000000000..1c1dd55a5763 --- /dev/null +++ b/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator.py/proxy.py @@ -0,0 +1,346 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import torch +from collections.abc import Callable +from typing import Any + +from leapp.utils.tensor_description import TensorSemantics + +from isaaclab.assets.articulation.articulation import Articulation +from isaaclab.managers import ManagerTermBase + + +def _lookup_annotating_getter( + annotating_getters_by_type: dict[type, dict[str, Callable]], real_data: Any, name: str +) -> Callable | None: + """Return the annotating getter for a property on the given data object, if any.""" + for data_cls in type(real_data).__mro__: + getter = annotating_getters_by_type.get(data_cls, {}).get(name) + if getter is not None: + return getter + return None + + +class _DataProxy: + """Proxy around a real data object that intercepts tensor-returning property reads. + + The real data object may be any scene entity data class (``ArticulationData``, + ``RigidObjectData``, sensor data classes, etc.). The proxy intercepts all + ``@property`` getters that were registered during scene introspection. When + the getter returns a ``torch.Tensor``, the result is annotated as a LEAPP + input and cached for deduplication. 
Non-tensor results are forwarded + transparently. + + Properties with ``_leapp_semantics`` metadata produce rich annotations + (kind, element_names). Properties without it are still traced — with no + semantic metadata — so that no tensor is silently baked as a constant. + + All other attribute access is forwarded transparently to the real object. + """ + + def __init__( + self, + real_data: Any, + annotating_getters_by_type: dict[type, dict[str, Callable]], + cache: dict, + input_name_resolver: Callable, + ): + object.__setattr__(self, "_real_data", real_data) + object.__setattr__(self, "_annotating_getters_by_type", annotating_getters_by_type) + object.__setattr__(self, "_cache", cache) + object.__setattr__(self, "_input_name_resolver", input_name_resolver) + + def __getattr__(self, name): + """Intercept registered property reads; forward everything else.""" + real_data = object.__getattribute__(self, "_real_data") + getter = _lookup_annotating_getter( + object.__getattribute__(self, "_annotating_getters_by_type"), real_data, name + ) + if getter is not None: + cache = object.__getattribute__(self, "_cache") + cache_key = (id(real_data), name) + if cache_key in cache: + return cache[cache_key].clone() + input_name = object.__getattribute__(self, "_input_name_resolver")(name) + result = getter(real_data, input_name) + if isinstance(result, torch.Tensor): + cache[cache_key] = result + return result + return getattr(real_data, name) + + +class _EntityProxy: + """Proxy around a real scene entity that returns a ``_DataProxy`` for ``.data``. + + All other attribute access is forwarded transparently to the real asset. 
+ """ + + def __init__(self, real_entity: Any, data_proxy: _DataProxy): + object.__setattr__(self, "_real_entity", real_entity) + object.__setattr__(self, "_data_proxy", data_proxy) + + @property + def data(self): + """Return the annotating data proxy instead of the real data object.""" + return object.__getattribute__(self, "_data_proxy") + + def __getattr__(self, name): + """Forward all non-data attribute access to the real scene entity.""" + return getattr(object.__getattribute__(self, "_real_entity"), name) + + +class _EntityMappingProxy: + """Proxy around a mapping of scene entities that lazily wraps data-producing entries.""" + + def __init__(self, real_mapping, annotating_getters_by_type: dict[type, dict[str, Callable]], cache: dict): + object.__setattr__(self, "_real_mapping", real_mapping) + object.__setattr__(self, "_annotating_getters_by_type", annotating_getters_by_type) + object.__setattr__(self, "_cache", cache) + object.__setattr__(self, "_proxied", {}) + + def __getitem__(self, key): + """Return a proxied entity when it has a ``.data`` attribute.""" + proxied = object.__getattribute__(self, "_proxied") + if key in proxied: + return proxied[key] + real_mapping = object.__getattribute__(self, "_real_mapping") + entity = real_mapping[key] + data = getattr(entity, "data", None) + if data is None: + return entity + annotating_getters_by_type = object.__getattribute__(self, "_annotating_getters_by_type") + data_proxy = _DataProxy( + data, + annotating_getters_by_type, + object.__getattribute__(self, "_cache"), + input_name_resolver=lambda prop_name: f"{key}_{prop_name}", + ) + proxy = _EntityProxy(entity, data_proxy) + proxied[key] = proxy + return proxy + + def get(self, key, default=None): + """Return a proxied entity when present, default otherwise.""" + real_mapping = object.__getattribute__(self, "_real_mapping") + if key not in real_mapping: + return default + return self[key] + + def __iter__(self): + return iter(object.__getattribute__(self, 
"_real_mapping")) + + def __len__(self): + return len(object.__getattribute__(self, "_real_mapping")) + + def __getattr__(self, name): + """Forward all other mapping access to the real mapping.""" + return getattr(object.__getattribute__(self, "_real_mapping"), name) + + +class _SceneProxy: + """Proxy around the real InteractiveScene. + + When an observation term looks up a scene entity by name, this proxy lazily + wraps any entity that has a ``.data`` attribute. All tensor-returning + properties on the data object are intercepted for LEAPP annotation. This + covers articulations, rigid objects, and sensors through both + ``scene["name"]`` and ``scene.sensors["name"]`` access paths. + """ + + def __init__(self, real_scene, annotating_getters_by_type: dict[type, dict[str, Callable]], cache: dict): + # use object.__setattr__ to avoid creating new attributes, only set the ones that are already defined + object.__setattr__(self, "_real_scene", real_scene) + object.__setattr__(self, "_annotating_getters_by_type", annotating_getters_by_type) + object.__setattr__(self, "_cache", cache) + object.__setattr__(self, "_proxied", {}) + object.__setattr__(self, "_sensor_mapping_proxy", None) + + def _maybe_proxy_entity(self, key: str, entity: Any): + """Return a proxy for any entity that has a ``.data`` attribute.""" + proxied = object.__getattribute__(self, "_proxied") + if key in proxied: + return proxied[key] + + data = getattr(entity, "data", None) + if data is None: + return entity + + annotating_getters_by_type = object.__getattribute__(self, "_annotating_getters_by_type") + cache = object.__getattribute__(self, "_cache") + data_proxy = _DataProxy( + data, + annotating_getters_by_type, + cache, + input_name_resolver=lambda prop_name, k=key: f"{k}_{prop_name}", + ) + proxy = _EntityProxy(entity, data_proxy) + proxied[key] = proxy + return proxy + + def __getitem__(self, key): + """Return a proxied entity when it exposes annotated data getters.""" + real_scene = 
object.__getattribute__(self, "_real_scene") + entity = real_scene[key] + return self._maybe_proxy_entity(key, entity) + + @property + def sensors(self): + """Return a mapping proxy for scene sensors.""" + sensor_mapping_proxy = object.__getattribute__(self, "_sensor_mapping_proxy") + if sensor_mapping_proxy is None: + real_scene = object.__getattribute__(self, "_real_scene") + sensor_mapping_proxy = _EntityMappingProxy( + real_scene.sensors, + object.__getattribute__(self, "_annotating_getters_by_type"), + object.__getattribute__(self, "_cache"), + ) + object.__setattr__(self, "_sensor_mapping_proxy", sensor_mapping_proxy) + return sensor_mapping_proxy + + def __getattr__(self, name): + """Forward all other scene access to the real scene.""" + return getattr(object.__getattribute__(self, "_real_scene"), name) + + +class _EnvProxy: + """Proxy around the real env that returns a _SceneProxy for ``.scene``. + + All other attribute access (``num_envs``, ``command_manager``, etc.) + is forwarded transparently to the real env. 
+ """ + + def __init__(self, real_env, scene_proxy: _SceneProxy): + object.__setattr__(self, "_real_env", real_env) + object.__setattr__(self, "_scene_proxy", scene_proxy) + + @property + def scene(self): + """Return the scene proxy instead of the real scene.""" + return object.__getattribute__(self, "_scene_proxy") + + def __getattr__(self, name): + """Forward all non-scene attribute access to the real env.""" + return getattr(object.__getattribute__(self, "_real_env"), name) + + +def _build_scene_entity_lookup(real_scene) -> dict[int, tuple[str, str]]: + """Map real scene entity object ids to their lookup path.""" + lookup: dict[int, tuple[str, str]] = {} + for attr_name, attr_value in vars(real_scene).items(): + if not isinstance(attr_value, dict): + continue + container_kind = "sensors" if attr_name == "sensors" else "scene" + for key, entity in attr_value.items(): + lookup[id(entity)] = (container_kind, key) + return lookup + + +class _ManagerTermProxy(ManagerTermBase): + """Proxy a class-based manager term while preserving its lifecycle methods. + + Observation manager terms can be stateful ``ManagerTermBase`` instances that + expose ``reset()`` and ``serialize()`` in addition to being callable. This + proxy preserves that interface while swapping the env argument passed into + ``__call__`` for the observation-side proxy env. 
+ """ + + def __init__(self, target: ManagerTermBase, proxy_env: _EnvProxy): + super().__init__(target.cfg, target._env) + self._target = target + self._proxy_env = proxy_env + self._entity_lookup = _build_scene_entity_lookup(target._env.scene) + + @property + def __name__(self) -> str: + """Expose the wrapped term name for compatibility and debugging.""" + return getattr(self._target, "__name__", self._target.__class__.__name__) + + def reset(self, env_ids=None) -> None: + """Forward resets to the wrapped term instance.""" + self._target.reset(env_ids=env_ids) + + def serialize(self) -> dict: + """Forward serialization to the wrapped term instance.""" + return self._target.serialize() + + def __call__(self, *args, **kwargs): + """Call the wrapped term with the proxy env in place of the real env.""" + if args: + args = (self._proxy_env, *args[1:]) + else: + args = (self._proxy_env,) + swapped_attrs: list[tuple[str, Any]] = [] + for attr_name, attr_value in vars(self._target).items(): + lookup = self._entity_lookup.get(id(attr_value)) + if lookup is None: + continue + + container_kind, key = lookup + proxy_entity = ( + self._proxy_env.scene.sensors[key] if container_kind == "sensors" else self._proxy_env.scene[key] + ) + swapped_attrs.append((attr_name, attr_value)) + setattr(self._target, attr_name, proxy_entity) + + try: + return self._target(*args, **kwargs) + finally: + for attr_name, attr_value in swapped_attrs: + setattr(self._target, attr_name, attr_value) + + def __getattr__(self, name): + """Forward all other attribute access to the wrapped term instance.""" + return getattr(self._target, name) + + +# ══════════════════════════════════════════════════════════════════ +# Action-side proxy +# ══════════════════════════════════════════════════════════════════ + + +class _ArticulationWriteProxy: + """Proxy around a real Articulation for action terms. 
+ + Intercepts ``_leapp_semantics``-decorated write methods **and** routes + ``.data`` reads through a shared ``_DataProxy`` so that + action-side state reads (e.g. ``self._asset.data.joint_pos`` inside + ``RelativeJointPositionAction``) participate in LEAPP annotation and + share the dedup cache with observation-side reads. + + All other attribute access is forwarded transparently to the real asset. + """ + + def __init__( + self, + real_asset: Articulation, + term_name: str, + output_cache: list[TensorSemantics], + annotating_methods: dict[str, Callable], + data_proxy: _DataProxy, + ): + object.__setattr__(self, "_real_asset", real_asset) + object.__setattr__(self, "_term_name", term_name) + object.__setattr__(self, "_output_cache", output_cache) + object.__setattr__(self, "_annotating_methods", annotating_methods) + object.__setattr__(self, "_data_proxy", data_proxy) + + @property + def data(self): + """Return the shared annotating data proxy.""" + return object.__getattribute__(self, "_data_proxy") + + def __getattr__(self, name): + """Return an annotating wrapper for _leapp_semantics methods; forward everything else.""" + methods = object.__getattribute__(self, "_annotating_methods") + if name in methods: + real_asset = object.__getattribute__(self, "_real_asset") + term_name = object.__getattribute__(self, "_term_name") + output_cache = object.__getattribute__(self, "_output_cache") + original_method = getattr(real_asset, name) + return methods[name](real_asset, original_method, term_name, output_cache) + return getattr(object.__getattribute__(self, "_real_asset"), name) diff --git a/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator.py/utils.py b/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator.py/utils.py new file mode 100644 index 000000000000..460a30569089 --- /dev/null +++ b/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator.py/utils.py @@ -0,0 +1,4 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project 
Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause diff --git a/source/isaaclab/isaaclab/envs/direct_deployment_env.py b/source/isaaclab/isaaclab/envs/direct_deployment_env.py new file mode 100644 index 000000000000..7841282432b3 --- /dev/null +++ b/source/isaaclab/isaaclab/envs/direct_deployment_env.py @@ -0,0 +1,397 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Deployment environment that runs LEAPP-exported policies in simulation. + +This environment bypasses all Isaac Lab managers (observation, action, reward, etc.) +and instead wires raw ``ArticulationData`` properties and ``CommandManager`` outputs +directly to a LEAPP ``InferenceManager``, then writes the model outputs back to the +articulation. All I/O resolution is driven by the ``kind`` field in the LEAPP YAML. 
+""" + +from __future__ import annotations + +import logging +import torch +import yaml +from dataclasses import dataclass +from typing import Any + +from leapp import InferenceManager + +from isaaclab.assets.articulation.articulation import Articulation +from isaaclab.assets.articulation.articulation_data import ArticulationData +from isaaclab.managers import CommandManager, EventManager +from isaaclab.scene import InteractiveScene +from isaaclab.sim import SimulationContext +from isaaclab.sim.utils.stage import attach_stage_to_usd_context, use_stage + +logger = logging.getLogger(__name__) + + +# ══════════════════════════════════════════════════════════════════ +# I/O spec dataclasses +# ══════════════════════════════════════════════════════════════════ + + +@dataclass +class StateInputSpec: + """Read a property from ``ArticulationData``, optionally sliced by joint.""" + + property_name: str + joint_ids: list[int] | None = None + + +@dataclass +class CommandInputSpec: + """Read a command tensor from ``CommandManager``.""" + + command_term_name: str + + +@dataclass +class OutputSpec: + """Write a tensor to an ``Articulation`` method, optionally indexed by joint.""" + + method_name: str + joint_ids: list[int] | None = None + + +# ══════════════════════════════════════════════════════════════════ +# Kind → source/target resolution helpers +# ══════════════════════════════════════════════════════════════════ + +_JOINT_LEVEL_KIND_PREFIXES = ("state/joint/", "target/joint/") +_JOINT_LEVEL_GAIN_KINDS = ("kp", "kd") + + +def _build_kind_to_property_map() -> dict[str, list[str]]: + """Scan ``ArticulationData`` for ``_leapp_semantics`` properties. + + Returns a mapping from ``kind`` string to a list of property names that + carry that kind (there can be more than one, e.g. ``root_lin_vel_b`` and + ``root_lin_vel_w`` both have ``state/body/linear_velocity``). 
+ """ + kind_to_props: dict[str, list[str]] = {} + for prop_name in dir(ArticulationData): + prop = getattr(ArticulationData, prop_name, None) + if isinstance(prop, property) and prop.fget and hasattr(prop.fget, "_leapp_semantics"): + kind = prop.fget._leapp_semantics.kind + if kind is not None: + kind_to_props.setdefault(kind, []).append(prop_name) + return kind_to_props + + +def _build_kind_to_write_method_map() -> dict[str, str]: + """Scan ``Articulation`` for ``_leapp_semantics`` methods + hardcoded kp/kd. + + Returns a mapping from output ``kind`` to the method name on ``Articulation``. + """ + kind_to_method: dict[str, str] = {} + for method_name in dir(Articulation): + method = getattr(Articulation, method_name, None) + if callable(method) and hasattr(method, "_leapp_semantics"): + kind = method._leapp_semantics.kind + if kind is not None: + kind_to_method[kind] = method_name + kind_to_method["kp"] = "write_joint_stiffness_to_sim" + kind_to_method["kd"] = "write_joint_damping_to_sim" + return kind_to_method + + +def _disambiguate_property(kind: str, leapp_name: str, kind_to_props: dict[str, list[str]]) -> str: + """Pick the right ``ArticulationData`` property when multiple share a ``kind``. + + The export path uses the property name as the LEAPP input name, so we strip + the ``_in`` / ``_out`` suffix that LEAPP adds for collision avoidance and match. + """ + candidates = kind_to_props.get(kind) + if candidates is None: + raise ValueError(f"No ArticulationData property found for kind='{kind}'") + if len(candidates) == 1: + return candidates[0] + base_name = leapp_name.removesuffix("_in").removesuffix("_out") + for prop in candidates: + if prop == base_name: + return prop + return candidates[0] + + +def _resolve_joint_ids(element_names: list | None, asset: Articulation) -> list[int] | None: + """Convert ``element_names[0]`` joint names to integer joint indices. + + Returns ``None`` when no slicing is needed (all joints or non-joint tensor). 
+ """ + if element_names is None: + return None + joint_names = element_names[0] + if not isinstance(joint_names, list) or not joint_names: + return None + if joint_names == list(asset.joint_names): + return None + joint_ids, _ = asset.find_joints(joint_names, preserve_order=True) + return joint_ids + + +def _find_command_term_by_hint(kind: str, command_manager: CommandManager) -> str: + """Find the ``CommandTerm`` name whose ``cfg.cmd_hint`` matches ``kind``.""" + for name, term in command_manager._terms.items(): + if getattr(term.cfg, "cmd_hint", None) == kind: + return name + raise ValueError(f"No command term with cmd_hint='{kind}'. Available terms: {list(command_manager._terms.keys())}") + + +def _find_robot_asset(scene: InteractiveScene) -> Articulation: + """Return the first ``Articulation`` in the scene (assumed to be the robot).""" + for entity_name in scene.articulations: + entity = scene[entity_name] + if isinstance(entity, Articulation): + return entity + raise RuntimeError("No Articulation found in scene") + + +# ══════════════════════════════════════════════════════════════════ +# DirectDeploymentEnv +# ══════════════════════════════════════════════════════════════════ + + +class DirectDeploymentEnv: + """Runs a LEAPP-exported policy in an Isaac Lab scene. + + The environment sets up the simulation scene and physics from a standard + Isaac Lab config, then wires raw sensor/command data to a LEAPP + ``InferenceManager`` and writes the model outputs back to the articulation. + + No observation, action, reward, termination, or curriculum managers are used. + The LEAPP model already contains all pre/post-processing. + """ + + def __init__(self, cfg: Any, leapp_yaml_path: str): + """Initialize the deployment environment. + + Args: + cfg: A ``ManagerBasedRLEnvCfg`` (or compatible) task config. + leapp_yaml_path: Path to the LEAPP ``.yaml`` pipeline description. 
+ """ + + cfg.scene.num_envs = 1 + cfg.validate() + self.cfg = cfg + self._is_closed = False + self._leapp_yaml_path = leapp_yaml_path + self._step_count = 0 + + # ── Simulation + scene ──────────────────────────────────── + self.sim = SimulationContext(cfg.sim) + if "cuda" in self.sim.device: + torch.cuda.set_device(self.sim.device) + + with use_stage(self.sim.get_initial_stage()): + self.scene = InteractiveScene(cfg.scene) + attach_stage_to_usd_context() + self.sim.reset() + self.scene.update(dt=self.physics_dt) + + # ── Robot asset ─────────────────────────────────────────── + self._asset = _find_robot_asset(self.scene) + + # ── EventManager (optional, for resets) ─────────────────── + self.event_manager: EventManager | None = None + if hasattr(cfg, "events") and cfg.events is not None: + self.event_manager = EventManager(cfg.events, self) + + # ── CommandManager (optional, for command/* inputs) ─────── + self.command_manager: CommandManager | None = None + if hasattr(cfg, "commands") and cfg.commands is not None: + self.command_manager = CommandManager(cfg.commands, self) + + # ── LEAPP InferenceManager ──────────────────────────────── + self.inference = InferenceManager(leapp_yaml_path) + + # ── Parse YAML and resolve I/O mappings ─────────────────── + with open(leapp_yaml_path) as f: + self._leapp_desc = yaml.safe_load(f) + self._input_mapping: dict[str, StateInputSpec | CommandInputSpec] = {} + self._output_mapping: dict[str, OutputSpec] = {} + self._resolve_io() + + logger.info( + "DirectDeploymentEnv ready — %d inputs, %d outputs mapped", + len(self._input_mapping), + len(self._output_mapping), + ) + + # ── Properties ──────────────────────────────────────────────── + + @property + def num_envs(self) -> int: + return 1 + + @property + def physics_dt(self) -> float: + return self.cfg.sim.dt + + @property + def step_dt(self) -> float: + return self.cfg.sim.dt * self.cfg.decimation + + @property + def device(self) -> str: + return self.sim.device + + # ── I/O 
Resolution ──────────────────────────────────────────── + + def _resolve_io(self): + """Build ``_input_mapping`` and ``_output_mapping`` from LEAPP YAML ``kind`` fields.""" + kind_to_props = _build_kind_to_property_map() + kind_to_write = _build_kind_to_write_method_map() + pipeline = self._leapp_desc["pipeline"] + + # --- Inputs --- + for node_name, input_names in pipeline["inputs"].items(): + node = self.inference.nodes[node_name] + desc_by_name = {d["name"]: d for d in node.input_descriptions} + for input_name in input_names: + desc = desc_by_name[input_name] + kind = desc.get("kind") + key = f"{node_name}/{input_name}" + if kind is None: + continue + if kind.startswith("state/"): + prop = _disambiguate_property(kind, input_name, kind_to_props) + needs_joint_slice = kind.startswith("state/joint/") + jids = _resolve_joint_ids(desc.get("element_names"), self._asset) if needs_joint_slice else None + self._input_mapping[key] = StateInputSpec(property_name=prop, joint_ids=jids) + elif kind.startswith("command/"): + if self.command_manager is None: + raise RuntimeError( + f"LEAPP input '{key}' has kind='{kind}' but no CommandManager " + "is available (cfg.commands is None)." 
+ ) + term_name = _find_command_term_by_hint(kind, self.command_manager) + self._input_mapping[key] = CommandInputSpec(command_term_name=term_name) + else: + logger.warning("Unknown input kind '%s' for '%s' — skipping", kind, key) + + # --- Outputs --- + for node_name, output_names in pipeline["outputs"].items(): + node = self.inference.nodes[node_name] + desc_by_name = {d["name"]: d for d in node.output_descriptions} + for output_name in output_names: + desc = desc_by_name[output_name] + kind = desc.get("kind") + key = f"{node_name}/{output_name}" + if kind is None: + continue + if kind not in kind_to_write: + logger.warning("Unknown output kind '%s' for '%s' — skipping", kind, key) + continue + method_name = kind_to_write[kind] + needs_joint_ids = kind.startswith("target/joint/") or kind in _JOINT_LEVEL_GAIN_KINDS + jids = _resolve_joint_ids(desc.get("element_names"), self._asset) if needs_joint_ids else None + self._output_mapping[key] = OutputSpec(method_name=method_name, joint_ids=jids) + + # ── Read / Write ────────────────────────────────────────────── + + def _read_inputs(self) -> dict[str, torch.Tensor]: + """Read all mapped inputs from the scene and command manager.""" + inputs: dict[str, torch.Tensor] = {} + for key, spec in self._input_mapping.items(): + if isinstance(spec, StateInputSpec): + value = getattr(self._asset.data, spec.property_name) + if spec.joint_ids is not None: + value = value[:, spec.joint_ids] + inputs[key] = value + elif isinstance(spec, CommandInputSpec): + inputs[key] = self.command_manager.get_command(spec.command_term_name) + return inputs + + def _write_outputs(self, outputs: dict[str, torch.Tensor]): + """Write model outputs to the articulation.""" + for key, tensor in outputs.items(): + spec = self._output_mapping.get(key) + if spec is None: + continue + method = getattr(self._asset, spec.method_name) + if spec.joint_ids is not None: + method(tensor, joint_ids=spec.joint_ids) + else: + method(tensor) + + # ── Public API 
──────────────────────────────────────────────── + + def reset(self) -> dict[str, torch.Tensor]: + """Reset the scene and inference state. + + Returns: + The initial input tensors (for logging / debugging). + """ + env_ids = torch.tensor([0], device=self.device, dtype=torch.long) + + self.scene.reset(env_ids) + + if self.event_manager is not None and "reset" in self.event_manager.available_modes: + self.event_manager.apply(mode="reset", env_ids=env_ids, global_env_step_count=self._step_count) + if self.command_manager is not None: + self.command_manager.reset(env_ids) + + self.scene.write_data_to_sim() + self.sim.forward() + self.scene.update(dt=self.physics_dt) + + self.inference.reset() + + return self._read_inputs() + + def step(self, external_inputs: dict[str, torch.Tensor] | None = None) -> dict[str, torch.Tensor]: + """Run one environment step: read → infer → write → physics. + + Args: + external_inputs: Optional overrides keyed by ``"ModelName/input_name"``. + Takes precedence over auto-resolved state/command values. + + Returns: + The dict of pipeline outputs from ``InferenceManager.run_policy()``. + """ + self._step_count += 1 + + # 1. Update commands + if self.command_manager is not None: + self.command_manager.compute(dt=self.step_dt) + + # 2. Read inputs + inputs = self._read_inputs() + + # 3. Merge external overrides + if external_inputs is not None: + inputs.update(external_inputs) + + # 4. Infer + with torch.inference_mode(): + outputs = self.inference.run_policy(inputs) + + # 5. Write outputs to asset + self._write_outputs(outputs) + + # 6. 
Decimation loop + is_rendering = self.sim.has_gui() or self.sim.has_rtx_sensors() + for _ in range(self.cfg.decimation): + self.scene.write_data_to_sim() + self.sim.step(render=False) + if is_rendering: + self.sim.render() + self.scene.update(dt=self.physics_dt) + + return outputs + + def close(self): + """Clean up the environment.""" + if not self._is_closed: + if self.command_manager is not None: + del self.command_manager + if self.event_manager is not None: + del self.event_manager + del self.scene + self._is_closed = True diff --git a/source/isaaclab/isaaclab/envs/mdp/commands/pose_2d_command.py b/source/isaaclab/isaaclab/envs/mdp/commands/pose_2d_command.py index 1c181b3e32d7..1e54f7aa4021 100644 --- a/source/isaaclab/isaaclab/envs/mdp/commands/pose_2d_command.py +++ b/source/isaaclab/isaaclab/envs/mdp/commands/pose_2d_command.py @@ -62,6 +62,9 @@ def __init__(self, cfg: UniformPose2dCommandCfg, env: ManagerBasedEnv): self.metrics["error_pos"] = torch.zeros(self.num_envs, device=self.device) self.metrics["error_heading"] = torch.zeros(self.num_envs, device=self.device) + self.cfg.cmd_hint = self.cfg.cmd_hint or "command/body/pose" + self.cfg.element_names = self.cfg.element_names or ["x", "y", "z", "heading"] + def __str__(self) -> str: msg = "PositionCommand:\n" msg += f"\tCommand dimension: {tuple(self.command.shape[1:])}\n" diff --git a/source/isaaclab/isaaclab/envs/mdp/commands/pose_command.py b/source/isaaclab/isaaclab/envs/mdp/commands/pose_command.py index 33c58fd45f95..130d5f9f0bcb 100644 --- a/source/isaaclab/isaaclab/envs/mdp/commands/pose_command.py +++ b/source/isaaclab/isaaclab/envs/mdp/commands/pose_command.py @@ -69,6 +69,9 @@ def __init__(self, cfg: UniformPoseCommandCfg, env: ManagerBasedEnv): self.metrics["position_error"] = torch.zeros(self.num_envs, device=self.device) self.metrics["orientation_error"] = torch.zeros(self.num_envs, device=self.device) + self.cfg.cmd_hint = self.cfg.cmd_hint or "command/body/pose" + self.cfg.element_names = 
self.cfg.element_names or ["x", "y", "z", "qw", "qx", "qy", "qz"]
+
     def __str__(self) -> str:
         msg = "UniformPoseCommand:\n"
         msg += f"\tCommand dimension: {tuple(self.command.shape[1:])}\n"
diff --git a/source/isaaclab/isaaclab/envs/mdp/commands/velocity_command.py b/source/isaaclab/isaaclab/envs/mdp/commands/velocity_command.py
index eadc89af3af9..8bbe63ac5aab 100644
--- a/source/isaaclab/isaaclab/envs/mdp/commands/velocity_command.py
+++ b/source/isaaclab/isaaclab/envs/mdp/commands/velocity_command.py
@@ -88,6 +88,9 @@ def __init__(self, cfg: UniformVelocityCommandCfg, env: ManagerBasedEnv):
         self.metrics["error_vel_xy"] = torch.zeros(self.num_envs, device=self.device)
         self.metrics["error_vel_yaw"] = torch.zeros(self.num_envs, device=self.device)
 
+        self.cfg.cmd_hint = self.cfg.cmd_hint or "command/body/velocity"
+        self.cfg.element_names = self.cfg.element_names or ["lin_vel_x", "lin_vel_y", "ang_vel_z"]
+
     def __str__(self) -> str:
         """Return a string representation of the command generator."""
         msg = "UniformVelocityCommand:\n"
diff --git a/source/isaaclab/isaaclab/managers/manager_term_cfg.py b/source/isaaclab/isaaclab/managers/manager_term_cfg.py
index de7c23aa220b..c3c731c9e52e 100644
--- a/source/isaaclab/isaaclab/managers/manager_term_cfg.py
+++ b/source/isaaclab/isaaclab/managers/manager_term_cfg.py
@@ -118,6 +118,9 @@ class CommandTermCfg:
     debug_vis: bool = False
    """Whether to visualize debug information. Defaults to False."""
 
+    cmd_hint: str | None = None  # semantic kind of the command (not a typing hint), used to wire deployment inputs
+    element_names: list[str] | list[list[str]] | None = None  # names of the command tensor's elements, used for deployment export
+
 
 ##
 # Curriculum manager.
diff --git a/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor_data.py b/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor_data.py index 0ca31f9e40ac..3cb6cd4debc6 100644 --- a/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor_data.py +++ b/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor_data.py @@ -17,6 +17,8 @@ from isaaclab_newton.sensors.contact_sensor.contact_sensor_data import ContactSensorData as NewtonContactSensorData from isaaclab_physx.sensors.contact_sensor import ContactSensorData as PhysXContactSensorData +from isaaclab.utils.leapp_semantics import leapp_tensor_semantics + class ContactSensorData(FactoryBase, BaseContactSensorData): """Factory for creating contact sensor data instances.""" diff --git a/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_data.py b/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_data.py index f6b28faea395..22c385b29de8 100644 --- a/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_data.py +++ b/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_data.py @@ -19,6 +19,8 @@ ) from isaaclab_physx.sensors.frame_transformer import FrameTransformerData as PhysXFrameTransformerData +from isaaclab.utils.leapp_semantics import leapp_tensor_semantics + class FrameTransformerData(FactoryBase, BaseFrameTransformerData): """Factory for creating frame transformer data instances.""" diff --git a/source/isaaclab/isaaclab/sensors/imu/imu_data.py b/source/isaaclab/isaaclab/sensors/imu/imu_data.py index f23f2a3be6ca..59cbf68c02be 100644 --- a/source/isaaclab/isaaclab/sensors/imu/imu_data.py +++ b/source/isaaclab/isaaclab/sensors/imu/imu_data.py @@ -17,6 +17,8 @@ from isaaclab_newton.sensors.imu import ImuData as NewtonImuData from isaaclab_physx.sensors.imu import ImuData as PhysXImuData +from isaaclab.utils.leapp_semantics import leapp_tensor_semantics + class ImuData(FactoryBase, BaseImuData): """Factory for 
creating IMU data instances.""" diff --git a/source/isaaclab/isaaclab/sensors/ray_caster/multi_mesh_ray_caster_camera.py b/source/isaaclab/isaaclab/sensors/ray_caster/multi_mesh_ray_caster_camera.py index a1be3160d99b..a90826cc0cc8 100644 --- a/source/isaaclab/isaaclab/sensors/ray_caster/multi_mesh_ray_caster_camera.py +++ b/source/isaaclab/isaaclab/sensors/ray_caster/multi_mesh_ray_caster_camera.py @@ -107,8 +107,8 @@ def _initialize_rays_impl(self): self._offset_quat = quat_w.repeat(self._view.count, 1) self._offset_pos = torch.tensor(list(self.cfg.offset.pos), device=self._device).repeat(self._view.count, 1) - self._data.quat_w = torch.zeros(self._view.count, 4, device=self.device) - self._data.pos_w = torch.zeros(self._view.count, 3, device=self.device) + self._data._quat_w = torch.zeros(self._view.count, 4, device=self.device) + self._data._pos_w = torch.zeros(self._view.count, 3, device=self.device) self._ray_starts_w = torch.zeros(self._view.count, self.num_rays, 3, device=self.device) self._ray_directions_w = torch.zeros(self._view.count, self.num_rays, 3, device=self.device) diff --git a/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster.py b/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster.py index 731d57f1638f..79e14823973a 100644 --- a/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster.py +++ b/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster.py @@ -222,9 +222,9 @@ def _initialize_rays_impl(self): self.drift = torch.zeros(self._view.count, 3, device=self.device) self.ray_cast_drift = torch.zeros(self._view.count, 3, device=self.device) # fill the data buffer - self._data.pos_w = torch.zeros(self._view.count, 3, device=self.device) - self._data.quat_w = torch.zeros(self._view.count, 4, device=self.device) - self._data.ray_hits_w = torch.zeros(self._view.count, self.num_rays, 3, device=self.device) + self._data._pos_w = torch.zeros(self._view.count, 3, device=self.device) + self._data._quat_w = torch.zeros(self._view.count, 4, 
device=self.device) + self._data._ray_hits_w = torch.zeros(self._view.count, self.num_rays, 3, device=self.device) self._ray_starts_w = torch.zeros(self._view.count, self.num_rays, 3, device=self.device) self._ray_directions_w = torch.zeros(self._view.count, self.num_rays, 3, device=self.device) @@ -238,8 +238,8 @@ def _update_ray_infos(self, env_ids: Sequence[int]): # apply drift to ray starting position in world frame pos_w += self.drift[env_ids] # store the poses - self._data.pos_w[env_ids] = pos_w - self._data.quat_w[env_ids] = quat_w + self._data._pos_w[env_ids] = pos_w + self._data._quat_w[env_ids] = quat_w # check if user provided attach_yaw_only flag if self.cfg.attach_yaw_only is not None: @@ -293,7 +293,7 @@ def _update_buffers_impl(self, env_mask: wp.array): # ray cast and store the hits # TODO: Make this work for multiple meshes? - self._data.ray_hits_w[env_ids] = raycast_mesh( + self._data._ray_hits_w[env_ids] = raycast_mesh( self._ray_starts_w[env_ids], self._ray_directions_w[env_ids], max_dist=self.cfg.max_distance, @@ -301,7 +301,7 @@ def _update_buffers_impl(self, env_mask: wp.array): )[0] # apply vertical drift to ray starting position in ray caster frame - self._data.ray_hits_w[env_ids, :, 2] += self.ray_cast_drift[env_ids, 2].unsqueeze(-1) + self._data._ray_hits_w[env_ids, :, 2] += self.ray_cast_drift[env_ids, 2].unsqueeze(-1) def _set_debug_vis_impl(self, debug_vis: bool): # set visibility of markers diff --git a/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_data.py b/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_data.py index 6103a2167d66..7863a76fe875 100644 --- a/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_data.py +++ b/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_data.py @@ -6,13 +6,14 @@ from dataclasses import dataclass import torch +from isaaclab.utils.leapp_semantics import leapp_tensor_semantics @dataclass class RayCasterData: """Data container for the ray-cast sensor.""" - pos_w: torch.Tensor = 
None + _pos_w: torch.Tensor = None """Position of the sensor origin in world frame. Shape is (N, 3), where N is the number of sensors. @@ -22,9 +23,27 @@ class RayCasterData: Shape is (N, 4), where N is the number of sensors. """ - ray_hits_w: torch.Tensor = None + _ray_hits_w: torch.Tensor = None """The ray hit positions in the world frame. Shape is (N, B, 3), where N is the number of sensors, B is the number of rays in the scan pattern per sensor. """ + + @property + @leapp_tensor_semantics(kind="state/sensor/position", element_names_source="xyz") + def pos_w(self) -> torch.Tensor: + """Position of the sensor origin in world frame.""" + return self._pos_w + + @property + @leapp_tensor_semantics(kind="state/sensor/rotation", element_names_source="quat_wxyz") + def quat_w(self) -> torch.Tensor: + """Orientation of the sensor origin in quaternion (w, x, y, z) in world frame.""" + return self._quat_w + + @property + @leapp_tensor_semantics(kind="state/sensor/ray_hit_position") + def ray_hits_w(self) -> torch.Tensor: + """The ray hit positions in the world frame.""" + return self._ray_hits_w diff --git a/source/isaaclab/isaaclab/utils/buffers/circular_buffer.py b/source/isaaclab/isaaclab/utils/buffers/circular_buffer.py index c5c9fe9ff6ad..80fbc2d52a03 100644 --- a/source/isaaclab/isaaclab/utils/buffers/circular_buffer.py +++ b/source/isaaclab/isaaclab/utils/buffers/circular_buffer.py @@ -11,10 +11,11 @@ class CircularBuffer: """Circular buffer for storing a history of batched tensor data. - This class implements a circular buffer for storing a history of batched tensor data. The buffer is - initialized with a maximum length and a batch size. The data is stored in a circular fashion, and the - data can be retrieved in a LIFO (Last-In-First-Out) fashion. The buffer is designed to be used in - multi-environment settings, where each environment has its own data. 
+ This class stores a history of batched tensor data with the oldest entry at + index 0 and the most recent entry at index ``max_len - 1`` of the internal + buffer. The public indexing API remains LIFO (last-in-first-out), while the + ordered internal layout keeps ``buffer`` retrieval cheap and makes the + implementation compatible with tracing-based export flows. The shape of the appended data is expected to be (batch_size, ...), where the first dimension is the batch dimension. Correspondingly, the shape of the ring buffer is (max_len, batch_size, ...). @@ -42,8 +43,6 @@ def __init__(self, max_len: int, batch_size: int, device: str): self._max_len = torch.full((batch_size,), max_len, dtype=torch.int, device=device) # number of data pushes passed since the last call to :meth:`reset` self._num_pushes = torch.zeros(batch_size, dtype=torch.long, device=device) - # the pointer to the current head of the circular buffer (-1 means not initialized) - self._pointer: int = -1 # the actual buffer for data storage # note: this is initialized on the first call to :meth:`append` self._buffer: torch.Tensor = None # type: ignore @@ -80,14 +79,10 @@ def current_length(self) -> torch.Tensor: def buffer(self) -> torch.Tensor: """Complete circular buffer with most recent entry at the end and oldest entry at the beginning. - The shape of the buffer is (batch_size, max_length, ...). - - Note: - The oldest entry is at the beginning of dimension 1. + Returns: + Complete circular buffer with most recent entry at the end and oldest entry at the beginning of dimension 1. The shape is [batch_size, max_length, data.shape[1:]]. """ - buf = self._buffer.clone() - buf = torch.roll(buf, shifts=self.max_length - self._pointer - 1, dims=0) - return torch.transpose(buf, dim0=0, dim1=1) + return torch.transpose(self._buffer, dim0=0, dim1=1) """ Operations. @@ -99,15 +94,16 @@ def reset(self, batch_ids: Sequence[int] | None = None): Args: batch_ids: Elements to reset in the batch dimension. 
Default is None, which resets all the batch indices. """ - # resolve all indices + batch_ids_resolved: Sequence[int] | slice if batch_ids is None: - batch_ids = slice(None) + batch_ids_resolved = slice(None) + else: + batch_ids_resolved = batch_ids # reset the number of pushes for the specified batch indices - self._num_pushes[batch_ids] = 0 + self._num_pushes[batch_ids_resolved] = 0 if self._buffer is not None: - # set buffer at batch_id reset indices to 0.0 so that the buffer() - # getter returns the cleared circular buffer after reset. - self._buffer[:, batch_ids, :] = 0.0 + # set buffer at batch_id reset indices to 0.0 so that the buffer() getter returns the cleared circular buffer after reset. + self._buffer[:, batch_ids_resolved] = 0.0 def append(self, data: torch.Tensor): """Append the data to the circular buffer. @@ -125,21 +121,18 @@ def append(self, data: torch.Tensor): # move the data to the device data = data.to(self._device) - # at the first call, initialize the buffer size - if self._buffer is None: - self._pointer = -1 - self._buffer = torch.empty((self.max_length, *data.shape), dtype=data.dtype, device=self._device) - # move the head to the next slot - self._pointer = (self._pointer + 1) % self.max_length - # add the new data to the last layer - self._buffer[self._pointer] = data - # Check for batches with zero pushes and initialize all values in batch to first append is_first_push = self._num_pushes == 0 + if self._buffer is None: + self._buffer = data.unsqueeze(0).expand(self.max_length, *data.shape).clone() if torch.any(is_first_push): self._buffer[:, is_first_push] = data[is_first_push] # increment number of number of pushes for all batches + self._append(data) self._num_pushes += 1 + def _append(self, data: torch.Tensor): + self._buffer = torch.cat([self._buffer[1:], data.unsqueeze(0)], dim=0) + def __getitem__(self, key: torch.Tensor) -> torch.Tensor: """Retrieve the data from the circular buffer in last-in-first-out (LIFO) fashion. 
@@ -160,13 +153,11 @@ def __getitem__(self, key: torch.Tensor) -> torch.Tensor: # check the batch size if len(key) != self.batch_size: raise ValueError(f"The argument 'key' has length {key.shape[0]}, while expecting {self.batch_size}") - # check if the buffer is empty - if torch.any(self._num_pushes == 0) or self._buffer is None: - raise RuntimeError("Attempting to retrieve data on an empty circular buffer. Please append data first.") # admissible lag valid_keys = torch.minimum(key, self._num_pushes - 1) - # the index in the circular buffer (pointer points to the last+1 index) - index_in_buffer = torch.remainder(self._pointer - valid_keys, self.max_length) + # The buffer is stored oldest->newest along dimension 0, so the most + # recent item lives at the last index. + index_in_buffer = (self.max_length - 1 - valid_keys).to(dtype=torch.long) # return output return self._buffer[index_in_buffer, self._ALL_INDICES] diff --git a/source/isaaclab/isaaclab/utils/leapp_semantics.py b/source/isaaclab/isaaclab/utils/leapp_semantics.py new file mode 100644 index 000000000000..3a9c53c8c7e8 --- /dev/null +++ b/source/isaaclab/isaaclab/utils/leapp_semantics.py @@ -0,0 +1,125 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +"""LEAPP semantic metadata helpers for raw tensor-producing functions.""" + +from __future__ import annotations + +from collections.abc import Callable +from contextlib import suppress +from dataclasses import dataclass +from typing import Any + + +@dataclass(frozen=True) +class LeappTensorSemantics: + """Semantic metadata attached directly to a raw tensor-producing function.""" + + kind: Any = None + element_names: list[str] | list[list[str]] | None = None + element_names_source: str | None = None + + +XYZ_ELEMENT_NAMES: list[str] = ["x", "y", "z"] +QUAT_WXYZ_ELEMENT_NAMES: list[str] = ["qw", "qx", "qy", "qz"] +POSE7_ELEMENT_NAMES: list[str] = ["x", "y", "z", "qw", "qx", "qy", "qz"] +WRENCH6_ELEMENT_NAMES: list[str] = ["fx", "fy", "fz", "tx", "ty", "tz"] + + +def leapp_tensor_semantics( + *, + kind: Any = None, + element_names: list[str] | list[list[str]] | None = None, + element_names_source: str | None = None, +) -> Callable: + """Attach LEAPP semantic metadata to a raw tensor-producing function.""" + + semantics = LeappTensorSemantics( + kind=kind, + element_names=element_names, + element_names_source=element_names_source, + ) + + def _apply(func: Callable) -> Callable: + func._leapp_semantics = semantics + return func + + return _apply + + +def _select_element_names(names: list[str] | None, indices: Any = None) -> list[str] | None: + """Select element names using optional runtime indices.""" + if names is None: + return None + if indices is None or indices == slice(None): + return list(names) + if isinstance(indices, slice): + return list(names[indices]) + with suppress(AttributeError): + indices = indices.tolist() + if isinstance(indices, (list, tuple)): + return [names[int(index)] for index in indices] + if isinstance(indices, int): + return [names[indices]] + return None + + +def resolve_leapp_element_names(semantics: LeappTensorSemantics | None, data_self) -> list | None: + """Resolve element names from 
attached semantics and a tensor-producing object.""" + if semantics is None: + return None + if semantics.element_names is not None: + return semantics.element_names + + source = semantics.element_names_source + if source == "joint_names": + return _select_element_names( + getattr(data_self, "joint_names", getattr(data_self, "_joint_names", None)), + getattr(data_self, "_joint_ids", None), + ) + if source == "body_names": + return _select_element_names( + getattr(data_self, "body_names", getattr(data_self, "_body_names", None)), + getattr(data_self, "_body_ids", None), + ) + if source == "body_xyz": + body_names = _select_element_names( + getattr(data_self, "body_names", getattr(data_self, "_body_names", None)), + getattr(data_self, "_body_ids", None), + ) + if body_names is None: + return None + return [body_names, XYZ_ELEMENT_NAMES] + if source == "body_pose": + body_names = _select_element_names( + getattr(data_self, "body_names", getattr(data_self, "_body_names", None)), + getattr(data_self, "_body_ids", None), + ) + if body_names is None: + return None + return [body_names, POSE7_ELEMENT_NAMES] + if source == "body_quat": + body_names = _select_element_names( + getattr(data_self, "body_names", getattr(data_self, "_body_names", None)), + getattr(data_self, "_body_ids", None), + ) + if body_names is None: + return None + return [body_names, QUAT_WXYZ_ELEMENT_NAMES] + if source == "body_wrench": + body_names = _select_element_names( + getattr(data_self, "body_names", getattr(data_self, "_body_names", None)), + getattr(data_self, "_body_ids", None), + ) + if body_names is None: + return None + return [body_names, WRENCH6_ELEMENT_NAMES] + if source == "pose7": + return POSE7_ELEMENT_NAMES + if source == "xyz": + return XYZ_ELEMENT_NAMES + if source == "quat_wxyz": + return QUAT_WXYZ_ELEMENT_NAMES + return None diff --git a/source/isaaclab_rl/test/test_rsl_rl_export_flow.py b/source/isaaclab_rl/test/test_rsl_rl_export_flow.py new file mode 100644 index 
000000000000..8a80781f30d1
--- /dev/null
+++ b/source/isaaclab_rl/test/test_rsl_rl_export_flow.py
@@ -0,0 +1,144 @@
+# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+"""Export pipeline integration tests.
+
+Each test calls ``export.py`` as a subprocess so that Isaac Sim's AppLauncher
+is fully isolated per task and the export logic is not duplicated here.
+The export artifacts land in the default checkpoint directory; only the
+per-task export subdirectory is removed after each test.
+"""
+
+import os
+import pytest
+import shutil
+import subprocess
+
+# Root of the repository (three levels up from this file).
+_REPO_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
+_EXPORT_SCRIPT = os.path.join("scripts", "reinforcement_learning", "rsl_rl", "export.py")
+
+
+# Tasks with confirmed pretrained checkpoints (Direct and no-checkpoint tasks excluded).
+# Tasks whose checkpoint is not published are skipped at runtime by the test itself.
+TASKS = [ + # Classic + "Isaac-Ant-v0", + "Isaac-Cartpole-v0", + # Navigation + "Isaac-Navigation-Flat-Anymal-C-v0", + "Isaac-Navigation-Flat-Anymal-C-Play-v0", + # Locomotion Velocity + "Isaac-Velocity-Flat-Anymal-B-v0", + "Isaac-Velocity-Flat-Anymal-B-Play-v0", + "Isaac-Velocity-Rough-Anymal-B-v0", + "Isaac-Velocity-Rough-Anymal-B-Play-v0", + "Isaac-Velocity-Flat-Anymal-C-v0", + "Isaac-Velocity-Flat-Anymal-C-Play-v0", + "Isaac-Velocity-Rough-Anymal-C-v0", + "Isaac-Velocity-Rough-Anymal-C-Play-v0", + "Isaac-Velocity-Flat-Anymal-D-v0", + "Isaac-Velocity-Flat-Anymal-D-Play-v0", + "Isaac-Velocity-Rough-Anymal-D-v0", + "Isaac-Velocity-Rough-Anymal-D-Play-v0", + "Isaac-Velocity-Flat-Cassie-v0", + "Isaac-Velocity-Flat-Cassie-Play-v0", + "Isaac-Velocity-Rough-Cassie-v0", + "Isaac-Velocity-Rough-Cassie-Play-v0", + "Isaac-Velocity-Flat-G1-v0", + "Isaac-Velocity-Flat-G1-Play-v0", + "Isaac-Velocity-Rough-G1-v0", + "Isaac-Velocity-Rough-G1-Play-v0", + "Isaac-Velocity-Flat-H1-v0", + "Isaac-Velocity-Flat-H1-Play-v0", + "Isaac-Velocity-Rough-H1-v0", + "Isaac-Velocity-Rough-H1-Play-v0", + "Isaac-Velocity-Flat-Spot-v0", + "Isaac-Velocity-Flat-Spot-Play-v0", + "Isaac-Velocity-Flat-Unitree-A1-v0", + "Isaac-Velocity-Flat-Unitree-A1-Play-v0", + "Isaac-Velocity-Rough-Unitree-A1-v0", + "Isaac-Velocity-Rough-Unitree-A1-Play-v0", + "Isaac-Velocity-Flat-Unitree-Go1-v0", + "Isaac-Velocity-Flat-Unitree-Go1-Play-v0", + "Isaac-Velocity-Rough-Unitree-Go1-v0", + "Isaac-Velocity-Rough-Unitree-Go1-Play-v0", + "Isaac-Velocity-Flat-Unitree-Go2-v0", + "Isaac-Velocity-Flat-Unitree-Go2-Play-v0", + "Isaac-Velocity-Rough-Unitree-Go2-v0", + "Isaac-Velocity-Rough-Unitree-Go2-Play-v0", + # Manipulation Reach + "Isaac-Reach-Franka-v0", + "Isaac-Reach-Franka-Play-v0", + "Isaac-Reach-UR10-v0", + "Isaac-Reach-UR10-Play-v0", + # Manipulation Lift + "Isaac-Lift-Cube-Franka-v0", + "Isaac-Lift-Cube-Franka-Play-v0", + # Manipulation Cabinet + "Isaac-Open-Drawer-Franka-v0", + "Isaac-Open-Drawer-Franka-Play-v0", + # 
Dexsuite + "Isaac-Dexsuite-Kuka-Allegro-Reorient-v0", + "Isaac-Dexsuite-Kuka-Allegro-Reorient-Play-v0", + "Isaac-Dexsuite-Kuka-Allegro-Lift-v0", + "Isaac-Dexsuite-Kuka-Allegro-Lift-Play-v0", +] + + +def _export_dir(task_name: str) -> str: + """Return the directory where export.py writes artifacts for *task_name*.""" + train_task = task_name.replace("-Play", "") + return os.path.join(_REPO_ROOT, ".pretrained_checkpoints", "rsl_rl", train_task, task_name) + + +@pytest.mark.parametrize("task_name", TASKS) +def test_export_flow(task_name): + """Run export.py for *task_name* and assert the expected artifacts are created.""" + export_dir = _export_dir(task_name) + + try: + result = subprocess.run( + [ + "./isaaclab.sh", + "-p", + _EXPORT_SCRIPT, + "--task", + task_name, + "--use_pretrained_checkpoint", + "--disable_graph_visualization", + "--headless", + ], + cwd=_REPO_ROOT, + capture_output=True, + text=True, + timeout=600, + ) + + # Gracefully skip tasks whose checkpoint isn't published yet + if "pre-trained checkpoint is currently unavailable" in result.stdout: + pytest.skip(f"No pretrained checkpoint available for {task_name.replace('-Play', '')}") + + # Surface stdout/stderr on failure for easier debugging + if result.returncode != 0: + log_txt_path = os.path.join(export_dir, "log.txt") + leapp_tail = "" + if os.path.isfile(log_txt_path): + with open(log_txt_path) as f: + last_lines = f.readlines()[-50:] + leapp_tail = f"\n--- leapp log.txt (last 50 lines) ---\n{''.join(last_lines)}" + pytest.fail( + f"export.py exited with code {result.returncode}.\n" + f"--- stdout ---\n{result.stdout[-3000:]}\n" + f"--- stderr ---\n{result.stderr[-3000:]}" + f"{leapp_tail}" + ) + + assert os.path.isfile(os.path.join(export_dir, f"{task_name}.onnx")), "Missing .onnx export" + assert os.path.isfile(os.path.join(export_dir, f"{task_name}.yaml")), "Missing .yaml export" + assert os.path.isfile(os.path.join(export_dir, "log.txt")), "Missing log.txt" + + finally: + 
shutil.rmtree(export_dir, ignore_errors=True) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/humanoid/mdp/observations.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/humanoid/mdp/observations.py index 5445328cc175..827275f68015 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/humanoid/mdp/observations.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/classic/humanoid/mdp/observations.py @@ -48,8 +48,10 @@ def base_heading_proj( # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] # compute desired heading direction + to_target_pos = torch.tensor(target_pos, device=env.device) - wp.to_torch(asset.data.root_pos_w)[:, :3] - to_target_pos[:, 2] = 0.0 + to_target_pos = torch.cat((to_target_pos[:, :2], torch.zeros_like(to_target_pos[:, 2:3])), dim=-1) + to_target_dir = math_utils.normalize(to_target_pos) # compute base forward vector heading_vec = math_utils.quat_apply(wp.to_torch(asset.data.root_quat_w), wp.to_torch(asset.data.FORWARD_VEC_B)) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/observations.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/observations.py index e70b77d7d8f9..75bb68f0dddd 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/observations.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/observations.py @@ -187,8 +187,10 @@ def __call__( # Ensure w component is positive (q and -q represent the same rotation) # Pick one canonical form to reduce observation variation seen by the policy w_negative = base_quat[:, 0] < 0 - positive_quat = base_quat.clone() - positive_quat[w_negative] = -base_quat[w_negative] + + # positive_quat = base_quat.clone() + # positive_quat[w_negative] = -base_quat[w_negative] + positive_quat = torch.where(w_negative.unsqueeze(-1), -base_quat, base_quat) return 
positive_quat diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/commands/pose_commands.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/commands/pose_commands.py index 35549df614ab..437ac079ef3d 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/commands/pose_commands.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/commands/pose_commands.py @@ -83,6 +83,9 @@ def __init__(self, cfg: dex_cmd_cfgs.ObjectUniformPoseCommandCfg, env: ManagerBa self.success_visualizer = VisualizationMarkers(self.cfg.success_visualizer_cfg) self.success_visualizer.set_visibility(True) + self.cfg.cmd_hint = self.cfg.cmd_hint or "command/body/pose" + self.cfg.element_names = self.cfg.element_names or ["x", "y", "z", "qw", "qx", "qy", "qz"] + def __str__(self) -> str: msg = "UniformPoseCommand:\n" msg += f"\tCommand dimension: {tuple(self.command.shape[1:])}\n" diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/commands/orientation_command.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/commands/orientation_command.py index 468ae2764743..e23359d875b0 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/commands/orientation_command.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/commands/orientation_command.py @@ -75,6 +75,9 @@ def __init__(self, cfg: InHandReOrientationCommandCfg, env: ManagerBasedRLEnv): self.metrics["position_error"] = torch.zeros(self.num_envs, device=self.device) self.metrics["consecutive_success"] = torch.zeros(self.num_envs, device=self.device) + self.cfg.cmd_hint = self.cfg.cmd_hint or "command/body/pose" + self.cfg.element_names = self.cfg.element_names or ["x", "y", "z", "qw", "qx", "qy", "qz"] + def __str__(self) -> str: msg = "InHandManipulationCommandGenerator:\n" msg += 
f"\tCommand dimension: {tuple(self.command.shape[1:])}\n" From d8f89a0e3de1af5f72709ee09c5daee8bb946c6d Mon Sep 17 00:00:00 2001 From: Frank Lai Date: Sun, 22 Mar 2026 19:45:22 -0700 Subject: [PATCH 02/20] updated scripts to handle warp kernels --- .../reinforcement_learning/rsl_rl/export.py | 40 ++++++++++++++----- .../managed_environment_annotator.py/utils.py | 4 -- .../export_annotator.py | 8 ++-- .../proxy.py | 2 +- .../managed_environment_annotator/utils.py | 19 +++++++++ 5 files changed, 54 insertions(+), 19 deletions(-) delete mode 100644 scripts/reinforcement_learning/rsl_rl/managed_environment_annotator.py/utils.py rename scripts/reinforcement_learning/rsl_rl/{managed_environment_annotator.py => managed_environment_annotator}/export_annotator.py (99%) rename scripts/reinforcement_learning/rsl_rl/{managed_environment_annotator.py => managed_environment_annotator}/proxy.py (100%) create mode 100644 scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/utils.py diff --git a/scripts/reinforcement_learning/rsl_rl/export.py b/scripts/reinforcement_learning/rsl_rl/export.py index be24c64209d3..e25abe88fc49 100644 --- a/scripts/reinforcement_learning/rsl_rl/export.py +++ b/scripts/reinforcement_learning/rsl_rl/export.py @@ -10,12 +10,14 @@ """Launch Isaac Sim Simulator first.""" import argparse +import importlib.metadata as metadata import sys import time -import torch from collections.abc import Mapping import leapp +import torch +import warp as wp from leapp import annotate # Disable TorchScript before importing task/environment modules so any @@ -94,29 +96,25 @@ # clear out sys.argv for Hydra sys.argv = [sys.argv[0]] + hydra_args +# Check for installed RSL-RL version +installed_version = metadata.version("rsl-rl-lib") + # launch omniverse app app_launcher = AppLauncher(args_cli) simulation_app = app_launcher.app """Rest everything follows.""" -import gymnasium as gym import os -_ANNOTATOR_DIR = os.path.join( - os.path.dirname(__file__), - 
"managed_environment_annotator.py", -) -if _ANNOTATOR_DIR not in sys.path: - sys.path.insert(0, _ANNOTATOR_DIR) - -from export_annotator import patch_env_for_export +import gymnasium as gym +from managed_environment_annotator.export_annotator import patch_env_for_export from rsl_rl.runners import DistillationRunner, OnPolicyRunner from isaaclab.envs import ManagerBasedRLEnv, ManagerBasedRLEnvCfg from isaaclab.utils.assets import retrieve_file_path -from isaaclab_rl.rsl_rl import RslRlBaseRunnerCfg, RslRlVecEnvWrapper +from isaaclab_rl.rsl_rl import RslRlBaseRunnerCfg, RslRlVecEnvWrapper, handle_deprecated_rsl_rl_cfg from isaaclab_rl.utils.pretrained_checkpoint import get_published_pretrained_checkpoint import isaaclab_tasks # noqa: F401 @@ -124,6 +122,22 @@ from isaaclab_tasks.utils.hydra import hydra_task_config +def _patch_warp_to_torch_passthrough() -> None: + """Make wp.to_torch idempotent for torch tensors during export.""" + if getattr(wp.to_torch, "_leapp_passthrough_patch", False): + return + + original_to_torch = wp.to_torch + + def patched_to_torch(value, *args, **kwargs): + if isinstance(value, torch.Tensor): + return value + return original_to_torch(value, *args, **kwargs) + + patched_to_torch._leapp_passthrough_patch = True # type: ignore[attr-defined] + wp.to_torch = patched_to_torch + + def _get_actor_memory_module(policy_nn): if hasattr(policy_nn, "memory_a"): return policy_nn.memory_a @@ -179,6 +193,9 @@ def main(env_cfg: ManagerBasedRLEnvCfg, agent_cfg: RslRlBaseRunnerCfg): agent_cfg: RslRlBaseRunnerCfg = cli_args.update_rsl_rl_cfg(agent_cfg, args_cli) env_cfg.scene.num_envs = 1 + # handle deprecated configurations + agent_cfg = handle_deprecated_rsl_rl_cfg(agent_cfg, installed_version) + # set the environment seed # note: certain randomizations occur in the environment initialization so we set the seed here env_cfg.seed = agent_cfg.seed @@ -225,6 +242,7 @@ def main(env_cfg: ManagerBasedRLEnvCfg, agent_cfg: RslRlBaseRunnerCfg): 
export_method=args_cli.export_method, required_obs_groups=required_obs_groups, ) + _patch_warp_to_torch_passthrough() # wrap around environment for rsl-rl env = RslRlVecEnvWrapper(env, clip_actions=agent_cfg.clip_actions) diff --git a/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator.py/utils.py b/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator.py/utils.py deleted file mode 100644 index 460a30569089..000000000000 --- a/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator.py/utils.py +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause diff --git a/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator.py/export_annotator.py b/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/export_annotator.py similarity index 99% rename from scripts/reinforcement_learning/rsl_rl/managed_environment_annotator.py/export_annotator.py rename to scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/export_annotator.py index 98e78cf7224f..f134778d9b11 100644 --- a/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator.py/export_annotator.py +++ b/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/export_annotator.py @@ -35,18 +35,20 @@ import inspect import logging -import torch from contextlib import suppress from typing import TYPE_CHECKING, Any +import torch from leapp import annotate from leapp.utils.tensor_description import TensorSemantics -from proxy import _ArticulationWriteProxy, _DataProxy, _EnvProxy, _ManagerTermProxy, _SceneProxy from isaaclab.assets.articulation.articulation import Articulation from isaaclab.managers import ManagerTermBase from isaaclab.utils.leapp_semantics import resolve_leapp_element_names +from .proxy import _ArticulationWriteProxy, _DataProxy, _EnvProxy, 
_ManagerTermProxy, _SceneProxy +from .utils import ensure_torch_tensor + if TYPE_CHECKING: from isaaclab.envs import ManagerBasedEnv @@ -221,6 +223,7 @@ def _make_annotating_getter(self, original_fget, prop_name: str): def getter(data_self, input_name: str): result = original_fget(data_self) + result = ensure_torch_tensor(result) if not isinstance(result, torch.Tensor): return result semantics_meta = getattr(original_fget, "_leapp_semantics", None) @@ -265,7 +268,6 @@ def _make_write_interceptor_factory(self, original_unbound, method_name: str): captured_write_term_names = self._captured_write_term_names def factory(real_asset: Articulation, original_bound, term_name: str, output_cache: list): - def interceptor(*args, **kwargs): result = original_bound(*args, **kwargs) bound_args = signature.bind_partial(real_asset, *args, **kwargs) diff --git a/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator.py/proxy.py b/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/proxy.py similarity index 100% rename from scripts/reinforcement_learning/rsl_rl/managed_environment_annotator.py/proxy.py rename to scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/proxy.py index 1c1dd55a5763..7de546bbb822 100644 --- a/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator.py/proxy.py +++ b/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/proxy.py @@ -5,10 +5,10 @@ from __future__ import annotations -import torch from collections.abc import Callable from typing import Any +import torch from leapp.utils.tensor_description import TensorSemantics from isaaclab.assets.articulation.articulation import Articulation diff --git a/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/utils.py b/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/utils.py new file mode 100644 index 000000000000..b5740d5763d2 --- /dev/null +++ 
b/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/utils.py @@ -0,0 +1,19 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import torch +import warp as wp + + +def ensure_torch_tensor(value): + """Convert Warp arrays to torch tensors while leaving torch tensors unchanged.""" + if isinstance(value, torch.Tensor): + return value + try: + return wp.to_torch(value) + except Exception: + return value From 8c7c43de14b88af961eaa4363b7e4fbf4cc969bb Mon Sep 17 00:00:00 2001 From: Frank Lai Date: Mon, 23 Mar 2026 11:30:50 -0700 Subject: [PATCH 03/20] leapp now automatically propagates into buffered values --- .../managed_environment_annotator/export_annotator.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/export_annotator.py b/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/export_annotator.py index f134778d9b11..c8de069bcd79 100644 --- a/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/export_annotator.py +++ b/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/export_annotator.py @@ -445,12 +445,6 @@ def _patch_action_manager_methods(self, action_manager): def patched_process_action(action: torch.Tensor): """Register raw_action buffers, call real process_action, preserve action clone.""" - for term_name, term in action_manager._terms.items(): - if hasattr(term, "_raw_actions") and term._raw_actions is not None: - term._raw_actions = annotate.register_buffer( - task_name, {f"{term_name}_raw_actions": term._raw_actions} - ) - original_process(action) action_manager._action = action.clone() self._pending_action_output_export = True From 450a7abb3c7882f10165be95d98a3ad56c64191c Mon Sep 17 00:00:00 2001 From: Frank Lai Date: Mon, 23 Mar 
2026 16:54:31 -0700 Subject: [PATCH 04/20] changed i/o intercepting. added semantics to all the articulation data --- .../export_annotator.py | 202 ++------------ .../managed_environment_annotator/proxy.py | 258 ++++++++++++++---- .../assets/articulation/base_articulation.py | 8 + .../articulation/base_articulation_data.py | 150 +++++++++- .../isaaclab/utils/leapp_semantics.py | 9 +- 5 files changed, 388 insertions(+), 239 deletions(-) diff --git a/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/export_annotator.py b/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/export_annotator.py index c8de069bcd79..1213d1fdbbdc 100644 --- a/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/export_annotator.py +++ b/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/export_annotator.py @@ -5,13 +5,12 @@ """Export annotations for Isaac Lab policies using proxy-based patching. -Observation and action annotation share a single set of annotating getters -and a unified dedup cache so that a state property (e.g. ``joint_pos``) -read by both an observation term and an action term resolves to one LEAPP -input edge. +Observation and action annotation share a unified dedup cache so that a +state property (e.g. ``joint_pos``) read by both an observation term and +an action term resolves to one LEAPP input edge. - Observation term functions see an _EnvProxy whose scene returns - _ArticulationProxy objects with annotating data getters. + _ArticulationProxy objects with annotating data proxies. 
- Action terms have their ``_asset`` attribute replaced with an _ArticulationWriteProxy that intercepts ``_leapp_semantics``-decorated @@ -33,8 +32,8 @@ from __future__ import annotations -import inspect import logging +from collections.abc import Callable from contextlib import suppress from typing import TYPE_CHECKING, Any @@ -42,7 +41,7 @@ from leapp import annotate from leapp.utils.tensor_description import TensorSemantics -from isaaclab.assets.articulation.articulation import Articulation +from isaaclab.assets.articulation.base_articulation import BaseArticulation from isaaclab.managers import ManagerTermBase from isaaclab.utils.leapp_semantics import resolve_leapp_element_names @@ -62,26 +61,6 @@ )() -def _discover_data_classes(scene) -> set[type]: - """Discover all data class types from entities present in the scene. - - Iterates over all dict-valued instance attributes on the scene object - (which is how ``InteractiveScene`` stores its entity families internally) - and collects ``type(entity.data)`` for any value that exposes a ``.data`` - attribute. (``.data``) is a formal contract in the asset base class. - getting data from any data class @property will be automatically traced. - """ - classes: set[type] = set() - for attr_value in vars(scene).values(): - if not isinstance(attr_value, dict): - continue - for entity in attr_value.values(): - data = getattr(entity, "data", None) - if data is not None: - classes.add(type(data)) - return classes - - # ══════════════════════════════════════════════════════════════════ # ExportPatcher # ══════════════════════════════════════════════════════════════════ @@ -90,16 +69,16 @@ def _discover_data_classes(scene) -> set[type]: class ExportPatcher: """Unified patcher that annotates observation inputs and action outputs for LEAPP export. 
- At setup time the patcher discovers all data classes present in the scene - (``ArticulationData``, ``RigidObjectData``, sensor data classes, …) and - builds annotating getters for **every** ``@property`` on those classes. + Observation-side property semantics are resolved lazily inside + ``_DataProxy`` by combining: + + - the concrete runtime getter from the backend data class + - the nearest ``_leapp_semantics`` metadata found while walking the MRO - - Properties carrying ``_leapp_semantics`` produce rich annotations - (kind, element_names) used by the downstream deployment resolver. - - Properties **without** ``_leapp_semantics`` are still traced so that - no tensor is silently baked as a constant during export. + This lets backends override property implementations without duplicating + decorators from the abstract API. - The getters and a shared dedup cache are wired into both: + The proxies and a shared dedup cache are wired into both: - The observation proxy chain (``_EnvProxy`` → ``_SceneProxy`` → ``_EntityProxy`` → ``_DataProxy``) for state reads @@ -119,6 +98,8 @@ def __init__(self, task_name: str, export_method: str, required_obs_groups: set[ self.export_method = export_method self.required_obs_groups = required_obs_groups self._annotated_tensor_cache: dict[tuple[int, str], torch.Tensor] = {} + self._data_property_resolution_cache: dict[tuple[type, str], tuple[Callable, object] | None] = {} + self._write_method_resolution_cache: dict[tuple[type, str], tuple[Callable, object, object] | None] = {} self._action_output_cache: list[TensorSemantics] = [] self._captured_write_term_names: set[str] = set() self._fallback_term_names: set[str] = set() @@ -130,11 +111,9 @@ def setup(self, env): """Patch observation and action managers on the unwrapped env.""" unwrapped = env.env.unwrapped - annotating_getters = self._build_annotating_getters(unwrapped.scene) - annotating_write_methods = self._build_annotating_write_methods() cache = self._annotated_tensor_cache - 
scene_proxy = _SceneProxy(unwrapped.scene, annotating_getters, cache) + scene_proxy = _SceneProxy(unwrapped.scene, self.task_name, self._data_property_resolution_cache, cache) proxy_env = _EnvProxy(unwrapped, scene_proxy) self._disable_training_managers(unwrapped) @@ -142,9 +121,7 @@ def setup(self, env): self._patch_history_buffers(unwrapped.observation_manager) self._patch_action_manager( unwrapped.action_manager, - annotating_getters, cache, - annotating_write_methods, ) # ── Disable training-only managers ───────────────────────────── @@ -184,118 +161,6 @@ def _noop(*args, **kwargs): rm.record_post_reset = _noop rm.record_post_physics_decimation_step = _noop - # ── Scanning ────────────────────────────────────────────────── - - def _build_annotating_getters(self, scene) -> dict[type, dict[str, callable]]: - """Discover data classes from the scene and build annotating getters for all properties. - - This method introspects the actual scene to find every data class in use. - For each class it registers getters for **all** public ``@property`` - descriptors — not just those decorated with ``_leapp_semantics``. Properties - without semantics are still traced (with ``kind=None``) so that no tensor - read is silently baked as a constant during export. - - Returns a dict mapping data class type to a dict of - ``property_name -> callable(data_self, input_name) -> annotated_tensor``. 
- """ - data_classes = _discover_data_classes(scene) - getters: dict[type, dict[str, callable]] = {} - for data_cls in data_classes: - class_getters: dict[str, callable] = {} - for prop_name in dir(data_cls): - if prop_name.startswith("_"): # ignore all private properties - continue - prop = getattr(data_cls, prop_name, None) - if isinstance(prop, property) and prop.fget: # only consider properties with a getter - class_getters[prop_name] = self._make_annotating_getter(prop.fget, prop_name) - if class_getters: - getters[data_cls] = class_getters - return getters - - def _make_annotating_getter(self, original_fget, prop_name: str): - """Create an annotating getter callable for a single annotated data property. - - The returned callable invokes the real getter, then registers the result - as a LEAPP input tensor with the property's semantic metadata and the - caller-supplied public input name. - """ - task_name = self.task_name - - def getter(data_self, input_name: str): - result = original_fget(data_self) - result = ensure_torch_tensor(result) - if not isinstance(result, torch.Tensor): - return result - semantics_meta = getattr(original_fget, "_leapp_semantics", None) - sem = TensorSemantics( - name=input_name, - ref=result, - kind=semantics_meta.kind if semantics_meta else None, - element_names=resolve_leapp_element_names(semantics_meta, data_self), - ) - return annotate.input_tensors(task_name, sem) - - return getter - - def _build_annotating_write_methods(self) -> dict[str, callable]: - """Scan Articulation for ``_leapp_semantics`` methods and build interceptors. - - Returns a dict mapping method name to a factory callable. The factory takes - ``(real_asset, original_bound_method, term_name, output_cache)`` and returns - a callable that the proxy returns in ``__getattr__``. 
- """ - methods: dict[str, callable] = {} - for method_name in dir(Articulation): - method = getattr(Articulation, method_name, None) - if callable(method) and hasattr(method, "_leapp_semantics"): - methods[method_name] = self._make_write_interceptor_factory(method, method_name) - return methods - - def _make_write_interceptor_factory(self, original_unbound, method_name: str): - """Create a factory that produces bound annotating wrappers for a single write method. - - The factory is called by ``_ArticulationWriteProxy.__getattr__`` each time the - method is accessed. It returns a callable that: - - 1. Calls the real method on the real asset. - 2. Inspects the ``target`` argument. - 3. Records a ``TensorSemantics`` entry in the shared output cache. - 4. Records the term name in ``_captured_write_term_names`` so that - the fallback path knows this term produced write outputs. - """ - signature = inspect.signature(original_unbound) - semantics = getattr(original_unbound, "_leapp_semantics", None) - captured_write_term_names = self._captured_write_term_names - - def factory(real_asset: Articulation, original_bound, term_name: str, output_cache: list): - def interceptor(*args, **kwargs): - result = original_bound(*args, **kwargs) - bound_args = signature.bind_partial(real_asset, *args, **kwargs) - target = bound_args.arguments.get("target") - - if isinstance(target, torch.Tensor): - tensor_target: torch.Tensor = target - output_name = _unique_output_name(term_name, method_name, output_cache) - joint_ids = bound_args.arguments.get("joint_ids") - output_cache.append( - TensorSemantics( - name=output_name, - ref=tensor_target.clone(), - kind=semantics.kind if semantics is not None else None, - element_names=resolve_leapp_element_names( - semantics, - _JointNameContext(real_asset.joint_names, joint_ids), - ), - ) - ) - captured_write_term_names.add(term_name) - - return result - - return interceptor - - return factory - @staticmethod def _resolve_scene_entity_key(scene, 
entity: Any) -> str | None: """Return the scene dictionary key for a given entity, if present.""" @@ -394,17 +259,18 @@ def patched_compute_group(*args, **kwargs): # ── Action manager patches ──────────────────────────────────── - def _patch_action_manager(self, action_manager, annotating_getters, cache, annotating_write_methods): + def _patch_action_manager(self, action_manager, cache): """Patch action terms with write+read proxies and patch manager methods.""" scene = action_manager._env.scene for term_name, term in action_manager._terms.items(): asset = getattr(term, "_asset", None) - if isinstance(asset, Articulation): - real_asset: Articulation = asset + if isinstance(asset, BaseArticulation): + real_asset: BaseArticulation = asset scene_key = self._resolve_scene_entity_key(scene, real_asset) or "ego" data_proxy = _DataProxy( real_asset.data, - annotating_getters, + self.task_name, + self._data_property_resolution_cache, cache, input_name_resolver=lambda prop_name, k=scene_key: f"{k}_{prop_name}", ) @@ -412,7 +278,8 @@ def _patch_action_manager(self, action_manager, annotating_getters, cache, annot real_asset=real_asset, term_name=term_name, output_cache=self._action_output_cache, - annotating_methods=annotating_write_methods, + method_resolution_cache=self._write_method_resolution_cache, + captured_write_term_names=self._captured_write_term_names, data_proxy=data_proxy, ) @@ -648,7 +515,7 @@ def _collect_action_static_outputs(action_manager, skip_terms: set[str] | None = if hasattr(real_asset, "joint_names"): joint_name_context = _JointNameContext(real_asset.joint_names, joint_ids) if hasattr(data, "default_joint_stiffness") and data.default_joint_stiffness is not None: - gains = data.default_joint_stiffness + gains = ensure_torch_tensor(data.default_joint_stiffness) static_values.append( TensorSemantics( name=f"{term_name}_kp_gains", @@ -665,7 +532,7 @@ def _collect_action_static_outputs(action_manager, skip_terms: set[str] | None = ) ) if hasattr(data, 
"default_joint_damping") and data.default_joint_damping is not None: - gains = data.default_joint_damping + gains = ensure_torch_tensor(data.default_joint_damping) static_values.append( TensorSemantics( name=f"{term_name}_kd_gains", @@ -699,23 +566,6 @@ def __init__(self, joint_names: list[str], joint_ids): self._joint_ids = joint_ids -def _unique_output_name(term_name: str, method_name: str, output_cache: list[TensorSemantics]) -> str: - """Return a stable, unique output name for an action write entry. - - Prefers ``term_name``, falls back to ``term_name_method_name``, and appends a - numeric suffix if even that collides. - """ - existing = {t.name for t in output_cache} - candidate = term_name - if candidate in existing: - candidate = f"{term_name}_{method_name}" - suffix = 2 - while candidate in existing: - candidate = f"{term_name}_{method_name}_{suffix}" - suffix += 1 - return candidate - - # ══════════════════════════════════════════════════════════════════ # Public entry point # ══════════════════════════════════════════════════════════════════ diff --git a/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/proxy.py b/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/proxy.py index 7de546bbb822..9b279d53ffae 100644 --- a/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/proxy.py +++ b/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/proxy.py @@ -5,40 +5,131 @@ from __future__ import annotations +import inspect from collections.abc import Callable from typing import Any import torch +from leapp import annotate from leapp.utils.tensor_description import TensorSemantics -from isaaclab.assets.articulation.articulation import Articulation +from isaaclab.assets.articulation.base_articulation import BaseArticulation from isaaclab.managers import ManagerTermBase +from isaaclab.utils.leapp_semantics import resolve_leapp_element_names +from .utils import ensure_torch_tensor -def 
_lookup_annotating_getter( - annotating_getters_by_type: dict[type, dict[str, Callable]], real_data: Any, name: str -) -> Callable | None: - """Return the annotating getter for a property on the given data object, if any.""" + +def _resolve_annotated_property( + property_resolution_cache: dict[tuple[type, str], tuple[Callable, Any] | None], + real_data: Any, + name: str, +) -> tuple[Callable, Any] | None: + """Resolve a concrete property getter and inherited semantics metadata. + + The execution getter always comes from the concrete runtime class. Semantic + metadata is resolved independently by walking the MRO until a property + definition with ``_leapp_semantics`` is found. This mirrors the output-side + export path, where semantics are authored on the base API while concrete + backends provide the runtime implementation. + """ + cache_key = (type(real_data), name) + if cache_key in property_resolution_cache: + return property_resolution_cache[cache_key] + + execution_prop = getattr(type(real_data), name, None) + if not isinstance(execution_prop, property) or execution_prop.fget is None: + property_resolution_cache[cache_key] = None + return None + + semantics_meta = None for data_cls in type(real_data).__mro__: - getter = annotating_getters_by_type.get(data_cls, {}).get(name) - if getter is not None: - return getter + prop = data_cls.__dict__.get(name) + if not isinstance(prop, property) or prop.fget is None: + continue + candidate = getattr(prop.fget, "_leapp_semantics", None) + if candidate is None: + continue + if getattr(candidate, "const", False): + property_resolution_cache[cache_key] = None + return None + semantics_meta = candidate + break + + if semantics_meta is None: + property_resolution_cache[cache_key] = None + return None + + resolution = (execution_prop.fget, semantics_meta) + property_resolution_cache[cache_key] = resolution + return resolution + + +def _resolve_annotated_method( + method_resolution_cache: dict[tuple[type, str], tuple[Callable, 
Any, inspect.Signature] | None], + real_asset: BaseArticulation, + name: str, +) -> tuple[Callable, Any, inspect.Signature] | None: + """Resolve a concrete bound method and inherited semantics metadata.""" + cache_key = (type(real_asset), name) + if cache_key in method_resolution_cache: + return method_resolution_cache[cache_key] + + original_method = getattr(real_asset, name, None) + if not callable(original_method): + method_resolution_cache[cache_key] = None + return None + + for asset_cls in type(real_asset).__mro__: + candidate = asset_cls.__dict__.get(name) + if not callable(candidate): + continue + semantics_meta = getattr(candidate, "_leapp_semantics", None) + if semantics_meta is None: + continue + resolution = (original_method, semantics_meta, inspect.signature(candidate)) + method_resolution_cache[cache_key] = resolution + return resolution + + method_resolution_cache[cache_key] = None return None +class _WriteJointNameContext: + """Resolve runtime joint-name subsets for lazy write interception.""" + + __slots__ = ("joint_names", "_joint_ids") + + def __init__(self, joint_names: list[str], joint_ids): + self.joint_names = joint_names + self._joint_ids = joint_ids + + +def _unique_output_name(term_name: str, method_name: str, output_cache: list[TensorSemantics]) -> str: + """Return a stable, unique output name for an action write entry.""" + existing = {t.name for t in output_cache} + candidate = term_name + if candidate in existing: + candidate = f"{term_name}_{method_name}" + suffix = 2 + while candidate in existing: + candidate = f"{term_name}_{method_name}_{suffix}" + suffix += 1 + return candidate + + class _DataProxy: """Proxy around a real data object that intercepts tensor-returning property reads. The real data object may be any scene entity data class (``ArticulationData``, - ``RigidObjectData``, sensor data classes, etc.). The proxy intercepts all - ``@property`` getters that were registered during scene introspection. 
When - the getter returns a ``torch.Tensor``, the result is annotated as a LEAPP - input and cached for deduplication. Non-tensor results are forwarded - transparently. + ``RigidObjectData``, sensor data classes, etc.). The proxy resolves property + semantics lazily on first access by walking the runtime class MRO. This lets + concrete backend overrides reuse semantic metadata authored on abstract base + properties without copying decorators onto every implementation. - Properties with ``_leapp_semantics`` metadata produce rich annotations - (kind, element_names). Properties without it are still traced — with no - semantic metadata — so that no tensor is silently baked as a constant. + When a semantic property returns a ``torch.Tensor``, the result is annotated + as a LEAPP input and cached for deduplication. Non-tensor results and + ordinary attributes are forwarded transparently. All other attribute access is forwarded transparently to the real object. """ @@ -46,32 +137,47 @@ class _DataProxy: def __init__( self, real_data: Any, - annotating_getters_by_type: dict[type, dict[str, Callable]], + task_name: str, + property_resolution_cache: dict[tuple[type, str], tuple[Callable, Any] | None], cache: dict, input_name_resolver: Callable, ): object.__setattr__(self, "_real_data", real_data) - object.__setattr__(self, "_annotating_getters_by_type", annotating_getters_by_type) + object.__setattr__(self, "_task_name", task_name) + object.__setattr__(self, "_property_resolution_cache", property_resolution_cache) object.__setattr__(self, "_cache", cache) object.__setattr__(self, "_input_name_resolver", input_name_resolver) def __getattr__(self, name): - """Intercept registered property reads; forward everything else.""" + """Intercept semantic property reads; forward everything else.""" real_data = object.__getattribute__(self, "_real_data") - getter = _lookup_annotating_getter( - object.__getattribute__(self, "_annotating_getters_by_type"), real_data, name + resolution = 
_resolve_annotated_property( + object.__getattribute__(self, "_property_resolution_cache"), real_data, name ) - if getter is not None: - cache = object.__getattribute__(self, "_cache") - cache_key = (id(real_data), name) - if cache_key in cache: - return cache[cache_key].clone() - input_name = object.__getattribute__(self, "_input_name_resolver")(name) - result = getter(real_data, input_name) - if isinstance(result, torch.Tensor): - cache[cache_key] = result + if resolution is None: + return getattr(real_data, name) + + cache = object.__getattribute__(self, "_cache") + cache_key = (id(real_data), name) + if cache_key in cache: + return cache[cache_key].clone() + + execution_fget, semantics_meta = resolution + result = execution_fget(real_data) + result = ensure_torch_tensor(result) + if not isinstance(result, torch.Tensor): return result - return getattr(real_data, name) + + input_name = object.__getattribute__(self, "_input_name_resolver")(name) + sem = TensorSemantics( + name=input_name, + ref=result, + kind=semantics_meta.kind, + element_names=resolve_leapp_element_names(semantics_meta, real_data), + ) + annotated = annotate.input_tensors(object.__getattribute__(self, "_task_name"), sem) + cache[cache_key] = annotated + return annotated class _EntityProxy: @@ -97,9 +203,16 @@ def __getattr__(self, name): class _EntityMappingProxy: """Proxy around a mapping of scene entities that lazily wraps data-producing entries.""" - def __init__(self, real_mapping, annotating_getters_by_type: dict[type, dict[str, Callable]], cache: dict): + def __init__( + self, + real_mapping, + task_name: str, + property_resolution_cache: dict[tuple[type, str], tuple[Callable, Any] | None], + cache: dict, + ): object.__setattr__(self, "_real_mapping", real_mapping) - object.__setattr__(self, "_annotating_getters_by_type", annotating_getters_by_type) + object.__setattr__(self, "_task_name", task_name) + object.__setattr__(self, "_property_resolution_cache", property_resolution_cache) 
object.__setattr__(self, "_cache", cache) object.__setattr__(self, "_proxied", {}) @@ -113,10 +226,10 @@ def __getitem__(self, key): data = getattr(entity, "data", None) if data is None: return entity - annotating_getters_by_type = object.__getattribute__(self, "_annotating_getters_by_type") data_proxy = _DataProxy( data, - annotating_getters_by_type, + object.__getattribute__(self, "_task_name"), + object.__getattribute__(self, "_property_resolution_cache"), object.__getattribute__(self, "_cache"), input_name_resolver=lambda prop_name: f"{key}_{prop_name}", ) @@ -152,10 +265,17 @@ class _SceneProxy: ``scene["name"]`` and ``scene.sensors["name"]`` access paths. """ - def __init__(self, real_scene, annotating_getters_by_type: dict[type, dict[str, Callable]], cache: dict): + def __init__( + self, + real_scene, + task_name: str, + property_resolution_cache: dict[tuple[type, str], tuple[Callable, Any] | None], + cache: dict, + ): # use object.__setattr__ to avoid creating new attributes, only set the ones that are already defined object.__setattr__(self, "_real_scene", real_scene) - object.__setattr__(self, "_annotating_getters_by_type", annotating_getters_by_type) + object.__setattr__(self, "_task_name", task_name) + object.__setattr__(self, "_property_resolution_cache", property_resolution_cache) object.__setattr__(self, "_cache", cache) object.__setattr__(self, "_proxied", {}) object.__setattr__(self, "_sensor_mapping_proxy", None) @@ -170,11 +290,11 @@ def _maybe_proxy_entity(self, key: str, entity: Any): if data is None: return entity - annotating_getters_by_type = object.__getattribute__(self, "_annotating_getters_by_type") cache = object.__getattribute__(self, "_cache") data_proxy = _DataProxy( data, - annotating_getters_by_type, + object.__getattribute__(self, "_task_name"), + object.__getattribute__(self, "_property_resolution_cache"), cache, input_name_resolver=lambda prop_name, k=key: f"{k}_{prop_name}", ) @@ -196,7 +316,8 @@ def sensors(self): real_scene = 
object.__getattribute__(self, "_real_scene") sensor_mapping_proxy = _EntityMappingProxy( real_scene.sensors, - object.__getattribute__(self, "_annotating_getters_by_type"), + object.__getattribute__(self, "_task_name"), + object.__getattribute__(self, "_property_resolution_cache"), object.__getattribute__(self, "_cache"), ) object.__setattr__(self, "_sensor_mapping_proxy", sensor_mapping_proxy) @@ -304,7 +425,7 @@ def __getattr__(self, name): class _ArticulationWriteProxy: - """Proxy around a real Articulation for action terms. + """Proxy around a real articulation implementation for action terms. Intercepts ``_leapp_semantics``-decorated write methods **and** routes ``.data`` reads through a shared ``_DataProxy`` so that @@ -317,16 +438,18 @@ class _ArticulationWriteProxy: def __init__( self, - real_asset: Articulation, + real_asset: BaseArticulation, term_name: str, output_cache: list[TensorSemantics], - annotating_methods: dict[str, Callable], + method_resolution_cache: dict[tuple[type, str], tuple[Callable, Any, inspect.Signature] | None], + captured_write_term_names: set[str], data_proxy: _DataProxy, ): object.__setattr__(self, "_real_asset", real_asset) object.__setattr__(self, "_term_name", term_name) object.__setattr__(self, "_output_cache", output_cache) - object.__setattr__(self, "_annotating_methods", annotating_methods) + object.__setattr__(self, "_method_resolution_cache", method_resolution_cache) + object.__setattr__(self, "_captured_write_term_names", captured_write_term_names) object.__setattr__(self, "_data_proxy", data_proxy) @property @@ -335,12 +458,41 @@ def data(self): return object.__getattribute__(self, "_data_proxy") def __getattr__(self, name): - """Return an annotating wrapper for _leapp_semantics methods; forward everything else.""" - methods = object.__getattribute__(self, "_annotating_methods") - if name in methods: - real_asset = object.__getattribute__(self, "_real_asset") - term_name = object.__getattribute__(self, "_term_name") - 
output_cache = object.__getattribute__(self, "_output_cache") - original_method = getattr(real_asset, name) - return methods[name](real_asset, original_method, term_name, output_cache) - return getattr(object.__getattribute__(self, "_real_asset"), name) + """Return an annotating wrapper for semantic write methods; forward everything else.""" + real_asset = object.__getattribute__(self, "_real_asset") + resolution = _resolve_annotated_method( + object.__getattribute__(self, "_method_resolution_cache"), + real_asset, + name, + ) + if resolution is None: + return getattr(real_asset, name) + + original_method, semantics_meta, signature = resolution + term_name = object.__getattribute__(self, "_term_name") + output_cache = object.__getattribute__(self, "_output_cache") + captured_write_term_names = object.__getattribute__(self, "_captured_write_term_names") + + def interceptor(*args, **kwargs): + result = original_method(*args, **kwargs) + bound_args = signature.bind_partial(real_asset, *args, **kwargs) + target = bound_args.arguments.get("target") + + if isinstance(target, torch.Tensor): + joint_ids = bound_args.arguments.get("joint_ids") + output_cache.append( + TensorSemantics( + name=_unique_output_name(term_name, name, output_cache), + ref=target.clone(), + kind=semantics_meta.kind, + element_names=resolve_leapp_element_names( + semantics_meta, + _WriteJointNameContext(real_asset.joint_names, joint_ids), + ), + ) + ) + captured_write_term_names.add(term_name) + + return result + + return interceptor diff --git a/source/isaaclab/isaaclab/assets/articulation/base_articulation.py b/source/isaaclab/isaaclab/assets/articulation/base_articulation.py index 25ca2c4ceaf0..fce8506a2df1 100644 --- a/source/isaaclab/isaaclab/assets/articulation/base_articulation.py +++ b/source/isaaclab/isaaclab/assets/articulation/base_articulation.py @@ -15,7 +15,9 @@ import torch import warp as wp +from leapp import OutputKindEnum +from ...utils.leapp_semantics import leapp_tensor_semantics 
from ..asset_base import AssetBase if TYPE_CHECKING: @@ -1266,6 +1268,7 @@ def set_inertias_mask( raise NotImplementedError() @abstractmethod + @leapp_tensor_semantics(kind=OutputKindEnum.JOINT_POSITION, element_names_source="joint_names") def set_joint_position_target_index( self, *, @@ -1293,6 +1296,7 @@ def set_joint_position_target_index( raise NotImplementedError() @abstractmethod + @leapp_tensor_semantics(kind=OutputKindEnum.JOINT_POSITION, element_names_source="joint_names") def set_joint_position_target_mask( self, *, @@ -1320,6 +1324,7 @@ def set_joint_position_target_mask( raise NotImplementedError() @abstractmethod + @leapp_tensor_semantics(kind=OutputKindEnum.JOINT_VELOCITY, element_names_source="joint_names") def set_joint_velocity_target_index( self, *, @@ -1347,6 +1352,7 @@ def set_joint_velocity_target_index( raise NotImplementedError() @abstractmethod + @leapp_tensor_semantics(kind=OutputKindEnum.JOINT_VELOCITY, element_names_source="joint_names") def set_joint_velocity_target_mask( self, *, @@ -1374,6 +1380,7 @@ def set_joint_velocity_target_mask( raise NotImplementedError() @abstractmethod + @leapp_tensor_semantics(kind=OutputKindEnum.JOINT_EFFORT, element_names_source="joint_names") def set_joint_effort_target_index( self, *, @@ -1401,6 +1408,7 @@ def set_joint_effort_target_index( raise NotImplementedError() @abstractmethod + @leapp_tensor_semantics(kind=OutputKindEnum.JOINT_EFFORT, element_names_source="joint_names") def set_joint_effort_target_mask( self, *, diff --git a/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py b/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py index 61ee8f28e7a9..45d6d2725407 100644 --- a/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py +++ b/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py @@ -7,6 +7,15 @@ from abc import ABC, abstractmethod import warp as wp +from leapp import InputKindEnum + +from isaaclab.utils.leapp_semantics 
import ( + POSE7_ELEMENT_NAMES, + QUAT_WXYZ_ELEMENT_NAMES, + WRENCH6_ELEMENT_NAMES, + XYZ_ELEMENT_NAMES, + leapp_tensor_semantics, +) class BaseArticulationData(ABC): @@ -45,16 +54,16 @@ def update(self, dt: float) -> None: # Names. ## - body_names: list[str] = None + body_names: list[str] | None = None """Body names in the order parsed by the simulation view.""" - joint_names: list[str] = None + joint_names: list[str] | None = None """Joint names in the order parsed by the simulation view.""" - fixed_tendon_names: list[str] = None + fixed_tendon_names: list[str] | None = None """Fixed tendon names in the order parsed by the simulation view.""" - spatial_tendon_names: list[str] = None + spatial_tendon_names: list[str] | None = None """Spatial tendon names in the order parsed by the simulation view.""" ## @@ -63,6 +72,7 @@ def update(self, dt: float) -> None: @property @abstractmethod + @leapp_tensor_semantics(const=True) def default_root_pose(self) -> wp.array: """Default root pose ``[pos, quat]`` in the local environment frame. @@ -73,6 +83,7 @@ def default_root_pose(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(const=True) def default_root_vel(self) -> wp.array: """Default root velocity ``[lin_vel, ang_vel]`` in the local environment frame. @@ -83,12 +94,14 @@ def default_root_vel(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(const=True) def default_root_state(self) -> wp.array: """Deprecated, same as :attr:`default_root_pose` and :attr:`default_root_vel`.""" raise NotImplementedError @property @abstractmethod + @leapp_tensor_semantics(const=True) def default_joint_pos(self) -> wp.array: """Default joint positions of all joints. @@ -100,6 +113,7 @@ def default_joint_pos(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(const=True) def default_joint_vel(self) -> wp.array: """Default joint velocities of all joints. 
@@ -115,6 +129,7 @@ def default_joint_vel(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.COMMAND_JOINT_POSITION) def joint_pos_target(self) -> wp.array: """Joint position targets commanded by the user. @@ -128,6 +143,7 @@ def joint_pos_target(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.COMMAND_JOINT_VELOCITY) def joint_vel_target(self) -> wp.array: """Joint velocity targets commanded by the user. @@ -141,6 +157,7 @@ def joint_vel_target(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.COMMAND_JOINT_TORQUES) def joint_effort_target(self) -> wp.array: """Joint effort targets commanded by the user. @@ -158,6 +175,7 @@ def joint_effort_target(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind="state/joint/computed_torque") def computed_torque(self) -> wp.array: """Joint torques computed from the actuator model (before clipping). @@ -171,6 +189,7 @@ def computed_torque(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind="state/joint/applied_torque") def applied_torque(self) -> wp.array: """Joint torques applied from the actuator model (after clipping). @@ -187,6 +206,7 @@ def applied_torque(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(const=True) def joint_stiffness(self) -> wp.array: """Joint stiffness provided to the simulation. @@ -198,6 +218,7 @@ def joint_stiffness(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(const=True) def joint_damping(self) -> wp.array: """Joint damping provided to the simulation. @@ -209,6 +230,7 @@ def joint_damping(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(const=True) def joint_armature(self) -> wp.array: """Joint armature provided to the simulation. 
@@ -218,6 +240,7 @@ def joint_armature(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(const=True) def joint_friction_coeff(self) -> wp.array: """Joint static friction coefficient provided to the simulation. @@ -227,6 +250,18 @@ def joint_friction_coeff(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(const=True) + def joint_viscous_friction_coeff(self) -> wp.array: + """Joint viscous friction coefficient provided to the simulation. + + Shape is (num_instances, num_joints), dtype = wp.float32. In torch this resolves to + (num_instances, num_joints). + """ + raise NotImplementedError + + @property + @abstractmethod + @leapp_tensor_semantics(const=True) def joint_pos_limits(self) -> wp.array: """Joint position limits provided to the simulation. @@ -239,6 +274,7 @@ def joint_pos_limits(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(const=True) def joint_vel_limits(self) -> wp.array: """Joint maximum velocity provided to the simulation. @@ -248,6 +284,7 @@ def joint_vel_limits(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(const=True) def joint_effort_limits(self) -> wp.array: """Joint maximum effort provided to the simulation. @@ -261,6 +298,7 @@ def joint_effort_limits(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(const=True) def soft_joint_pos_limits(self) -> wp.array: r"""Soft joint positions limits for all joints. @@ -286,6 +324,7 @@ def soft_joint_pos_limits(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(const=True) def soft_joint_vel_limits(self) -> wp.array: """Soft joint velocity limits for all joints. @@ -298,6 +337,7 @@ def soft_joint_vel_limits(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(const=True) def gear_ratio(self) -> wp.array: """Gear ratio for relating motor torques to applied Joint torques. 
@@ -311,6 +351,7 @@ def gear_ratio(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(const=True) def fixed_tendon_stiffness(self) -> wp.array: """Fixed tendon stiffness provided to the simulation. @@ -321,6 +362,7 @@ def fixed_tendon_stiffness(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(const=True) def fixed_tendon_damping(self) -> wp.array: """Fixed tendon damping provided to the simulation. @@ -331,6 +373,7 @@ def fixed_tendon_damping(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(const=True) def fixed_tendon_limit_stiffness(self) -> wp.array: """Fixed tendon limit stiffness provided to the simulation. @@ -341,6 +384,7 @@ def fixed_tendon_limit_stiffness(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(const=True) def fixed_tendon_rest_length(self) -> wp.array: """Fixed tendon rest length provided to the simulation. @@ -351,6 +395,7 @@ def fixed_tendon_rest_length(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(const=True) def fixed_tendon_offset(self) -> wp.array: """Fixed tendon offset provided to the simulation. @@ -361,6 +406,7 @@ def fixed_tendon_offset(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(const=True) def fixed_tendon_pos_limits(self) -> wp.array: """Fixed tendon position limits provided to the simulation. @@ -375,6 +421,7 @@ def fixed_tendon_pos_limits(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(const=True) def spatial_tendon_stiffness(self) -> wp.array: """Spatial tendon stiffness provided to the simulation. @@ -385,6 +432,7 @@ def spatial_tendon_stiffness(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(const=True) def spatial_tendon_damping(self) -> wp.array: """Spatial tendon damping provided to the simulation. 
@@ -395,6 +443,7 @@ def spatial_tendon_damping(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(const=True) def spatial_tendon_limit_stiffness(self) -> wp.array: """Spatial tendon limit stiffness provided to the simulation. @@ -405,6 +454,7 @@ def spatial_tendon_limit_stiffness(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(const=True) def spatial_tendon_offset(self) -> wp.array: """Spatial tendon offset provided to the simulation. @@ -419,6 +469,7 @@ def spatial_tendon_offset(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names=POSE7_ELEMENT_NAMES) def root_link_pose_w(self) -> wp.array: """Root link pose ``[pos, quat]`` in simulation world frame. @@ -431,6 +482,7 @@ def root_link_pose_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names=WRENCH6_ELEMENT_NAMES) def root_link_vel_w(self) -> wp.array: """Root link velocity ``[lin_vel, ang_vel]`` in simulation world frame. @@ -443,6 +495,7 @@ def root_link_vel_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names=POSE7_ELEMENT_NAMES) def root_com_pose_w(self) -> wp.array: """Root center of mass pose ``[pos, quat]`` in simulation world frame. @@ -455,6 +508,7 @@ def root_com_pose_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names=WRENCH6_ELEMENT_NAMES) def root_com_vel_w(self) -> wp.array: """Root center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. 
@@ -467,18 +521,21 @@ def root_com_vel_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind="state/body/state") def root_state_w(self) -> wp.array: """Deprecated, same as :attr:`root_link_pose_w` and :attr:`root_com_vel_w`.""" raise NotImplementedError @property @abstractmethod + @leapp_tensor_semantics(kind="state/body/link_state") def root_link_state_w(self) -> wp.array: """Deprecated, same as :attr:`root_link_pose_w` and :attr:`root_link_vel_w`.""" raise NotImplementedError @property @abstractmethod + @leapp_tensor_semantics(kind="state/body/com_state") def root_com_state_w(self) -> wp.array: """Deprecated, same as :attr:`root_com_pose_w` and :attr:`root_com_vel_w`.""" raise NotImplementedError @@ -489,6 +546,7 @@ def root_com_state_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(const=True) def body_mass(self) -> wp.array: """Body mass ``wp.float32`` in the world frame. @@ -498,6 +556,7 @@ def body_mass(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(const=True) def body_inertia(self) -> wp.array: """Flattened body inertia in the world frame. @@ -508,6 +567,7 @@ def body_inertia(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="body_pose") def body_link_pose_w(self) -> wp.array: """Body link pose ``[pos, quat]`` in simulation world frame. @@ -521,6 +581,7 @@ def body_link_pose_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_source="body_pose") def body_link_vel_w(self) -> wp.array: """Body link velocity ``[lin_vel, ang_vel]`` in simulation world frame. @@ -534,6 +595,7 @@ def body_link_vel_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="body_pose") def body_com_pose_w(self) -> wp.array: """Body center of mass pose ``[pos, quat]`` in simulation world frame. 
@@ -547,6 +609,7 @@ def body_com_pose_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_source="body_pose") def body_com_vel_w(self) -> wp.array: """Body center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. @@ -560,24 +623,28 @@ def body_com_vel_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind="state/body/state") def body_state_w(self) -> wp.array: """Deprecated, same as :attr:`body_link_pose_w` and :attr:`body_com_vel_w`.""" raise NotImplementedError @property @abstractmethod + @leapp_tensor_semantics(kind="state/body/link_state") def body_link_state_w(self) -> wp.array: """Deprecated, same as :attr:`body_link_pose_w` and :attr:`body_link_vel_w`.""" raise NotImplementedError @property @abstractmethod + @leapp_tensor_semantics(kind="state/body/com_state") def body_com_state_w(self) -> wp.array: """Deprecated, same as :attr:`body_com_pose_w` and :attr:`body_com_vel_w`.""" raise NotImplementedError @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ACC, element_names_source="body_pose") def body_com_acc_w(self) -> wp.array: """Acceleration of all bodies center of mass ``[lin_acc, ang_acc]``. @@ -590,6 +657,7 @@ def body_com_acc_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="body_pose") def body_com_pose_b(self) -> wp.array: """Center of mass pose ``[pos, quat]`` of all bodies in their respective body's link frames. @@ -603,6 +671,7 @@ def body_com_pose_b(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.WRENCH) def body_incoming_joint_wrench_b(self) -> wp.array: """Joint reaction wrench applied from body parent to child body in parent body frame. 
@@ -624,6 +693,7 @@ def body_incoming_joint_wrench_b(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.JOINT_POSITION, element_names_source="joint_names") def joint_pos(self) -> wp.array: """Joint positions of all joints. @@ -634,6 +704,7 @@ def joint_pos(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.JOINT_VELOCITY, element_names_source="joint_names") def joint_vel(self) -> wp.array: """Joint velocities of all joints. @@ -644,6 +715,7 @@ def joint_vel(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind="state/joint/acceleration", element_names_source="joint_names") def joint_acc(self) -> wp.array: """Joint acceleration of all joints. @@ -658,6 +730,7 @@ def joint_acc(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.VECTOR3D, element_names=XYZ_ELEMENT_NAMES) def projected_gravity_b(self) -> wp.array: """Projection of the gravity direction on base frame. @@ -667,6 +740,7 @@ def projected_gravity_b(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind="state/body/heading") def heading_w(self) -> wp.array: """Yaw heading of the base frame (in radians). @@ -680,6 +754,7 @@ def heading_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names=XYZ_ELEMENT_NAMES) def root_link_lin_vel_b(self) -> wp.array: """Root link linear velocity in base frame. @@ -692,6 +767,7 @@ def root_link_lin_vel_b(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names=XYZ_ELEMENT_NAMES) def root_link_ang_vel_b(self) -> wp.array: """Root link angular velocity in base frame. 
@@ -704,6 +780,7 @@ def root_link_ang_vel_b(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names=XYZ_ELEMENT_NAMES) def root_com_lin_vel_b(self) -> wp.array: """Root center of mass linear velocity in base frame. @@ -716,6 +793,7 @@ def root_com_lin_vel_b(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names=XYZ_ELEMENT_NAMES) def root_com_ang_vel_b(self) -> wp.array: """Root center of mass angular velocity in base frame. @@ -732,6 +810,7 @@ def root_com_ang_vel_b(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names=XYZ_ELEMENT_NAMES) def root_link_pos_w(self) -> wp.array: """Root link position in simulation world frame. @@ -743,6 +822,7 @@ def root_link_pos_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names=QUAT_WXYZ_ELEMENT_NAMES) def root_link_quat_w(self) -> wp.array: """Root link orientation (x, y, z, w) in simulation world frame. @@ -754,6 +834,7 @@ def root_link_quat_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names=XYZ_ELEMENT_NAMES) def root_link_lin_vel_w(self) -> wp.array: """Root linear velocity in simulation world frame. @@ -765,6 +846,7 @@ def root_link_lin_vel_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names=XYZ_ELEMENT_NAMES) def root_link_ang_vel_w(self) -> wp.array: """Root link angular velocity in simulation world frame. @@ -776,6 +858,7 @@ def root_link_ang_vel_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names=XYZ_ELEMENT_NAMES) def root_com_pos_w(self) -> wp.array: """Root center of mass position in simulation world frame. 
@@ -787,6 +870,7 @@ def root_com_pos_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names=QUAT_WXYZ_ELEMENT_NAMES) def root_com_quat_w(self) -> wp.array: """Root center of mass orientation (x, y, z, w) in simulation world frame. @@ -798,6 +882,7 @@ def root_com_quat_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names=XYZ_ELEMENT_NAMES) def root_com_lin_vel_w(self) -> wp.array: """Root center of mass linear velocity in simulation world frame. @@ -809,6 +894,7 @@ def root_com_lin_vel_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names=XYZ_ELEMENT_NAMES) def root_com_ang_vel_w(self) -> wp.array: """Root center of mass angular velocity in simulation world frame. @@ -820,6 +906,7 @@ def root_com_ang_vel_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") def body_link_pos_w(self) -> wp.array: """Positions of all bodies in simulation world frame. @@ -832,6 +919,7 @@ def body_link_pos_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") def body_link_quat_w(self) -> wp.array: """Orientation (x, y, z, w) of all bodies in simulation world frame. @@ -844,6 +932,7 @@ def body_link_quat_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") def body_link_lin_vel_w(self) -> wp.array: """Linear velocity of all bodies in simulation world frame. 
@@ -856,6 +945,7 @@ def body_link_lin_vel_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") def body_link_ang_vel_w(self) -> wp.array: """Angular velocity of all bodies in simulation world frame. @@ -868,6 +958,7 @@ def body_link_ang_vel_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") def body_com_pos_w(self) -> wp.array: """Positions of all bodies in simulation world frame. @@ -880,6 +971,7 @@ def body_com_pos_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") def body_com_quat_w(self) -> wp.array: """Orientation (x, y, z, w) of the principal axes of inertia of all bodies in simulation world frame. @@ -892,6 +984,7 @@ def body_com_quat_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") def body_com_lin_vel_w(self) -> wp.array: """Linear velocity of all bodies in simulation world frame. @@ -904,6 +997,7 @@ def body_com_lin_vel_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") def body_com_ang_vel_w(self) -> wp.array: """Angular velocity of all bodies in simulation world frame. @@ -916,6 +1010,7 @@ def body_com_ang_vel_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_ACCELERATION, element_names_source="body_xyz") def body_com_lin_acc_w(self) -> wp.array: """Linear acceleration of all bodies in simulation world frame. 
@@ -928,6 +1023,7 @@ def body_com_lin_acc_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_ACCELERATION, element_names_source="body_xyz") def body_com_ang_acc_w(self) -> wp.array: """Angular acceleration of all bodies in simulation world frame. @@ -940,6 +1036,7 @@ def body_com_ang_acc_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") def body_com_pos_b(self) -> wp.array: """Center of mass position of all of the bodies in their respective link frames. @@ -952,6 +1049,7 @@ def body_com_pos_b(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") def body_com_quat_b(self) -> wp.array: """Orientation (x, y, z, w) of the principal axes of inertia of all of the bodies in their respective link frames. @@ -989,121 +1087,145 @@ def _create_buffers(self) -> None: """ @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names=POSE7_ELEMENT_NAMES) def root_pose_w(self) -> wp.array: """Shorthand for :attr:`root_link_pose_w`.""" return self.root_link_pose_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names=XYZ_ELEMENT_NAMES) def root_pos_w(self) -> wp.array: """Shorthand for :attr:`root_link_pos_w`.""" return self.root_link_pos_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names=QUAT_WXYZ_ELEMENT_NAMES) def root_quat_w(self) -> wp.array: """Shorthand for :attr:`root_link_quat_w`.""" return self.root_link_quat_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names=WRENCH6_ELEMENT_NAMES) def root_vel_w(self) -> wp.array: """Shorthand for :attr:`root_com_vel_w`.""" return self.root_com_vel_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names=XYZ_ELEMENT_NAMES) def root_lin_vel_w(self) -> wp.array: 
"""Shorthand for :attr:`root_com_lin_vel_w`.""" return self.root_com_lin_vel_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names=XYZ_ELEMENT_NAMES) def root_ang_vel_w(self) -> wp.array: """Shorthand for :attr:`root_com_ang_vel_w`.""" return self.root_com_ang_vel_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names=XYZ_ELEMENT_NAMES) def root_lin_vel_b(self) -> wp.array: """Shorthand for :attr:`root_com_lin_vel_b`.""" return self.root_com_lin_vel_b @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names=XYZ_ELEMENT_NAMES) def root_ang_vel_b(self) -> wp.array: """Shorthand for :attr:`root_com_ang_vel_b`.""" return self.root_com_ang_vel_b @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="body_pose") def body_pose_w(self) -> wp.array: """Shorthand for :attr:`body_link_pose_w`.""" return self.body_link_pose_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") def body_pos_w(self) -> wp.array: """Shorthand for :attr:`body_link_pos_w`.""" return self.body_link_pos_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") def body_quat_w(self) -> wp.array: """Shorthand for :attr:`body_link_quat_w`.""" return self.body_link_quat_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_source="body_wrench") def body_vel_w(self) -> wp.array: """Shorthand for :attr:`body_com_vel_w`.""" return self.body_com_vel_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") def body_lin_vel_w(self) -> wp.array: """Shorthand for :attr:`body_com_lin_vel_w`.""" return self.body_com_lin_vel_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") def body_ang_vel_w(self) -> wp.array: """Shorthand for 
:attr:`body_com_ang_vel_w`.""" return self.body_com_ang_vel_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ACC, element_names_source="body_wrench") def body_acc_w(self) -> wp.array: """Shorthand for :attr:`body_com_acc_w`.""" return self.body_com_acc_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_ACCELERATION, element_names_source="body_xyz") def body_lin_acc_w(self) -> wp.array: """Shorthand for :attr:`body_com_lin_acc_w`.""" return self.body_com_lin_acc_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_ACCELERATION, element_names_source="body_xyz") def body_ang_acc_w(self) -> wp.array: """Shorthand for :attr:`body_com_ang_acc_w`.""" return self.body_com_ang_acc_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") def com_pos_b(self) -> wp.array: """Shorthand for :attr:`body_com_pos_b`.""" return self.body_com_pos_b @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") def com_quat_b(self) -> wp.array: """Shorthand for :attr:`body_com_quat_b`.""" return self.body_com_quat_b @property + @leapp_tensor_semantics(const=True) def joint_limits(self) -> wp.array: """Shorthand for :attr:`joint_pos_limits`.""" return self.joint_pos_limits @property + @leapp_tensor_semantics(const=True) def default_joint_limits(self) -> wp.array: """Shorthand for :attr:`default_joint_pos_limits`.""" return self.default_joint_pos_limits @property + @leapp_tensor_semantics(const=True) def joint_velocity_limits(self) -> wp.array: """Shorthand for :attr:`joint_vel_limits`.""" return self.joint_vel_limits @property + @leapp_tensor_semantics(const=True) def joint_friction(self) -> wp.array: """Shorthand for :attr:`joint_friction_coeff`.""" return self.joint_friction_coeff @property + @leapp_tensor_semantics(const=True) def fixed_tendon_limit(self) -> wp.array: """Shorthand for :attr:`fixed_tendon_pos_limits`.""" return 
self.fixed_tendon_pos_limits @@ -1113,6 +1235,7 @@ def fixed_tendon_limit(self) -> wp.array: """ @property + @leapp_tensor_semantics(const=True) def default_mass(self) -> wp.array: """Deprecated property. Please use :attr:`body_mass` instead and manage the default mass manually.""" warnings.warn( @@ -1126,6 +1249,7 @@ def default_mass(self) -> wp.array: return self._default_mass @property + @leapp_tensor_semantics(const=True) def default_inertia(self) -> wp.array: """Deprecated property. Please use :attr:`body_inertia` instead and manage the default inertia manually.""" warnings.warn( @@ -1139,6 +1263,7 @@ def default_inertia(self) -> wp.array: return self._default_inertia @property + @leapp_tensor_semantics(const=True) def default_joint_stiffness(self) -> wp.array: """Deprecated property. Please use :attr:`joint_stiffness` instead and manage the default joint stiffness manually.""" @@ -1153,6 +1278,7 @@ def default_joint_stiffness(self) -> wp.array: return self._default_joint_stiffness @property + @leapp_tensor_semantics(const=True) def default_joint_damping(self) -> wp.array: """Deprecated property. Please use :attr:`joint_damping` instead and manage the default joint damping manually.""" @@ -1167,6 +1293,7 @@ def default_joint_damping(self) -> wp.array: return self._default_joint_damping @property + @leapp_tensor_semantics(const=True) def default_joint_armature(self) -> wp.array: """Deprecated property. Please use :attr:`joint_armature` instead and manage the default joint armature manually.""" @@ -1181,6 +1308,7 @@ def default_joint_armature(self) -> wp.array: return self._default_joint_armature @property + @leapp_tensor_semantics(const=True) def default_joint_friction_coeff(self) -> wp.array: """Deprecated property. 
Please use :attr:`joint_friction_coeff` instead and manage the default joint friction coefficient manually.""" @@ -1195,6 +1323,7 @@ def default_joint_friction_coeff(self) -> wp.array: return self._default_joint_friction_coeff @property + @leapp_tensor_semantics(const=True) def default_joint_viscous_friction_coeff(self) -> wp.array: """Deprecated property. Please use :attr:`joint_viscous_friction_coeff` instead and manage the default joint viscous friction coefficient manually.""" @@ -1209,6 +1338,7 @@ def default_joint_viscous_friction_coeff(self) -> wp.array: return self._default_joint_viscous_friction_coeff @property + @leapp_tensor_semantics(const=True) def default_joint_pos_limits(self) -> wp.array: """Deprecated property. Please use :attr:`joint_pos_limits` instead and manage the default joint position limits manually.""" @@ -1223,6 +1353,7 @@ def default_joint_pos_limits(self) -> wp.array: return self._default_joint_pos_limits @property + @leapp_tensor_semantics(const=True) def default_fixed_tendon_stiffness(self) -> wp.array: """Deprecated property. Please use :attr:`fixed_tendon_stiffness` instead and manage the default fixed tendon stiffness manually.""" @@ -1237,6 +1368,7 @@ def default_fixed_tendon_stiffness(self) -> wp.array: return self._default_fixed_tendon_stiffness @property + @leapp_tensor_semantics(const=True) def default_fixed_tendon_damping(self) -> wp.array: """Deprecated property. Please use :attr:`fixed_tendon_damping` instead and manage the default fixed tendon damping manually.""" @@ -1251,6 +1383,7 @@ def default_fixed_tendon_damping(self) -> wp.array: return self._default_fixed_tendon_damping @property + @leapp_tensor_semantics(const=True) def default_fixed_tendon_limit_stiffness(self) -> wp.array: """Deprecated property. 
Please use :attr:`fixed_tendon_limit_stiffness` instead and manage the default fixed tendon limit stiffness manually.""" @@ -1265,6 +1398,7 @@ def default_fixed_tendon_limit_stiffness(self) -> wp.array: return self._default_fixed_tendon_limit_stiffness @property + @leapp_tensor_semantics(const=True) def default_fixed_tendon_rest_length(self) -> wp.array: """Deprecated property. Please use :attr:`fixed_tendon_rest_length` instead and manage the default fixed tendon rest length manually.""" @@ -1279,6 +1413,7 @@ def default_fixed_tendon_rest_length(self) -> wp.array: return self._default_fixed_tendon_rest_length @property + @leapp_tensor_semantics(const=True) def default_fixed_tendon_offset(self) -> wp.array: """Deprecated property. Please use :attr:`fixed_tendon_offset` instead and manage the default fixed tendon offset manually.""" @@ -1293,6 +1428,7 @@ def default_fixed_tendon_offset(self) -> wp.array: return self._default_fixed_tendon_offset @property + @leapp_tensor_semantics(const=True) def default_fixed_tendon_pos_limits(self) -> wp.array: """Deprecated property. Please use :attr:`fixed_tendon_pos_limits` instead and manage the default fixed tendon position limits manually.""" @@ -1307,6 +1443,7 @@ def default_fixed_tendon_pos_limits(self) -> wp.array: return self._default_fixed_tendon_pos_limits @property + @leapp_tensor_semantics(const=True) def default_spatial_tendon_stiffness(self) -> wp.array: """Deprecated property. Please use :attr:`spatial_tendon_stiffness` instead and manage the default spatial tendon stiffness manually.""" @@ -1321,6 +1458,7 @@ def default_spatial_tendon_stiffness(self) -> wp.array: return self._default_spatial_tendon_stiffness @property + @leapp_tensor_semantics(const=True) def default_spatial_tendon_damping(self) -> wp.array: """Deprecated property. 
Please use :attr:`spatial_tendon_damping` instead and manage the default spatial tendon damping manually.""" @@ -1335,6 +1473,7 @@ def default_spatial_tendon_damping(self) -> wp.array: return self._default_spatial_tendon_damping @property + @leapp_tensor_semantics(const=True) def default_spatial_tendon_limit_stiffness(self) -> wp.array: """Deprecated property. Please use :attr:`spatial_tendon_limit_stiffness` instead and manage the default spatial tendon limit stiffness manually.""" @@ -1349,6 +1488,7 @@ def default_spatial_tendon_limit_stiffness(self) -> wp.array: return self._default_spatial_tendon_limit_stiffness @property + @leapp_tensor_semantics(const=True) def default_spatial_tendon_offset(self) -> wp.array: """Deprecated property. Please use :attr:`spatial_tendon_offset` instead and manage the default spatial tendon offset manually.""" @@ -1363,6 +1503,7 @@ def default_spatial_tendon_offset(self) -> wp.array: return self._default_spatial_tendon_offset @property + @leapp_tensor_semantics(const=True) def default_fixed_tendon_limit(self) -> wp.array: """Deprecated property. Please use :attr:`default_fixed_tendon_pos_limits` instead.""" warnings.warn( @@ -1374,6 +1515,7 @@ def default_fixed_tendon_limit(self) -> wp.array: return self.default_fixed_tendon_pos_limits @property + @leapp_tensor_semantics(const=True) def default_joint_friction(self) -> wp.array: """Deprecated property. 
Please use :attr:`default_joint_friction_coeff` instead.""" warnings.warn( diff --git a/source/isaaclab/isaaclab/utils/leapp_semantics.py b/source/isaaclab/isaaclab/utils/leapp_semantics.py index 3a9c53c8c7e8..716b327576ce 100644 --- a/source/isaaclab/isaaclab/utils/leapp_semantics.py +++ b/source/isaaclab/isaaclab/utils/leapp_semantics.py @@ -20,6 +20,7 @@ class LeappTensorSemantics: kind: Any = None element_names: list[str] | list[list[str]] | None = None element_names_source: str | None = None + const: bool = False XYZ_ELEMENT_NAMES: list[str] = ["x", "y", "z"] @@ -33,6 +34,7 @@ def leapp_tensor_semantics( kind: Any = None, element_names: list[str] | list[list[str]] | None = None, element_names_source: str | None = None, + const: bool = False, ) -> Callable: """Attach LEAPP semantic metadata to a raw tensor-producing function.""" @@ -40,6 +42,7 @@ def leapp_tensor_semantics( kind=kind, element_names=element_names, element_names_source=element_names_source, + const=const, ) def _apply(func: Callable) -> Callable: @@ -116,10 +119,4 @@ def resolve_leapp_element_names(semantics: LeappTensorSemantics | None, data_sel if body_names is None: return None return [body_names, WRENCH6_ELEMENT_NAMES] - if source == "pose7": - return POSE7_ELEMENT_NAMES - if source == "xyz": - return XYZ_ELEMENT_NAMES - if source == "quat_wxyz": - return QUAT_WXYZ_ELEMENT_NAMES return None From 1b821c0aeedff5aac57b04677e839f678418613d Mon Sep 17 00:00:00 2001 From: Frank Lai Date: Mon, 23 Mar 2026 18:20:36 -0700 Subject: [PATCH 05/20] propagated tensor semantics to all input datatypes --- .../export_annotator.py | 43 ++++----- .../managed_environment_annotator/proxy.py | 50 ++++++---- .../rigid_object/base_rigid_object_data.py | 78 +++++++++++++++- .../base_rigid_object_collection_data.py | 91 ++++++++++++++++++- .../base_contact_sensor_data.py | 21 +++++ .../base_frame_transformer_data.py | 17 ++++ .../isaaclab/sensors/imu/base_imu_data.py | 5 + .../isaaclab/utils/leapp_semantics.py | 21 
+++++ 8 files changed, 282 insertions(+), 44 deletions(-) diff --git a/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/export_annotator.py b/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/export_annotator.py index 1213d1fdbbdc..2d95919a5a69 100644 --- a/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/export_annotator.py +++ b/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/export_annotator.py @@ -9,8 +9,8 @@ state property (e.g. ``joint_pos``) read by both an observation term and an action term resolves to one LEAPP input edge. -- Observation term functions see an _EnvProxy whose scene returns - _ArticulationProxy objects with annotating data proxies. +- Observation term functions see an ``_EnvProxy`` whose scene returns + ``_EntityProxy`` objects with annotating data proxies. - Action terms have their ``_asset`` attribute replaced with an _ArticulationWriteProxy that intercepts ``_leapp_semantics``-decorated @@ -19,7 +19,7 @@ Cache lifecycle (assuming single-env play-mode export): - compute_group() clear cache → obs terms populate cache + compute() clear cache → obs terms populate cache policy inference TracedTensors propagate through NN process_action() register_buffer for raw_actions apply_action() [tracing] reuse cached TracedTensors for state reads, @@ -27,11 +27,12 @@ then clear cache apply_action() [decim.] clear cache → fresh reads for simulation ... 
- compute_group() clear cache → fresh reads for next obs + compute() clear cache → fresh reads for next obs """ from __future__ import annotations +import inspect import logging from collections.abc import Callable from contextlib import suppress @@ -45,7 +46,7 @@ from isaaclab.managers import ManagerTermBase from isaaclab.utils.leapp_semantics import resolve_leapp_element_names -from .proxy import _ArticulationWriteProxy, _DataProxy, _EnvProxy, _ManagerTermProxy, _SceneProxy +from .proxy import _ArticulationWriteProxy, _DataProxy, _EnvProxy, _ManagerTermProxy from .utils import ensure_torch_tensor if TYPE_CHECKING: @@ -99,29 +100,32 @@ def __init__(self, task_name: str, export_method: str, required_obs_groups: set[ self.required_obs_groups = required_obs_groups self._annotated_tensor_cache: dict[tuple[int, str], torch.Tensor] = {} self._data_property_resolution_cache: dict[tuple[type, str], tuple[Callable, object] | None] = {} - self._write_method_resolution_cache: dict[tuple[type, str], tuple[Callable, object, object] | None] = {} + self._write_method_resolution_cache: dict[ + tuple[type, str], tuple[Callable, object, inspect.Signature] | None + ] = {} self._action_output_cache: list[TensorSemantics] = [] self._captured_write_term_names: set[str] = set() self._fallback_term_names: set[str] = set() self._pending_action_output_export: bool = False self._uses_last_action_state: bool = False - self._patched_history_state_names: dict[int, str] = {} def setup(self, env): """Patch observation and action managers on the unwrapped env.""" unwrapped = env.env.unwrapped - cache = self._annotated_tensor_cache - - scene_proxy = _SceneProxy(unwrapped.scene, self.task_name, self._data_property_resolution_cache, cache) - proxy_env = _EnvProxy(unwrapped, scene_proxy) + proxy_env = _EnvProxy( + unwrapped, + self.task_name, + self._data_property_resolution_cache, + self._annotated_tensor_cache, + ) self._disable_training_managers(unwrapped) 
self._patch_observation_manager(unwrapped.observation_manager, proxy_env) self._patch_history_buffers(unwrapped.observation_manager) self._patch_action_manager( unwrapped.action_manager, - cache, + self._annotated_tensor_cache, ) # ── Disable training-only managers ───────────────────────────── @@ -220,7 +224,6 @@ def patched_append(data: torch.Tensor): circular_buffer._leapp_original_append = original_append circular_buffer._append = patched_append - self._patched_history_state_names[id(circular_buffer)] = state_name def _patch_observation_manager(self, obs_manager, proxy_env): """Patch observation terms to use annotating proxies and disable noise.""" @@ -242,7 +245,6 @@ def _patch_observation_manager(self, obs_manager, proxy_env): term_cfg.noise = None original_compute = obs_manager.compute - original_compute_group = obs_manager.compute_group cache = self._annotated_tensor_cache def patched_compute(*args, **kwargs): @@ -250,12 +252,7 @@ def patched_compute(*args, **kwargs): cache.clear() return original_compute(*args, **kwargs) - def patched_compute_group(*args, **kwargs): - """Run the real compute_group using the current observation-pass cache.""" - return original_compute_group(*args, **kwargs) - obs_manager.compute = patched_compute - obs_manager.compute_group = patched_compute_group # ── Action manager patches ──────────────────────────────────── @@ -588,10 +585,10 @@ def patch_env_for_export( - Action terms route through proxy objects that annotate both data reads **and** ``Articulation`` write methods. - Data classes are discovered automatically by scanning the scene at - setup time — no hardcoded class list is required. Properties with - ``_leapp_semantics`` produce rich annotations; properties without it - are still traced so that no tensor is silently baked as a constant. + Data properties are resolved lazily through proxies — no hardcoded + class list is required. 
Properties with ``_leapp_semantics`` produce + rich annotations; properties without it are still traced so that no + tensor is silently baked as a constant. State reads are deduplicated across observation and action paths via a shared cache, so a property like ``joint_pos`` that is read by both an diff --git a/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/proxy.py b/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/proxy.py index 9b279d53ffae..f763a2154fed 100644 --- a/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/proxy.py +++ b/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/proxy.py @@ -7,13 +7,12 @@ import inspect from collections.abc import Callable -from typing import Any +from typing import Any, cast import torch from leapp import annotate from leapp.utils.tensor_description import TensorSemantics -from isaaclab.assets.articulation.base_articulation import BaseArticulation from isaaclab.managers import ManagerTermBase from isaaclab.utils.leapp_semantics import resolve_leapp_element_names @@ -67,7 +66,7 @@ def _resolve_annotated_property( def _resolve_annotated_method( method_resolution_cache: dict[tuple[type, str], tuple[Callable, Any, inspect.Signature] | None], - real_asset: BaseArticulation, + real_asset: Any, name: str, ) -> tuple[Callable, Any, inspect.Signature] | None: """Resolve a concrete bound method and inherited semantics metadata.""" @@ -335,9 +334,19 @@ class _EnvProxy: is forwarded transparently to the real env. 
""" - def __init__(self, real_env, scene_proxy: _SceneProxy): + def __init__( + self, + real_env, + task_name: str, + property_resolution_cache: dict[tuple[type, str], tuple[Callable, Any] | None], + cache: dict, + ): object.__setattr__(self, "_real_env", real_env) - object.__setattr__(self, "_scene_proxy", scene_proxy) + object.__setattr__( + self, + "_scene_proxy", + _SceneProxy(real_env.scene, task_name, property_resolution_cache, cache), + ) @property def scene(self): @@ -438,7 +447,7 @@ class _ArticulationWriteProxy: def __init__( self, - real_asset: BaseArticulation, + real_asset: Any, term_name: str, output_cache: list[TensorSemantics], method_resolution_cache: dict[tuple[type, str], tuple[Callable, Any, inspect.Signature] | None], @@ -478,20 +487,23 @@ def interceptor(*args, **kwargs): bound_args = signature.bind_partial(real_asset, *args, **kwargs) target = bound_args.arguments.get("target") - if isinstance(target, torch.Tensor): - joint_ids = bound_args.arguments.get("joint_ids") - output_cache.append( - TensorSemantics( - name=_unique_output_name(term_name, name, output_cache), - ref=target.clone(), - kind=semantics_meta.kind, - element_names=resolve_leapp_element_names( - semantics_meta, - _WriteJointNameContext(real_asset.joint_names, joint_ids), - ), - ) + if not isinstance(target, torch.Tensor): + return result + + target_tensor = cast(torch.Tensor, target) + joint_ids = bound_args.arguments.get("joint_ids") + output_cache.append( + TensorSemantics( + name=_unique_output_name(term_name, name, output_cache), + ref=target_tensor.clone(), + kind=semantics_meta.kind, + element_names=resolve_leapp_element_names( + semantics_meta, + _WriteJointNameContext(real_asset.joint_names, joint_ids), + ), ) - captured_write_term_names.add(term_name) + ) + captured_write_term_names.add(term_name) return result diff --git a/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py b/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py 
index e04a1ffa8bc1..b41c44f04731 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py +++ b/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py @@ -7,6 +7,15 @@ from abc import ABC, abstractmethod import warp as wp +from leapp import InputKindEnum + +from isaaclab.utils.leapp_semantics import ( + POSE7_ELEMENT_NAMES, + QUAT_WXYZ_ELEMENT_NAMES, + WRENCH6_ELEMENT_NAMES, + XYZ_ELEMENT_NAMES, + leapp_tensor_semantics, +) class BaseRigidObjectData(ABC): @@ -52,7 +61,7 @@ def update(self, dt: float) -> None: # Names. ## - body_names: list[str] = None + body_names: list[str] | None = None """Body names in the order parsed by the simulation view.""" ## @@ -61,6 +70,7 @@ def update(self, dt: float) -> None: @property @abstractmethod + @leapp_tensor_semantics(const=True) def default_root_pose(self) -> wp.array: """Default root pose ``[pos, quat]`` in local environment frame. @@ -71,6 +81,7 @@ def default_root_pose(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(const=True) def default_root_vel(self) -> wp.array: """Default root velocity ``[lin_vel, ang_vel]`` in local environment frame. @@ -91,6 +102,7 @@ def default_root_state(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names=POSE7_ELEMENT_NAMES) def root_link_pose_w(self) -> wp.array: """Root link pose ``[pos, quat]`` in simulation world frame. @@ -103,6 +115,7 @@ def root_link_pose_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names=WRENCH6_ELEMENT_NAMES) def root_link_vel_w(self) -> wp.array: """Root link velocity ``[lin_vel, ang_vel]`` in simulation world frame. 
@@ -115,6 +128,7 @@ def root_link_vel_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names=POSE7_ELEMENT_NAMES) def root_com_pose_w(self) -> wp.array: """Root center of mass pose ``[pos, quat]`` in simulation world frame. @@ -127,6 +141,7 @@ def root_com_pose_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names=WRENCH6_ELEMENT_NAMES) def root_com_vel_w(self) -> wp.array: """Root center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. @@ -139,18 +154,21 @@ def root_com_vel_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind="state/body/state") def root_state_w(self) -> wp.array: """Deprecated, same as :attr:`root_link_pose_w` and :attr:`root_com_vel_w`.""" raise NotImplementedError() @property @abstractmethod + @leapp_tensor_semantics(kind="state/body/link_state") def root_link_state_w(self) -> wp.array: """Deprecated, same as :attr:`root_link_pose_w` and :attr:`root_link_vel_w`.""" raise NotImplementedError() @property @abstractmethod + @leapp_tensor_semantics(kind="state/body/com_state") def root_com_state_w(self) -> wp.array: """Deprecated, same as :attr:`root_com_pose_w` and :attr:`root_com_vel_w`.""" raise NotImplementedError() @@ -161,6 +179,7 @@ def root_com_state_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="body_pose") def body_link_pose_w(self) -> wp.array: """Body link pose ``[pos, quat]`` in simulation world frame. @@ -174,6 +193,7 @@ def body_link_pose_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_source="body_wrench") def body_link_vel_w(self) -> wp.array: """Body link velocity ``[lin_vel, ang_vel]`` in simulation world frame.
@@ -187,6 +207,7 @@ def body_link_vel_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="body_pose") def body_com_pose_w(self) -> wp.array: """Body center of mass pose ``[pos, quat]`` in simulation world frame. @@ -200,6 +221,7 @@ def body_com_pose_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_source="body_wrench") def body_com_vel_w(self) -> wp.array: """Body center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. @@ -213,24 +235,28 @@ def body_com_vel_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind="state/body/state") def body_state_w(self) -> wp.array: """Deprecated, same as :attr:`body_link_pose_w` and :attr:`body_com_vel_w`.""" raise NotImplementedError() @property @abstractmethod + @leapp_tensor_semantics(kind="state/body/link_state") def body_link_state_w(self) -> wp.array: """Deprecated, same as :attr:`body_link_pose_w` and :attr:`body_link_vel_w`.""" raise NotImplementedError() @property @abstractmethod + @leapp_tensor_semantics(kind="state/body/com_state") def body_com_state_w(self) -> wp.array: """Deprecated, same as :attr:`body_com_pose_w` and :attr:`body_com_vel_w`.""" raise NotImplementedError() @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ACC, element_names_source="body_wrench") def body_com_acc_w(self) -> wp.array: """Acceleration of all bodies ``[lin_acc, ang_acc]`` in the simulation world frame. @@ -243,6 +269,7 @@ def body_com_acc_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="body_pose") def body_com_pose_b(self) -> wp.array: """Center of mass pose ``[pos, quat]`` of all bodies in their respective body's link frames.
@@ -256,6 +283,7 @@ def body_com_pose_b(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(const=True) def body_mass(self) -> wp.array: """Mass of all bodies in the simulation world frame. @@ -266,6 +294,7 @@ def body_mass(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(const=True) def body_inertia(self) -> wp.array: """Inertia of all bodies in the simulation world frame. @@ -280,6 +309,7 @@ def body_inertia(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.VECTOR3D, element_names=XYZ_ELEMENT_NAMES) def projected_gravity_b(self) -> wp.array: """Projection of the gravity direction on base frame. @@ -289,6 +319,7 @@ def projected_gravity_b(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind="state/body/heading") def heading_w(self) -> wp.array: """Yaw heading of the base frame (in radians). @@ -302,6 +333,7 @@ def heading_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names=XYZ_ELEMENT_NAMES) def root_link_lin_vel_b(self) -> wp.array: """Root link linear velocity in base frame. @@ -314,6 +346,7 @@ def root_link_lin_vel_b(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names=XYZ_ELEMENT_NAMES) def root_link_ang_vel_b(self) -> wp.array: """Root link angular velocity in base frame. @@ -326,6 +359,7 @@ def root_link_ang_vel_b(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names=XYZ_ELEMENT_NAMES) def root_com_lin_vel_b(self) -> wp.array: """Root center of mass linear velocity in base frame. 
@@ -338,6 +372,7 @@ def root_com_lin_vel_b(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names=XYZ_ELEMENT_NAMES) def root_com_ang_vel_b(self) -> wp.array: """Root center of mass angular velocity in base frame. @@ -354,6 +389,7 @@ def root_com_ang_vel_b(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names=XYZ_ELEMENT_NAMES) def root_link_pos_w(self) -> wp.array: """Root link position in simulation world frame. @@ -365,6 +401,7 @@ def root_link_pos_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names=QUAT_WXYZ_ELEMENT_NAMES) def root_link_quat_w(self) -> wp.array: """Root link orientation (x, y, z, w) in simulation world frame. @@ -376,6 +413,7 @@ def root_link_quat_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names=XYZ_ELEMENT_NAMES) def root_link_lin_vel_w(self) -> wp.array: """Root linear velocity in simulation world frame. @@ -387,6 +425,7 @@ def root_link_lin_vel_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names=XYZ_ELEMENT_NAMES) def root_link_ang_vel_w(self) -> wp.array: """Root link angular velocity in simulation world frame. @@ -398,6 +437,7 @@ def root_link_ang_vel_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names=XYZ_ELEMENT_NAMES) def root_com_pos_w(self) -> wp.array: """Root center of mass position in simulation world frame. @@ -409,6 +449,7 @@ def root_com_pos_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names=QUAT_WXYZ_ELEMENT_NAMES) def root_com_quat_w(self) -> wp.array: """Root center of mass orientation (x, y, z, w) in simulation world frame. 
@@ -420,6 +461,7 @@ def root_com_quat_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names=XYZ_ELEMENT_NAMES) def root_com_lin_vel_w(self) -> wp.array: """Root center of mass linear velocity in simulation world frame. @@ -431,6 +473,7 @@ def root_com_lin_vel_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names=XYZ_ELEMENT_NAMES) def root_com_ang_vel_w(self) -> wp.array: """Root center of mass angular velocity in simulation world frame. @@ -442,6 +485,7 @@ def root_com_ang_vel_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") def body_link_pos_w(self) -> wp.array: """Positions of all bodies in simulation world frame. @@ -453,6 +497,7 @@ def body_link_pos_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") def body_link_quat_w(self) -> wp.array: """Orientation (x, y, z, w) of all bodies in simulation world frame. @@ -464,6 +509,7 @@ def body_link_quat_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") def body_link_lin_vel_w(self) -> wp.array: """Linear velocity of all bodies in simulation world frame. @@ -475,6 +521,7 @@ def body_link_lin_vel_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") def body_link_ang_vel_w(self) -> wp.array: """Angular velocity of all bodies in simulation world frame. 
@@ -486,6 +533,7 @@ def body_link_ang_vel_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") def body_com_pos_w(self) -> wp.array: """Positions of all bodies' center of mass in simulation world frame. @@ -497,6 +545,7 @@ def body_com_pos_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") def body_com_quat_w(self) -> wp.array: """Orientation (x, y, z, w) of the principal axes of inertia of all bodies in simulation world frame. @@ -508,6 +557,7 @@ def body_com_quat_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") def body_com_lin_vel_w(self) -> wp.array: """Linear velocity of all bodies in simulation world frame. @@ -519,6 +569,7 @@ def body_com_lin_vel_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") def body_com_ang_vel_w(self) -> wp.array: """Angular velocity of all bodies in simulation world frame. @@ -530,6 +581,7 @@ def body_com_ang_vel_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_ACCELERATION, element_names_source="body_xyz") def body_com_lin_acc_w(self) -> wp.array: """Linear acceleration of all bodies in simulation world frame. @@ -541,6 +593,7 @@ def body_com_lin_acc_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_ACCELERATION, element_names_source="body_xyz") def body_com_ang_acc_w(self) -> wp.array: """Angular acceleration of all bodies in simulation world frame. 
@@ -552,6 +605,7 @@ def body_com_ang_acc_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") def body_com_pos_b(self) -> wp.array: """Center of mass position of all of the bodies in their respective link frames. @@ -563,6 +617,7 @@ def body_com_pos_b(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") def body_com_quat_b(self) -> wp.array: """Orientation (x, y, z, w) of the principal axes of inertia of all of the bodies in their respective link frames. @@ -583,96 +638,115 @@ def _create_buffers(self) -> None: """ @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names=POSE7_ELEMENT_NAMES) def root_pose_w(self) -> wp.array: """Shorthand for :attr:`root_link_pose_w`.""" return self.root_link_pose_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names=XYZ_ELEMENT_NAMES) def root_pos_w(self) -> wp.array: """Shorthand for :attr:`root_link_pos_w`.""" return self.root_link_pos_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names=QUAT_WXYZ_ELEMENT_NAMES) def root_quat_w(self) -> wp.array: """Shorthand for :attr:`root_link_quat_w`.""" return self.root_link_quat_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names=WRENCH6_ELEMENT_NAMES) def root_vel_w(self) -> wp.array: """Shorthand for :attr:`root_com_vel_w`.""" return self.root_com_vel_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names=XYZ_ELEMENT_NAMES) def root_lin_vel_w(self) -> wp.array: """Shorthand for :attr:`root_com_lin_vel_w`.""" return self.root_com_lin_vel_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names=XYZ_ELEMENT_NAMES) def root_ang_vel_w(self) -> wp.array: """Shorthand for :attr:`root_com_ang_vel_w`.""" return self.root_com_ang_vel_w 
@property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names=XYZ_ELEMENT_NAMES) def root_lin_vel_b(self) -> wp.array: """Shorthand for :attr:`root_com_lin_vel_b`.""" return self.root_com_lin_vel_b @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names=XYZ_ELEMENT_NAMES) def root_ang_vel_b(self) -> wp.array: """Shorthand for :attr:`root_com_ang_vel_b`.""" return self.root_com_ang_vel_b @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="body_pose") def body_pose_w(self) -> wp.array: """Shorthand for :attr:`body_link_pose_w`.""" return self.body_link_pose_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") def body_pos_w(self) -> wp.array: """Shorthand for :attr:`body_link_pos_w`.""" return self.body_link_pos_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") def body_quat_w(self) -> wp.array: """Shorthand for :attr:`body_link_quat_w`.""" return self.body_link_quat_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_source="body_wrench") def body_vel_w(self) -> wp.array: """Shorthand for :attr:`body_com_vel_w`.""" return self.body_com_vel_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") def body_lin_vel_w(self) -> wp.array: """Shorthand for :attr:`body_com_lin_vel_w`.""" return self.body_com_lin_vel_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") def body_ang_vel_w(self) -> wp.array: """Shorthand for :attr:`body_com_ang_vel_w`.""" return self.body_com_ang_vel_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ACC, element_names_source="body_wrench") def body_acc_w(self) -> wp.array: """Shorthand for :attr:`body_com_acc_w`.""" return self.body_com_acc_w @property + 
@leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_ACCELERATION, element_names_source="body_xyz") def body_lin_acc_w(self) -> wp.array: """Shorthand for :attr:`body_com_lin_acc_w`.""" return self.body_com_lin_acc_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_ACCELERATION, element_names_source="body_xyz") def body_ang_acc_w(self) -> wp.array: """Shorthand for :attr:`body_com_ang_acc_w`.""" return self.body_com_ang_acc_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") def com_pos_b(self) -> wp.array: """Shorthand for :attr:`body_com_pos_b`.""" return self.body_com_pos_b @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") def com_quat_b(self) -> wp.array: """Shorthand for :attr:`body_com_quat_b`.""" return self.body_com_quat_b @@ -682,6 +756,7 @@ def com_quat_b(self) -> wp.array: """ @property + @leapp_tensor_semantics(const=True) def default_mass(self) -> wp.array: """Deprecated property. Please use :attr:`body_mass` instead and manage the default mass manually.""" warnings.warn( @@ -695,6 +770,7 @@ def default_mass(self) -> wp.array: return self._default_mass @property + @leapp_tensor_semantics(const=True) def default_inertia(self) -> wp.array: """Deprecated property. 
Please use :attr:`body_inertia` instead and manage the default inertia manually.""" warnings.warn( diff --git a/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py b/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py index 1913bb4bef9b..30cbd06c50cd 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py +++ b/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py @@ -7,6 +7,11 @@ from abc import ABC, abstractmethod import warp as wp +from leapp import InputKindEnum + +from isaaclab.utils.leapp_semantics import ( + leapp_tensor_semantics, +) class BaseRigidObjectCollectionData(ABC): @@ -54,7 +59,7 @@ def update(self, dt: float) -> None: # Names. ## - body_names: list[str] = None + body_names: list[str] | None = None """Body names in the order parsed by the simulation view.""" ## @@ -63,6 +68,7 @@ def update(self, dt: float) -> None: @property @abstractmethod + @leapp_tensor_semantics(const=True) def default_body_pose(self) -> wp.array: """Default body pose ``[pos, quat]`` in local environment frame. @@ -74,6 +80,7 @@ def default_body_pose(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(const=True) def default_body_vel(self) -> wp.array: """Default body velocity ``[lin_vel, ang_vel]`` in local environment frame. @@ -85,6 +92,7 @@ def default_body_vel(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(const=True) def default_body_state(self) -> wp.array: """Deprecated, same as :attr:`default_body_pose` and :attr:`default_body_vel`.""" raise NotImplementedError() @@ -95,6 +103,7 @@ def default_body_state(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="body_pose") def body_link_pose_w(self) -> wp.array: """Body link pose ``[pos, quat]`` in simulation world frame. 
@@ -108,6 +117,7 @@ def body_link_pose_w(self) -> wp.array:
 
     @property
     @abstractmethod
+    @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_source="body_wrench")
     def body_link_vel_w(self) -> wp.array:
         """Body link velocity ``[lin_vel, ang_vel]`` in simulation world frame.
 
@@ -121,6 +131,7 @@ def body_link_vel_w(self) -> wp.array:
 
     @property
     @abstractmethod
+    @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="body_pose")
     def body_com_pose_w(self) -> wp.array:
         """Body center of mass pose ``[pos, quat]`` in simulation world frame.
 
@@ -134,6 +145,7 @@ def body_com_pose_w(self) -> wp.array:
 
     @property
     @abstractmethod
+    @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_source="body_wrench")
     def body_com_vel_w(self) -> wp.array:
         """Body center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame.
 
@@ -147,24 +159,28 @@ def body_com_vel_w(self) -> wp.array:
 
     @property
     @abstractmethod
+    @leapp_tensor_semantics(kind="state/body/state")
     def body_state_w(self) -> wp.array:
         """Deprecated, same as :attr:`body_link_pose_w` and :attr:`body_com_vel_w`."""
         raise NotImplementedError()
 
     @property
     @abstractmethod
+    @leapp_tensor_semantics(kind="state/body/link_state")
     def body_link_state_w(self) -> wp.array:
         """Deprecated, same as :attr:`body_link_pose_w` and :attr:`body_link_vel_w`."""
         raise NotImplementedError()
 
     @property
     @abstractmethod
+    @leapp_tensor_semantics(kind="state/body/com_state")
     def body_com_state_w(self) -> wp.array:
         """Deprecated, same as :attr:`body_com_pose_w` and :attr:`body_com_vel_w`."""
         raise NotImplementedError()
 
     @property
     @abstractmethod
+    @leapp_tensor_semantics(kind=InputKindEnum.BODY_ACC, element_names_source="body_wrench")
     def body_com_acc_w(self) -> wp.array:
         """Acceleration of all bodies ``[lin_acc, ang_acc]`` in the simulation world frame.
@@ -177,6 +193,7 @@ def body_com_acc_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="body_pose") def body_com_pose_b(self) -> wp.array: """Center of mass pose ``[pos, quat]`` of all bodies in their respective body's link frames. @@ -190,6 +207,7 @@ def body_com_pose_b(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(const=True) def body_mass(self) -> wp.array: """Mass of all bodies in the simulation world frame. @@ -199,6 +217,7 @@ def body_mass(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(const=True) def body_inertia(self) -> wp.array: """Inertia of all bodies in the simulation world frame. @@ -213,6 +232,7 @@ def body_inertia(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.VECTOR3D, element_names_source="body_xyz") def projected_gravity_b(self) -> wp.array: """Projection of the gravity direction on base frame. @@ -223,6 +243,7 @@ def projected_gravity_b(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind="state/body/heading") def heading_w(self) -> wp.array: """Yaw heading of the base frame (in radians). @@ -237,6 +258,7 @@ def heading_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") def body_link_lin_vel_b(self) -> wp.array: """Root link linear velocity in base frame. @@ -250,6 +272,7 @@ def body_link_lin_vel_b(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") def body_link_ang_vel_b(self) -> wp.array: """Root link angular velocity in base frame. 
@@ -263,6 +286,7 @@ def body_link_ang_vel_b(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") def body_com_lin_vel_b(self) -> wp.array: """Root center of mass linear velocity in base frame. @@ -276,6 +300,7 @@ def body_com_lin_vel_b(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") def body_com_ang_vel_b(self) -> wp.array: """Root center of mass angular velocity in base frame. @@ -293,6 +318,7 @@ def body_com_ang_vel_b(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") def body_link_pos_w(self) -> wp.array: """Positions of all bodies in simulation world frame. @@ -305,6 +331,7 @@ def body_link_pos_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") def body_link_quat_w(self) -> wp.array: """Orientation (x, y, z, w) of all bodies in simulation world frame. @@ -317,6 +344,7 @@ def body_link_quat_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") def body_link_lin_vel_w(self) -> wp.array: """Linear velocity of all bodies in simulation world frame. @@ -329,6 +357,7 @@ def body_link_lin_vel_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") def body_link_ang_vel_w(self) -> wp.array: """Angular velocity of all bodies in simulation world frame. 
@@ -341,6 +370,7 @@ def body_link_ang_vel_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") def body_com_pos_w(self) -> wp.array: """Positions of all bodies' center of mass in simulation world frame. @@ -353,6 +383,7 @@ def body_com_pos_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") def body_com_quat_w(self) -> wp.array: """Orientation (x, y, z, w) of the principal axes of inertia of all bodies in simulation world frame. @@ -365,6 +396,7 @@ def body_com_quat_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") def body_com_lin_vel_w(self) -> wp.array: """Linear velocity of all bodies in simulation world frame. @@ -377,6 +409,7 @@ def body_com_lin_vel_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") def body_com_ang_vel_w(self) -> wp.array: """Angular velocity of all bodies in simulation world frame. @@ -389,6 +422,7 @@ def body_com_ang_vel_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_ACCELERATION, element_names_source="body_xyz") def body_com_lin_acc_w(self) -> wp.array: """Linear acceleration of all bodies in simulation world frame. @@ -401,6 +435,7 @@ def body_com_lin_acc_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_ACCELERATION, element_names_source="body_xyz") def body_com_ang_acc_w(self) -> wp.array: """Angular acceleration of all bodies in simulation world frame. 
@@ -413,6 +448,7 @@ def body_com_ang_acc_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") def body_com_pos_b(self) -> wp.array: """Center of mass position of all of the bodies in their respective link frames. @@ -425,6 +461,7 @@ def body_com_pos_b(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") def body_com_quat_b(self) -> wp.array: """Orientation (x, y, z, w) of the principal axes of inertia of all of the bodies in their respective link frames. @@ -441,56 +478,67 @@ def body_com_quat_b(self) -> wp.array: """ @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="body_pose") def body_pose_w(self) -> wp.array: """Shorthand for :attr:`body_link_pose_w`.""" return self.body_link_pose_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") def body_pos_w(self) -> wp.array: """Shorthand for :attr:`body_link_pos_w`.""" return self.body_link_pos_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") def body_quat_w(self) -> wp.array: """Shorthand for :attr:`body_link_quat_w`.""" return self.body_link_quat_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_source="body_wrench") def body_vel_w(self) -> wp.array: """Shorthand for :attr:`body_com_vel_w`.""" return self.body_com_vel_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") def body_lin_vel_w(self) -> wp.array: """Shorthand for :attr:`body_com_lin_vel_w`.""" return self.body_com_lin_vel_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") def body_ang_vel_w(self) -> wp.array: """Shorthand for :attr:`body_com_ang_vel_w`.""" return self.body_com_ang_vel_w 
@property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ACC, element_names_source="body_wrench") def body_acc_w(self) -> wp.array: """Shorthand for :attr:`body_com_acc_w`.""" return self.body_com_acc_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_ACCELERATION, element_names_source="body_xyz") def body_lin_acc_w(self) -> wp.array: """Shorthand for :attr:`body_com_lin_acc_w`.""" return self.body_com_lin_acc_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_ACCELERATION, element_names_source="body_xyz") def body_ang_acc_w(self) -> wp.array: """Shorthand for :attr:`body_com_ang_acc_w`.""" return self.body_com_ang_acc_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") def com_pos_b(self) -> wp.array: """Shorthand for :attr:`body_com_pos_b`.""" return self.body_com_pos_b @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") def com_quat_b(self) -> wp.array: """Shorthand for :attr:`body_com_quat_b`.""" return self.body_com_quat_b @@ -505,6 +553,7 @@ def _create_buffers(self): """ @property + @leapp_tensor_semantics(const=True) def default_object_pose(self) -> wp.array: """Deprecated property. Please use :attr:`default_body_pose` instead.""" warnings.warn( @@ -516,6 +565,7 @@ def default_object_pose(self) -> wp.array: return self.default_body_pose @property + @leapp_tensor_semantics(const=True) def default_object_vel(self) -> wp.array: """Deprecated property. Please use :attr:`default_body_vel` instead.""" warnings.warn( @@ -527,6 +577,7 @@ def default_object_vel(self) -> wp.array: return self.default_body_vel @property + @leapp_tensor_semantics(const=True) def default_object_state(self) -> wp.array: """Deprecated property. 
Please use :attr:`default_body_state` instead.""" warnings.warn( @@ -538,6 +589,7 @@ def default_object_state(self) -> wp.array: return self.default_body_state @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="body_pose") def object_link_pose_w(self): """Deprecated property. Please use :attr:`body_link_pose_w` instead.""" warnings.warn( @@ -549,6 +601,7 @@ def object_link_pose_w(self): return self.body_link_pose_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_source="body_pose") def object_link_vel_w(self): """Deprecated property. Please use :attr:`body_link_vel_w` instead.""" warnings.warn( @@ -560,6 +613,7 @@ def object_link_vel_w(self): return self.body_link_vel_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="body_pose") def object_com_pose_w(self): """Deprecated property. Please use :attr:`body_com_pose_w` instead.""" warnings.warn( @@ -571,6 +625,7 @@ def object_com_pose_w(self): return self.body_com_pose_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_source="body_pose") def object_com_vel_w(self): """Deprecated property. Please use :attr:`body_com_vel_w` instead.""" warnings.warn( @@ -582,6 +637,7 @@ def object_com_vel_w(self): return self.body_com_vel_w @property + @leapp_tensor_semantics(kind="state/body/state") def object_state_w(self): """Deprecated property. Please use :attr:`body_state_w` instead.""" warnings.warn( @@ -592,6 +648,7 @@ def object_state_w(self): return self.body_state_w @property + @leapp_tensor_semantics(kind="state/body/link_state") def object_link_state_w(self): """Deprecated property. Please use :attr:`body_link_state_w` instead.""" warnings.warn( @@ -603,6 +660,7 @@ def object_link_state_w(self): return self.body_link_state_w @property + @leapp_tensor_semantics(kind="state/body/com_state") def object_com_state_w(self): """Deprecated property. 
Please use :attr:`body_com_state_w` instead.""" warnings.warn( @@ -614,6 +672,7 @@ def object_com_state_w(self): return self.body_com_state_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ACC, element_names_source="body_pose") def object_com_acc_w(self): """Deprecated property. Please use :attr:`body_com_acc_w` instead.""" warnings.warn( @@ -625,6 +684,7 @@ def object_com_acc_w(self): return self.body_com_acc_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="body_pose") def object_com_pose_b(self): """Deprecated property. Please use :attr:`body_com_pose_b` instead.""" warnings.warn( @@ -636,6 +696,7 @@ def object_com_pose_b(self): return self.body_com_pose_b @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") def object_link_pos_w(self) -> wp.array: """Deprecated property. Please use :attr:`body_link_pos_w` instead.""" warnings.warn( @@ -647,6 +708,7 @@ def object_link_pos_w(self) -> wp.array: return self.body_link_pos_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") def object_link_quat_w(self) -> wp.array: """Deprecated property. Please use :attr:`body_link_quat_w` instead.""" warnings.warn( @@ -658,6 +720,7 @@ def object_link_quat_w(self) -> wp.array: return self.body_link_quat_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") def object_link_lin_vel_w(self) -> wp.array: """Deprecated property. Please use :attr:`body_link_lin_vel_w` instead.""" warnings.warn( @@ -669,6 +732,7 @@ def object_link_lin_vel_w(self) -> wp.array: return self.body_link_lin_vel_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") def object_link_ang_vel_w(self) -> wp.array: """Deprecated property. 
Please use :attr:`body_link_ang_vel_w` instead.""" warnings.warn( @@ -680,6 +744,7 @@ def object_link_ang_vel_w(self) -> wp.array: return self.body_link_ang_vel_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") def object_com_pos_w(self) -> wp.array: """Deprecated property. Please use :attr:`body_com_pos_w` instead.""" warnings.warn( @@ -691,6 +756,7 @@ def object_com_pos_w(self) -> wp.array: return self.body_com_pos_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") def object_com_quat_w(self) -> wp.array: """Deprecated property. Please use :attr:`body_com_quat_w` instead.""" warnings.warn( @@ -702,6 +768,7 @@ def object_com_quat_w(self) -> wp.array: return self.body_com_quat_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") def object_com_lin_vel_w(self) -> wp.array: """Deprecated property. Please use :attr:`body_com_lin_vel_w` instead.""" warnings.warn( @@ -713,6 +780,7 @@ def object_com_lin_vel_w(self) -> wp.array: return self.body_com_lin_vel_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") def object_com_ang_vel_w(self) -> wp.array: """Deprecated property. Please use :attr:`body_com_ang_vel_w` instead.""" warnings.warn( @@ -724,6 +792,7 @@ def object_com_ang_vel_w(self) -> wp.array: return self.body_com_ang_vel_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_ACCELERATION, element_names_source="body_xyz") def object_com_lin_acc_w(self) -> wp.array: """Deprecated property. Please use :attr:`body_com_lin_acc_w` instead.""" warnings.warn( @@ -735,6 +804,7 @@ def object_com_lin_acc_w(self) -> wp.array: return self.body_com_lin_acc_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_ACCELERATION, element_names_source="body_xyz") def object_com_ang_acc_w(self) -> wp.array: """Deprecated property. 
Please use :attr:`body_com_ang_acc_w` instead.""" warnings.warn( @@ -746,6 +816,7 @@ def object_com_ang_acc_w(self) -> wp.array: return self.body_com_ang_acc_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") def object_com_pos_b(self) -> wp.array: """Deprecated property. Please use :attr:`body_com_pos_b` instead.""" warnings.warn( @@ -757,6 +828,7 @@ def object_com_pos_b(self) -> wp.array: return self.body_com_pos_b @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") def object_com_quat_b(self) -> wp.array: """Deprecated property. Please use :attr:`body_com_quat_b` instead.""" warnings.warn( @@ -768,6 +840,7 @@ def object_com_quat_b(self) -> wp.array: return self.body_com_quat_b @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") def object_link_lin_vel_b(self) -> wp.array: """Deprecated property. Please use :attr:`body_link_lin_vel_b` instead.""" warnings.warn( @@ -779,6 +852,7 @@ def object_link_lin_vel_b(self) -> wp.array: return self.body_link_lin_vel_b @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") def object_link_ang_vel_b(self) -> wp.array: """Deprecated property. Please use :attr:`body_link_ang_vel_b` instead.""" warnings.warn( @@ -790,6 +864,7 @@ def object_link_ang_vel_b(self) -> wp.array: return self.body_link_ang_vel_b @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") def object_com_lin_vel_b(self) -> wp.array: """Deprecated property. Please use :attr:`body_com_lin_vel_b` instead.""" warnings.warn( @@ -801,6 +876,7 @@ def object_com_lin_vel_b(self) -> wp.array: return self.body_com_lin_vel_b @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") def object_com_ang_vel_b(self) -> wp.array: """Deprecated property. 
Please use :attr:`body_com_ang_vel_b` instead.""" warnings.warn( @@ -812,6 +888,7 @@ def object_com_ang_vel_b(self) -> wp.array: return self.body_com_ang_vel_b @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="body_pose") def object_pose_w(self) -> wp.array: """Deprecated property. Please use :attr:`body_link_pose_w` instead.""" warnings.warn( @@ -822,6 +899,7 @@ def object_pose_w(self) -> wp.array: return self.body_link_pose_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") def object_pos_w(self) -> wp.array: """Deprecated property. Please use :attr:`body_link_pos_w` instead.""" warnings.warn( @@ -832,6 +910,7 @@ def object_pos_w(self) -> wp.array: return self.body_link_pos_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") def object_quat_w(self) -> wp.array: """Deprecated property. Please use :attr:`body_link_quat_w` instead.""" warnings.warn( @@ -842,6 +921,7 @@ def object_quat_w(self) -> wp.array: return self.body_link_quat_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_source="body_wrench") def object_vel_w(self) -> wp.array: """Deprecated property. Please use :attr:`body_com_vel_w` instead.""" warnings.warn( @@ -852,6 +932,7 @@ def object_vel_w(self) -> wp.array: return self.body_com_vel_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") def object_lin_vel_w(self) -> wp.array: """Deprecated property. Please use :attr:`body_com_lin_vel_w` instead.""" warnings.warn( @@ -863,6 +944,7 @@ def object_lin_vel_w(self) -> wp.array: return self.body_com_lin_vel_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") def object_ang_vel_w(self) -> wp.array: """Deprecated property. 
Please use :attr:`body_com_ang_vel_w` instead.""" warnings.warn( @@ -874,6 +956,7 @@ def object_ang_vel_w(self) -> wp.array: return self.body_com_ang_vel_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") def object_lin_vel_b(self) -> wp.array: """Deprecated property. Please use :attr:`body_com_lin_vel_b` instead.""" warnings.warn( @@ -885,6 +968,7 @@ def object_lin_vel_b(self) -> wp.array: return self.body_com_lin_vel_b @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") def object_ang_vel_b(self) -> wp.array: """Deprecated property. Please use :attr:`body_com_ang_vel_b` instead.""" warnings.warn( @@ -896,6 +980,7 @@ def object_ang_vel_b(self) -> wp.array: return self.body_com_ang_vel_b @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ACC, element_names_source="body_wrench") def object_acc_w(self) -> wp.array: """Deprecated property. Please use :attr:`body_com_acc_w` instead.""" warnings.warn( @@ -906,6 +991,7 @@ def object_acc_w(self) -> wp.array: return self.body_com_acc_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_ACCELERATION, element_names_source="body_xyz") def object_lin_acc_w(self) -> wp.array: """Deprecated property. Please use :attr:`body_com_lin_acc_w` instead.""" warnings.warn( @@ -917,6 +1003,7 @@ def object_lin_acc_w(self) -> wp.array: return self.body_com_lin_acc_w @property + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_ACCELERATION, element_names_source="body_xyz") def object_ang_acc_w(self) -> wp.array: """Deprecated property. Please use :attr:`body_com_ang_acc_w` instead.""" warnings.warn( @@ -932,6 +1019,7 @@ def object_ang_acc_w(self) -> wp.array: """ @property + @leapp_tensor_semantics(const=True) def default_mass(self) -> wp.array: """Deprecated property. 
Please use :attr:`body_mass` instead and manage the default mass manually.""" warnings.warn( @@ -945,6 +1033,7 @@ def default_mass(self) -> wp.array: return self._default_mass @property + @leapp_tensor_semantics(const=True) def default_inertia(self) -> wp.array: """Deprecated property. Please use :attr:`body_inertia` instead and manage the default inertia manually.""" warnings.warn( diff --git a/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor_data.py b/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor_data.py index 74acf5092761..62ad0c921baf 100644 --- a/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor_data.py +++ b/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor_data.py @@ -10,6 +10,14 @@ from abc import ABC, abstractmethod import warp as wp +from leapp import InputKindEnum + +from isaaclab.utils.leapp_semantics import ( + POSE7_ELEMENT_NAMES, + QUAT_WXYZ_ELEMENT_NAMES, + XYZ_ELEMENT_NAMES, + leapp_tensor_semantics, +) class BaseContactSensorData(ABC): @@ -21,6 +29,7 @@ class BaseContactSensorData(ABC): @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names=POSE7_ELEMENT_NAMES) def pose_w(self) -> wp.array | None: """Pose of the sensor origin in world frame. @@ -30,6 +39,7 @@ def pose_w(self) -> wp.array | None: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names=XYZ_ELEMENT_NAMES) def pos_w(self) -> wp.array | None: """Position of the sensor origin in world frame. @@ -42,6 +52,7 @@ def pos_w(self) -> wp.array | None: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names=QUAT_WXYZ_ELEMENT_NAMES) def quat_w(self) -> wp.array | None: """Orientation of the sensor origin in world frame. 
@@ -54,6 +65,7 @@ def quat_w(self) -> wp.array | None: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.VECTOR3D, element_names=XYZ_ELEMENT_NAMES) def net_forces_w(self) -> wp.array | None: """The net normal contact forces in world frame. @@ -64,6 +76,7 @@ def net_forces_w(self) -> wp.array | None: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.VECTOR3D, element_names=XYZ_ELEMENT_NAMES) def net_forces_w_history(self) -> wp.array | None: """History of net normal contact forces. @@ -74,6 +87,7 @@ def net_forces_w_history(self) -> wp.array | None: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.VECTOR3D, element_names=XYZ_ELEMENT_NAMES) def force_matrix_w(self) -> wp.array | None: """Normal contact forces filtered between sensor and filtered bodies. @@ -86,6 +100,7 @@ def force_matrix_w(self) -> wp.array | None: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.VECTOR3D, element_names=XYZ_ELEMENT_NAMES) def force_matrix_w_history(self) -> wp.array | None: """History of filtered contact forces. @@ -98,6 +113,7 @@ def force_matrix_w_history(self) -> wp.array | None: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names=XYZ_ELEMENT_NAMES) def contact_pos_w(self) -> wp.array | None: """Average position of contact points. @@ -110,6 +126,7 @@ def contact_pos_w(self) -> wp.array | None: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.VECTOR3D, element_names=XYZ_ELEMENT_NAMES) def friction_forces_w(self) -> wp.array | None: """Sum of friction forces. @@ -122,6 +139,7 @@ def friction_forces_w(self) -> wp.array | None: @property @abstractmethod + @leapp_tensor_semantics() def last_air_time(self) -> wp.array | None: """Time spent in air before last contact. 
@@ -133,6 +151,7 @@ def last_air_time(self) -> wp.array | None: @property @abstractmethod + @leapp_tensor_semantics() def current_air_time(self) -> wp.array | None: """Time spent in air since last detach. @@ -144,6 +163,7 @@ def current_air_time(self) -> wp.array | None: @property @abstractmethod + @leapp_tensor_semantics() def last_contact_time(self) -> wp.array | None: """Time spent in contact before last detach. @@ -155,6 +175,7 @@ def last_contact_time(self) -> wp.array | None: @property @abstractmethod + @leapp_tensor_semantics() def current_contact_time(self) -> wp.array | None: """Time spent in contact since last contact. diff --git a/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer_data.py b/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer_data.py index 3b28a7b17d00..063583879102 100644 --- a/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer_data.py +++ b/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer_data.py @@ -10,6 +10,14 @@ from abc import ABC, abstractmethod import warp as wp +from leapp import InputKindEnum + +from isaaclab.utils.leapp_semantics import ( + POSE7_ELEMENT_NAMES, + QUAT_WXYZ_ELEMENT_NAMES, + XYZ_ELEMENT_NAMES, + leapp_tensor_semantics, +) class BaseFrameTransformerData(ABC): @@ -30,6 +38,7 @@ def target_frame_names(self) -> list[str]: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="target_frame_pose") def target_pose_source(self) -> wp.array | None: """Pose of the target frame(s) relative to source frame. @@ -40,6 +49,7 @@ def target_pose_source(self) -> wp.array | None: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="target_frame_xyz") def target_pos_source(self) -> wp.array: """Position of the target frame(s) relative to source frame. 
@@ -50,6 +60,7 @@ def target_pos_source(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="target_frame_quat") def target_quat_source(self) -> wp.array: """Orientation of the target frame(s) relative to source frame. @@ -60,6 +71,7 @@ def target_quat_source(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="target_frame_pose") def target_pose_w(self) -> wp.array | None: """Pose of the target frame(s) after offset in world frame. @@ -70,6 +82,7 @@ def target_pose_w(self) -> wp.array | None: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="target_frame_xyz") def target_pos_w(self) -> wp.array: """Position of the target frame(s) after offset in world frame. @@ -80,6 +93,7 @@ def target_pos_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="target_frame_quat") def target_quat_w(self) -> wp.array: """Orientation of the target frame(s) after offset in world frame. @@ -90,6 +104,7 @@ def target_quat_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names=POSE7_ELEMENT_NAMES) def source_pose_w(self) -> wp.array | None: """Pose of the source frame after offset in world frame. @@ -100,6 +115,7 @@ def source_pose_w(self) -> wp.array | None: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names=XYZ_ELEMENT_NAMES) def source_pos_w(self) -> wp.array: """Position of the source frame after offset in world frame. @@ -109,6 +125,7 @@ def source_pos_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names=QUAT_WXYZ_ELEMENT_NAMES) def source_quat_w(self) -> wp.array: """Orientation of the source frame after offset in world frame. 
diff --git a/source/isaaclab/isaaclab/sensors/imu/base_imu_data.py b/source/isaaclab/isaaclab/sensors/imu/base_imu_data.py index b1f175d4ebd4..04dda8e7b62b 100644 --- a/source/isaaclab/isaaclab/sensors/imu/base_imu_data.py +++ b/source/isaaclab/isaaclab/sensors/imu/base_imu_data.py @@ -10,6 +10,9 @@ from abc import ABC, abstractmethod import warp as wp +from leapp import InputKindEnum + +from isaaclab.utils.leapp_semantics import XYZ_ELEMENT_NAMES, leapp_tensor_semantics class BaseImuData(ABC): @@ -24,6 +27,7 @@ class BaseImuData(ABC): @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names=XYZ_ELEMENT_NAMES) def ang_vel_b(self) -> wp.array: """IMU frame angular velocity relative to the world expressed in IMU frame [rad/s]. @@ -33,6 +37,7 @@ def ang_vel_b(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_ACCELERATION, element_names=XYZ_ELEMENT_NAMES) def lin_acc_b(self) -> wp.array: """Linear acceleration (proper) in the IMU frame [m/s^2]. 
diff --git a/source/isaaclab/isaaclab/utils/leapp_semantics.py b/source/isaaclab/isaaclab/utils/leapp_semantics.py index 716b327576ce..009d5a289491 100644 --- a/source/isaaclab/isaaclab/utils/leapp_semantics.py +++ b/source/isaaclab/isaaclab/utils/leapp_semantics.py @@ -77,6 +77,12 @@ def resolve_leapp_element_names(semantics: LeappTensorSemantics | None, data_sel return semantics.element_names source = semantics.element_names_source + if source == "xyz": + return XYZ_ELEMENT_NAMES + if source == "quat_wxyz": + return QUAT_WXYZ_ELEMENT_NAMES + if source == "pose7": + return POSE7_ELEMENT_NAMES if source == "joint_names": return _select_element_names( getattr(data_self, "joint_names", getattr(data_self, "_joint_names", None)), @@ -119,4 +125,19 @@ def resolve_leapp_element_names(semantics: LeappTensorSemantics | None, data_sel if body_names is None: return None return [body_names, WRENCH6_ELEMENT_NAMES] + if source == "target_frame_xyz": + frame_names = getattr(data_self, "target_frame_names", None) + if frame_names is None: + return None + return [list(frame_names), XYZ_ELEMENT_NAMES] + if source == "target_frame_quat": + frame_names = getattr(data_self, "target_frame_names", None) + if frame_names is None: + return None + return [list(frame_names), QUAT_WXYZ_ELEMENT_NAMES] + if source == "target_frame_pose": + frame_names = getattr(data_self, "target_frame_names", None) + if frame_names is None: + return None + return [list(frame_names), POSE7_ELEMENT_NAMES] return None From ad6ac037f0adf4d395e523eeb522fbe3b0bc9f3e Mon Sep 17 00:00:00 2001 From: Frank Lai Date: Tue, 24 Mar 2026 00:12:46 -0700 Subject: [PATCH 06/20] reorganized some files --- .../rsl_rl/LEAPP_annotations_for_isaac_lab.md | 158 ------------------ .../reinforcement_learning/rsl_rl/export.py | 49 ++---- .../managed_environment_annotator/utils.py | 19 --- .../assets/articulation/base_articulation.py | 2 +- .../articulation/base_articulation_data.py | 2 +- .../rigid_object/base_rigid_object_data.py | 2 
+- .../base_rigid_object_collection_data.py | 2 +- .../isaaclab/envs/direct_deployment_env.py | 10 +- .../envs/mdp/commands/pose_2d_command.py | 2 +- .../envs/mdp/commands/pose_command.py | 2 +- .../envs/mdp/commands/velocity_command.py | 2 +- .../isaaclab/managers/manager_term_cfg.py | 2 +- .../base_contact_sensor_data.py | 2 +- .../contact_sensor/contact_sensor_data.py | 2 - .../base_frame_transformer_data.py | 2 +- .../frame_transformer_data.py | 2 - .../isaaclab/sensors/imu/base_imu_data.py | 2 +- .../isaaclab/isaaclab/sensors/imu/imu_data.py | 2 - .../sensors/ray_caster/ray_caster_data.py | 5 +- .../isaaclab/utils/leapp}/export_annotator.py | 7 +- .../utils/{ => leapp}/leapp_semantics.py | 0 .../isaaclab/isaaclab/utils/leapp}/proxy.py | 2 +- source/isaaclab/isaaclab/utils/leapp/utils.py | 35 ++++ .../dexsuite/mdp/commands/pose_commands.py | 2 +- .../mdp/commands/orientation_command.py | 2 +- 25 files changed, 79 insertions(+), 238 deletions(-) delete mode 100644 scripts/reinforcement_learning/rsl_rl/LEAPP_annotations_for_isaac_lab.md delete mode 100644 scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/utils.py rename {scripts/reinforcement_learning/rsl_rl/managed_environment_annotator => source/isaaclab/isaaclab/utils/leapp}/export_annotator.py (99%) rename source/isaaclab/isaaclab/utils/{ => leapp}/leapp_semantics.py (100%) rename {scripts/reinforcement_learning/rsl_rl/managed_environment_annotator => source/isaaclab/isaaclab/utils/leapp}/proxy.py (99%) create mode 100644 source/isaaclab/isaaclab/utils/leapp/utils.py diff --git a/scripts/reinforcement_learning/rsl_rl/LEAPP_annotations_for_isaac_lab.md b/scripts/reinforcement_learning/rsl_rl/LEAPP_annotations_for_isaac_lab.md deleted file mode 100644 index d8ae03f5a6dd..000000000000 --- a/scripts/reinforcement_learning/rsl_rl/LEAPP_annotations_for_isaac_lab.md +++ /dev/null @@ -1,158 +0,0 @@ -# LEAPP Export for Isaac Lab - -Export RSL-RL reinforcement learning pipelines as portable processing 
graphs using [LEAPP](https://gitlab-master.nvidia.com/Isaac/leapp). - -## Exported Artifacts - -| File | Description | -|------|-------------| -| `.onnx` | Policy network (ONNX) | -| `.yaml` | Pipeline configuration and metadata | -| `.png` | Visualization of the processing graph | - -The YAML file includes semantic metadata (joint names, units, etc.) extracted from IO descriptors. For details on the YAML format, see the [LEAPP documentation](https://gitlab-master.nvidia.com/Isaac/leapp/-/blob/main/docs/0_getting_started.md). - -## Usage - -### 1. Install LEAPP - -```bash -git clone ssh://git@gitlab-master.nvidia.com:12051/Isaac/leapp.git -cd leapp -git checkout develop -pip install -e . -``` - -### 2. Export a Policy - -```bash -./isaaclab.sh -p scripts/reinforcement_learning/rsl_rl/export.py \ - --task Isaac-Reach-Franka-v0 \ - --use_pretrained_checkpoint \ - --headless -``` - -> **Note:** Export runs with a single environment instance. - -### 3. View Results - -Artifacts are saved to `.//`. 
- - - -sample exported `Isaac-Reach-Franka-v0.yaml`: - -```yaml -models: - Isaac-Reach-Franka-v0: - inputs: - - name: joint_pos - dtype: float32 - shape: [1, 9] - type: tensor - - name: joint_vel - dtype: float32 - shape: [1, 9] - type: tensor - - name: ee_pose - dtype: float32 - shape: [1, 7] - type: tensor - - name: last_actions - dtype: float32 - shape: [1, 7] - type: tensor - outputs: - - name: arm_action - dtype: float32 - shape: [1, 7] - type: tensor - - name: last_action - dtype: float32 - shape: [1, 7] - type: tensor - - name: arm_action_kp_gains - dtype: float32 - shape: [1, 7] - type: tensor - - name: arm_action_kd_gains - dtype: float32 - shape: [1, 7] - type: tensor - parameters: - model_path: Isaac-Reach-Franka-v0.onnx - md5sum: 38ee55fa7828b5068b86024206bd5ddb - sha256sum: c605a7076fde5c0d03a36f548d458d24bd543df67aac7675d463d29f870a7eb3 - device: cuda - backend: onnx - -pipeline: - data_flow: {} - feedback_flow: - Isaac-Reach-Franka-v0/last_action: [Isaac-Reach-Franka-v0/last_actions] - inputs: - Isaac-Reach-Franka-v0: [joint_pos, joint_vel, ee_pose] - outputs: - Isaac-Reach-Franka-v0: [arm_action, arm_action_kp_gains, arm_action_kd_gains] - -system information: - cuda version: '12.8' - leapp version: 0.3.0 - os: Linux - python version: 3.11.14 - torch version: 2.7.0+cu128 - -semantic: - actions: - - joint_names: - - panda_joint1 - - panda_joint2 - - panda_joint3 - - panda_joint4 - - panda_joint5 - - panda_joint6 - - panda_joint7 - leapp_mapping: - - arm_action - name: joint_position_action - observations: - - joint_names: - - panda_joint1 - - panda_joint2 - - panda_joint3 - - panda_joint4 - - panda_joint5 - - panda_joint6 - - panda_joint7 - - panda_finger_joint1 - - panda_finger_joint2 - leapp_mapping: - - joint_pos - name: joint_pos_rel - units: rad - - joint_names: - - panda_joint1 - - panda_joint2 - - panda_joint3 - - panda_joint4 - - panda_joint5 - - panda_joint6 - - panda_joint7 - - panda_finger_joint1 - - panda_finger_joint2 - leapp_mapping: - 
- joint_vel - name: joint_vel_rel - units: rad/s - - leapp_mapping: - - ee_pose - name: generated_commands - - leapp_mapping: - - last_actions - name: last_action - scene: - decimation: 2 - dt: 0.03333333333333333 - physics_dt: 0.016666666666666666 - -``` diff --git a/scripts/reinforcement_learning/rsl_rl/export.py b/scripts/reinforcement_learning/rsl_rl/export.py index e25abe88fc49..ac555be7fa76 100644 --- a/scripts/reinforcement_learning/rsl_rl/export.py +++ b/scripts/reinforcement_learning/rsl_rl/export.py @@ -17,7 +17,6 @@ import leapp import torch -import warp as wp from leapp import annotate # Disable TorchScript before importing task/environment modules so any @@ -108,11 +107,11 @@ import os import gymnasium as gym -from managed_environment_annotator.export_annotator import patch_env_for_export from rsl_rl.runners import DistillationRunner, OnPolicyRunner from isaaclab.envs import ManagerBasedRLEnv, ManagerBasedRLEnvCfg from isaaclab.utils.assets import retrieve_file_path +from isaaclab.utils.leapp.export_annotator import patch_env_for_export from isaaclab_rl.rsl_rl import RslRlBaseRunnerCfg, RslRlVecEnvWrapper, handle_deprecated_rsl_rl_cfg from isaaclab_rl.utils.pretrained_checkpoint import get_published_pretrained_checkpoint @@ -122,23 +121,7 @@ from isaaclab_tasks.utils.hydra import hydra_task_config -def _patch_warp_to_torch_passthrough() -> None: - """Make wp.to_torch idempotent for torch tensors during export.""" - if getattr(wp.to_torch, "_leapp_passthrough_patch", False): - return - - original_to_torch = wp.to_torch - - def patched_to_torch(value, *args, **kwargs): - if isinstance(value, torch.Tensor): - return value - return original_to_torch(value, *args, **kwargs) - - patched_to_torch._leapp_passthrough_patch = True # type: ignore[attr-defined] - wp.to_torch = patched_to_torch - - -def _get_actor_memory_module(policy_nn): +def get_actor_memory_module(policy_nn): if hasattr(policy_nn, "memory_a"): return policy_nn.memory_a if hasattr(policy_nn, 
"memory_s"): @@ -146,12 +129,12 @@ def _get_actor_memory_module(policy_nn): return None -def _ensure_actor_hidden_state_initialized(policy_nn, batch_size: int, device: torch.device, dtype: torch.dtype): +def ensure_actor_hidden_state_initialized(policy_nn, batch_size: int, device: torch.device, dtype: torch.dtype): actor_state, _ = policy_nn.get_hidden_states() if actor_state is not None: return actor_state - memory = _get_actor_memory_module(policy_nn) + memory = get_actor_memory_module(policy_nn) if memory is None or not hasattr(memory, "rnn"): return None @@ -166,7 +149,7 @@ def _ensure_actor_hidden_state_initialized(policy_nn, batch_size: int, device: t return actor_state -def _state_dict_from_actor_hidden(actor_hidden): +def state_dict_from_actor_hidden(actor_hidden): if actor_hidden is None: return {} if isinstance(actor_hidden, tuple): @@ -174,7 +157,7 @@ def _state_dict_from_actor_hidden(actor_hidden): return {"actor_state": actor_hidden} -def _actor_hidden_from_registered(registered_state, original_hidden): +def actor_hidden_from_registered(registered_state, original_hidden): if isinstance(original_hidden, tuple): if isinstance(registered_state, tuple): return registered_state @@ -230,10 +213,10 @@ def main(env_cfg: ManagerBasedRLEnvCfg, agent_cfg: RslRlBaseRunnerCfg): ) export_task_name = args_cli.export_task_name if args_cli.export_task_name is not None else task_name - # required + # Patch only the observation groups consumed by the actor policy. 
obs_groups_cfg = getattr(agent_cfg, "obs_groups", None) if isinstance(obs_groups_cfg, Mapping): - required_obs_groups = set(obs_groups_cfg.get("policy", ["policy"])) + required_obs_groups = set(obs_groups_cfg.get("actor", ["policy"])) else: required_obs_groups = {"policy"} patch_env_for_export( @@ -242,7 +225,6 @@ def main(env_cfg: ManagerBasedRLEnvCfg, agent_cfg: RslRlBaseRunnerCfg): export_method=args_cli.export_method, required_obs_groups=required_obs_groups, ) - _patch_warp_to_torch_passthrough() # wrap around environment for rsl-rl env = RslRlVecEnvWrapper(env, clip_actions=agent_cfg.clip_actions) @@ -275,7 +257,7 @@ def main(env_cfg: ManagerBasedRLEnvCfg, agent_cfg: RslRlBaseRunnerCfg): # run everything in inference mode with torch.inference_mode(): if policy_nn is not None and getattr(policy_nn, "is_recurrent", False): - actor_hidden = _ensure_actor_hidden_state_initialized( + actor_hidden = ensure_actor_hidden_state_initialized( policy_nn, batch_size=env.num_envs, device=env.unwrapped.device, @@ -283,18 +265,23 @@ def main(env_cfg: ManagerBasedRLEnvCfg, agent_cfg: RslRlBaseRunnerCfg): ) registered_state = annotate.state_tensors( export_task_name, - _state_dict_from_actor_hidden(actor_hidden), + state_dict_from_actor_hidden(actor_hidden), ) - actor_memory = _get_actor_memory_module(policy_nn) + actor_memory = get_actor_memory_module(policy_nn) if actor_memory is not None: - actor_memory.hidden_state = _actor_hidden_from_registered(registered_state, actor_hidden) + actor_memory.hidden_state = actor_hidden_from_registered(registered_state, actor_hidden) + + # =============RUN POLICY============= actions = policy(obs) + # =============END POLICY============= + if policy_nn is not None and getattr(policy_nn, "is_recurrent", False): actor_hidden_after = policy_nn.get_hidden_states()[0] annotate.update_state( export_task_name, - _state_dict_from_actor_hidden(actor_hidden_after), + state_dict_from_actor_hidden(actor_hidden_after), ) + # env stepping obs, _, _, _ = 
env.step(actions) diff --git a/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/utils.py b/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/utils.py deleted file mode 100644 index b5740d5763d2..000000000000 --- a/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/utils.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -from __future__ import annotations - -import torch -import warp as wp - - -def ensure_torch_tensor(value): - """Convert Warp arrays to torch tensors while leaving torch tensors unchanged.""" - if isinstance(value, torch.Tensor): - return value - try: - return wp.to_torch(value) - except Exception: - return value diff --git a/source/isaaclab/isaaclab/assets/articulation/base_articulation.py b/source/isaaclab/isaaclab/assets/articulation/base_articulation.py index fce8506a2df1..480f512d4320 100644 --- a/source/isaaclab/isaaclab/assets/articulation/base_articulation.py +++ b/source/isaaclab/isaaclab/assets/articulation/base_articulation.py @@ -17,7 +17,7 @@ import warp as wp from leapp import OutputKindEnum -from ...utils.leapp_semantics import leapp_tensor_semantics +from ...utils.leapp.leapp_semantics import leapp_tensor_semantics from ..asset_base import AssetBase if TYPE_CHECKING: diff --git a/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py b/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py index 45d6d2725407..3a53bc27214a 100644 --- a/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py +++ b/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py @@ -9,7 +9,7 @@ import warp as wp from leapp import InputKindEnum -from isaaclab.utils.leapp_semantics import ( +from isaaclab.utils.leapp.leapp_semantics import ( POSE7_ELEMENT_NAMES, 
QUAT_WXYZ_ELEMENT_NAMES, WRENCH6_ELEMENT_NAMES, diff --git a/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py b/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py index b41c44f04731..83cf1be03bf9 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py +++ b/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py @@ -9,7 +9,7 @@ import warp as wp from leapp import InputKindEnum -from isaaclab.utils.leapp_semantics import ( +from isaaclab.utils.leapp.leapp_semantics import ( POSE7_ELEMENT_NAMES, QUAT_WXYZ_ELEMENT_NAMES, WRENCH6_ELEMENT_NAMES, diff --git a/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py b/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py index 30cbd06c50cd..e1e669e1498a 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py +++ b/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py @@ -9,7 +9,7 @@ import warp as wp from leapp import InputKindEnum -from isaaclab.utils.leapp_semantics import ( +from isaaclab.utils.leapp.leapp_semantics import ( leapp_tensor_semantics, ) diff --git a/source/isaaclab/isaaclab/envs/direct_deployment_env.py b/source/isaaclab/isaaclab/envs/direct_deployment_env.py index 7841282432b3..9cc56bdf0174 100644 --- a/source/isaaclab/isaaclab/envs/direct_deployment_env.py +++ b/source/isaaclab/isaaclab/envs/direct_deployment_env.py @@ -14,11 +14,11 @@ from __future__ import annotations import logging -import torch -import yaml from dataclasses import dataclass from typing import Any +import torch +import yaml from leapp import InferenceManager from isaaclab.assets.articulation.articulation import Articulation @@ -136,11 +136,11 @@ def _resolve_joint_ids(element_names: list | None, asset: Articulation) -> list[ def _find_command_term_by_hint(kind: str, command_manager: 
CommandManager) -> str: - """Find the ``CommandTerm`` name whose ``cfg.cmd_hint`` matches ``kind``.""" + """Find the ``CommandTerm`` name whose ``cfg.cmd_kind`` matches ``kind``.""" for name, term in command_manager._terms.items(): - if getattr(term.cfg, "cmd_hint", None) == kind: + if getattr(term.cfg, "cmd_kind", None) == kind: return name - raise ValueError(f"No command term with cmd_hint='{kind}'. Available terms: {list(command_manager._terms.keys())}") + raise ValueError(f"No command term with cmd_kind='{kind}'. Available terms: {list(command_manager._terms.keys())}") def _find_robot_asset(scene: InteractiveScene) -> Articulation: diff --git a/source/isaaclab/isaaclab/envs/mdp/commands/pose_2d_command.py b/source/isaaclab/isaaclab/envs/mdp/commands/pose_2d_command.py index 1e54f7aa4021..2666774de143 100644 --- a/source/isaaclab/isaaclab/envs/mdp/commands/pose_2d_command.py +++ b/source/isaaclab/isaaclab/envs/mdp/commands/pose_2d_command.py @@ -62,7 +62,7 @@ def __init__(self, cfg: UniformPose2dCommandCfg, env: ManagerBasedEnv): self.metrics["error_pos"] = torch.zeros(self.num_envs, device=self.device) self.metrics["error_heading"] = torch.zeros(self.num_envs, device=self.device) - self.cfg.cmd_hint = self.cfg.cmd_hint or "command/body/pose" + self.cfg.cmd_kind = self.cfg.cmd_kind or "command/body/pose" self.cfg.element_names = self.cfg.element_names or ["x", "y", "z", "heading"] def __str__(self) -> str: diff --git a/source/isaaclab/isaaclab/envs/mdp/commands/pose_command.py b/source/isaaclab/isaaclab/envs/mdp/commands/pose_command.py index 130d5f9f0bcb..7e2dbd6ac2a4 100644 --- a/source/isaaclab/isaaclab/envs/mdp/commands/pose_command.py +++ b/source/isaaclab/isaaclab/envs/mdp/commands/pose_command.py @@ -69,7 +69,7 @@ def __init__(self, cfg: UniformPoseCommandCfg, env: ManagerBasedEnv): self.metrics["position_error"] = torch.zeros(self.num_envs, device=self.device) self.metrics["orientation_error"] = torch.zeros(self.num_envs, device=self.device) - 
self.cfg.cmd_hint = self.cfg.cmd_hint or "command/body/pose" + self.cfg.cmd_kind = self.cfg.cmd_kind or "command/body/pose" self.cfg.element_names = self.cfg.element_names or ["x", "y", "z", "qw", "qx", "qy", "qz"] def __str__(self) -> str: diff --git a/source/isaaclab/isaaclab/envs/mdp/commands/velocity_command.py b/source/isaaclab/isaaclab/envs/mdp/commands/velocity_command.py index 8bbe63ac5aab..2e4285ea6bf7 100644 --- a/source/isaaclab/isaaclab/envs/mdp/commands/velocity_command.py +++ b/source/isaaclab/isaaclab/envs/mdp/commands/velocity_command.py @@ -88,7 +88,7 @@ def __init__(self, cfg: UniformVelocityCommandCfg, env: ManagerBasedEnv): self.metrics["error_vel_xy"] = torch.zeros(self.num_envs, device=self.device) self.metrics["error_vel_yaw"] = torch.zeros(self.num_envs, device=self.device) - self.cfg.cmd_hint = self.cfg.cmd_hint or "command/body/velocity" + self.cfg.cmd_kind = self.cfg.cmd_kind or "command/body/velocity" self.cfg.element_names = self.cfg.element_names or ["lin_vel_x", "lin_vel_y", "ang_vel_z"] def __str__(self) -> str: diff --git a/source/isaaclab/isaaclab/managers/manager_term_cfg.py b/source/isaaclab/isaaclab/managers/manager_term_cfg.py index c3c731c9e52e..d8adbd8eef0e 100644 --- a/source/isaaclab/isaaclab/managers/manager_term_cfg.py +++ b/source/isaaclab/isaaclab/managers/manager_term_cfg.py @@ -118,7 +118,7 @@ class CommandTermCfg: debug_vis: bool = False """Whether to visualize debug information. 
Defaults to False.""" - cmd_hint: str | None = None # type hint for the command for deployment + cmd_kind: str | None = None # type hint for the command for deployment element_names: list[str] | list[list[str]] | None = None # element names for the command for deployment diff --git a/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor_data.py b/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor_data.py index 62ad0c921baf..11bf456ae1f5 100644 --- a/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor_data.py +++ b/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor_data.py @@ -12,7 +12,7 @@ import warp as wp from leapp import InputKindEnum -from isaaclab.utils.leapp_semantics import ( +from isaaclab.utils.leapp.leapp_semantics import ( POSE7_ELEMENT_NAMES, QUAT_WXYZ_ELEMENT_NAMES, XYZ_ELEMENT_NAMES, diff --git a/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor_data.py b/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor_data.py index 3cb6cd4debc6..0ca31f9e40ac 100644 --- a/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor_data.py +++ b/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor_data.py @@ -17,8 +17,6 @@ from isaaclab_newton.sensors.contact_sensor.contact_sensor_data import ContactSensorData as NewtonContactSensorData from isaaclab_physx.sensors.contact_sensor import ContactSensorData as PhysXContactSensorData -from isaaclab.utils.leapp_semantics import leapp_tensor_semantics - class ContactSensorData(FactoryBase, BaseContactSensorData): """Factory for creating contact sensor data instances.""" diff --git a/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer_data.py b/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer_data.py index 063583879102..15b8ce1356c0 100644 --- a/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer_data.py +++ 
b/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer_data.py @@ -12,7 +12,7 @@ import warp as wp from leapp import InputKindEnum -from isaaclab.utils.leapp_semantics import ( +from isaaclab.utils.leapp.leapp_semantics import ( POSE7_ELEMENT_NAMES, QUAT_WXYZ_ELEMENT_NAMES, XYZ_ELEMENT_NAMES, diff --git a/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_data.py b/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_data.py index 22c385b29de8..f6b28faea395 100644 --- a/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_data.py +++ b/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_data.py @@ -19,8 +19,6 @@ ) from isaaclab_physx.sensors.frame_transformer import FrameTransformerData as PhysXFrameTransformerData -from isaaclab.utils.leapp_semantics import leapp_tensor_semantics - class FrameTransformerData(FactoryBase, BaseFrameTransformerData): """Factory for creating frame transformer data instances.""" diff --git a/source/isaaclab/isaaclab/sensors/imu/base_imu_data.py b/source/isaaclab/isaaclab/sensors/imu/base_imu_data.py index 04dda8e7b62b..996e5bd0b81e 100644 --- a/source/isaaclab/isaaclab/sensors/imu/base_imu_data.py +++ b/source/isaaclab/isaaclab/sensors/imu/base_imu_data.py @@ -12,7 +12,7 @@ import warp as wp from leapp import InputKindEnum -from isaaclab.utils.leapp_semantics import XYZ_ELEMENT_NAMES, leapp_tensor_semantics +from isaaclab.utils.leapp.leapp_semantics import XYZ_ELEMENT_NAMES, leapp_tensor_semantics class BaseImuData(ABC): diff --git a/source/isaaclab/isaaclab/sensors/imu/imu_data.py b/source/isaaclab/isaaclab/sensors/imu/imu_data.py index 59cbf68c02be..f23f2a3be6ca 100644 --- a/source/isaaclab/isaaclab/sensors/imu/imu_data.py +++ b/source/isaaclab/isaaclab/sensors/imu/imu_data.py @@ -17,8 +17,6 @@ from isaaclab_newton.sensors.imu import ImuData as NewtonImuData from isaaclab_physx.sensors.imu import ImuData as PhysXImuData -from 
isaaclab.utils.leapp_semantics import leapp_tensor_semantics - class ImuData(FactoryBase, BaseImuData): """Factory for creating IMU data instances.""" diff --git a/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_data.py b/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_data.py index 7863a76fe875..27f461f783da 100644 --- a/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_data.py +++ b/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_data.py @@ -6,7 +6,8 @@ from dataclasses import dataclass import torch -from isaaclab.utils.leapp_semantics import leapp_tensor_semantics + +from isaaclab.utils.leapp.leapp_semantics import leapp_tensor_semantics @dataclass @@ -18,7 +19,7 @@ class RayCasterData: Shape is (N, 3), where N is the number of sensors. """ - quat_w: torch.Tensor = None + _quat_w: torch.Tensor = None """Orientation of the sensor origin in quaternion (x, y, z, w) in world frame. Shape is (N, 4), where N is the number of sensors. diff --git a/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/export_annotator.py b/source/isaaclab/isaaclab/utils/leapp/export_annotator.py similarity index 99% rename from scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/export_annotator.py rename to source/isaaclab/isaaclab/utils/leapp/export_annotator.py index 2d95919a5a69..265b14248c87 100644 --- a/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/export_annotator.py +++ b/source/isaaclab/isaaclab/utils/leapp/export_annotator.py @@ -44,10 +44,10 @@ from isaaclab.assets.articulation.base_articulation import BaseArticulation from isaaclab.managers import ManagerTermBase -from isaaclab.utils.leapp_semantics import resolve_leapp_element_names +from .leapp_semantics import resolve_leapp_element_names from .proxy import _ArticulationWriteProxy, _DataProxy, _EnvProxy, _ManagerTermProxy -from .utils import ensure_torch_tensor +from .utils import ensure_torch_tensor, patch_warp_to_torch_passthrough if 
TYPE_CHECKING: from isaaclab.envs import ManagerBasedEnv @@ -398,7 +398,7 @@ def wrapped(env, command_name=None, **kwargs): sem = TensorSemantics( name=leapp_input_name, ref=result, - kind=getattr(command_cfg, "cmd_hint", None), + kind=getattr(command_cfg, "cmd_kind", None), element_names=getattr(command_cfg, "element_names", None), ) return annotate.input_tensors(task_name, sem) @@ -598,5 +598,6 @@ class list is required. Properties with ``_leapp_semantics`` produce The underlying env, scene, assets, and tensors remain shared with the rest of the pipeline; only the manager call paths are redirected. """ + patch_warp_to_torch_passthrough() patcher = ExportPatcher(task_name, export_method, required_obs_groups=required_obs_groups) patcher.setup(env) diff --git a/source/isaaclab/isaaclab/utils/leapp_semantics.py b/source/isaaclab/isaaclab/utils/leapp/leapp_semantics.py similarity index 100% rename from source/isaaclab/isaaclab/utils/leapp_semantics.py rename to source/isaaclab/isaaclab/utils/leapp/leapp_semantics.py diff --git a/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/proxy.py b/source/isaaclab/isaaclab/utils/leapp/proxy.py similarity index 99% rename from scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/proxy.py rename to source/isaaclab/isaaclab/utils/leapp/proxy.py index f763a2154fed..40d8344a82c5 100644 --- a/scripts/reinforcement_learning/rsl_rl/managed_environment_annotator/proxy.py +++ b/source/isaaclab/isaaclab/utils/leapp/proxy.py @@ -14,8 +14,8 @@ from leapp.utils.tensor_description import TensorSemantics from isaaclab.managers import ManagerTermBase -from isaaclab.utils.leapp_semantics import resolve_leapp_element_names +from .leapp_semantics import resolve_leapp_element_names from .utils import ensure_torch_tensor diff --git a/source/isaaclab/isaaclab/utils/leapp/utils.py b/source/isaaclab/isaaclab/utils/leapp/utils.py new file mode 100644 index 000000000000..53c86b91fe00 --- /dev/null +++ 
b/source/isaaclab/isaaclab/utils/leapp/utils.py @@ -0,0 +1,35 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import torch +import warp as wp + + +def ensure_torch_tensor(value): + """Convert Warp arrays to torch tensors while leaving torch tensors unchanged.""" + if isinstance(value, torch.Tensor): + return value + try: + return wp.to_torch(value) + except Exception: + return value + + +def patch_warp_to_torch_passthrough() -> None: + """Make ``wp.to_torch`` idempotent for torch tensors during export.""" + if getattr(wp.to_torch, "_leapp_passthrough_patch", False): + return + + original_to_torch = wp.to_torch + + def patched_to_torch(value, *args, **kwargs): + if isinstance(value, torch.Tensor): + return value + return original_to_torch(value, *args, **kwargs) + + patched_to_torch._leapp_passthrough_patch = True # type: ignore[attr-defined] + wp.to_torch = patched_to_torch diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/commands/pose_commands.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/commands/pose_commands.py index 437ac079ef3d..fb8b71e5e6ac 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/commands/pose_commands.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/commands/pose_commands.py @@ -83,7 +83,7 @@ def __init__(self, cfg: dex_cmd_cfgs.ObjectUniformPoseCommandCfg, env: ManagerBa self.success_visualizer = VisualizationMarkers(self.cfg.success_visualizer_cfg) self.success_visualizer.set_visibility(True) - self.cfg.cmd_hint = self.cfg.cmd_hint or "command/body/pose" + self.cfg.cmd_kind = self.cfg.cmd_kind or "command/body/pose" self.cfg.element_names = self.cfg.element_names or ["x", "y", "z", "qw", "qx", "qy", "qz"] 
def __str__(self) -> str: diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/commands/orientation_command.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/commands/orientation_command.py index e23359d875b0..946ff908bc55 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/commands/orientation_command.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/commands/orientation_command.py @@ -75,7 +75,7 @@ def __init__(self, cfg: InHandReOrientationCommandCfg, env: ManagerBasedRLEnv): self.metrics["position_error"] = torch.zeros(self.num_envs, device=self.device) self.metrics["consecutive_success"] = torch.zeros(self.num_envs, device=self.device) - self.cfg.cmd_hint = self.cfg.cmd_hint or "command/body/pose" + self.cfg.cmd_kind = self.cfg.cmd_kind or "command/body/pose" self.cfg.element_names = self.cfg.element_names or ["x", "y", "z", "qw", "qx", "qy", "qz"] def __str__(self) -> str: From 03e6b0eee82be13b999594f584ea603930473989 Mon Sep 17 00:00:00 2001 From: Frank Lai Date: Tue, 24 Mar 2026 01:21:21 -0700 Subject: [PATCH 07/20] updated tensor semantics to create isaac lab connection. 
simplified some logic --- .../assets/articulation/base_articulation.py | 14 +- .../articulation/base_articulation_data.py | 69 +++++---- .../rigid_object/base_rigid_object_data.py | 62 ++++---- .../base_rigid_object_collection_data.py | 138 +++++++++--------- .../base_frame_transformer_data.py | 15 +- .../sensors/ray_caster/ray_caster_data.py | 10 +- .../isaaclab/utils/leapp/export_annotator.py | 91 ++++-------- .../isaaclab/utils/leapp/leapp_semantics.py | 136 +++++++---------- source/isaaclab/isaaclab/utils/leapp/proxy.py | 13 +- source/isaaclab/isaaclab/utils/leapp/utils.py | 40 +++++ 10 files changed, 294 insertions(+), 294 deletions(-) diff --git a/source/isaaclab/isaaclab/assets/articulation/base_articulation.py b/source/isaaclab/isaaclab/assets/articulation/base_articulation.py index 480f512d4320..5fb3bd4a72f0 100644 --- a/source/isaaclab/isaaclab/assets/articulation/base_articulation.py +++ b/source/isaaclab/isaaclab/assets/articulation/base_articulation.py @@ -17,7 +17,7 @@ import warp as wp from leapp import OutputKindEnum -from ...utils.leapp.leapp_semantics import leapp_tensor_semantics +from ...utils.leapp.leapp_semantics import joint_names_resolver, leapp_tensor_semantics from ..asset_base import AssetBase if TYPE_CHECKING: @@ -1268,7 +1268,7 @@ def set_inertias_mask( raise NotImplementedError() @abstractmethod - @leapp_tensor_semantics(kind=OutputKindEnum.JOINT_POSITION, element_names_source="joint_names") + @leapp_tensor_semantics(kind=OutputKindEnum.JOINT_POSITION, element_names_resolver=joint_names_resolver) def set_joint_position_target_index( self, *, @@ -1296,7 +1296,7 @@ def set_joint_position_target_index( raise NotImplementedError() @abstractmethod - @leapp_tensor_semantics(kind=OutputKindEnum.JOINT_POSITION, element_names_source="joint_names") + @leapp_tensor_semantics(kind=OutputKindEnum.JOINT_POSITION, element_names_resolver=joint_names_resolver) def set_joint_position_target_mask( self, *, @@ -1324,7 +1324,7 @@ def 
set_joint_position_target_mask( raise NotImplementedError() @abstractmethod - @leapp_tensor_semantics(kind=OutputKindEnum.JOINT_VELOCITY, element_names_source="joint_names") + @leapp_tensor_semantics(kind=OutputKindEnum.JOINT_VELOCITY, element_names_resolver=joint_names_resolver) def set_joint_velocity_target_index( self, *, @@ -1352,7 +1352,7 @@ def set_joint_velocity_target_index( raise NotImplementedError() @abstractmethod - @leapp_tensor_semantics(kind=OutputKindEnum.JOINT_VELOCITY, element_names_source="joint_names") + @leapp_tensor_semantics(kind=OutputKindEnum.JOINT_VELOCITY, element_names_resolver=joint_names_resolver) def set_joint_velocity_target_mask( self, *, @@ -1380,7 +1380,7 @@ def set_joint_velocity_target_mask( raise NotImplementedError() @abstractmethod - @leapp_tensor_semantics(kind=OutputKindEnum.JOINT_EFFORT, element_names_source="joint_names") + @leapp_tensor_semantics(kind=OutputKindEnum.JOINT_EFFORT, element_names_resolver=joint_names_resolver) def set_joint_effort_target_index( self, *, @@ -1408,7 +1408,7 @@ def set_joint_effort_target_index( raise NotImplementedError() @abstractmethod - @leapp_tensor_semantics(kind=OutputKindEnum.JOINT_EFFORT, element_names_source="joint_names") + @leapp_tensor_semantics(kind=OutputKindEnum.JOINT_EFFORT, element_names_resolver=joint_names_resolver) def set_joint_effort_target_mask( self, *, diff --git a/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py b/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py index 3a53bc27214a..4c29b736f7df 100644 --- a/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py +++ b/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py @@ -14,6 +14,11 @@ QUAT_WXYZ_ELEMENT_NAMES, WRENCH6_ELEMENT_NAMES, XYZ_ELEMENT_NAMES, + body_pose_resolver, + body_quat_resolver, + body_wrench_resolver, + body_xyz_resolver, + joint_names_resolver, leapp_tensor_semantics, ) @@ -567,7 +572,7 @@ def body_inertia(self) -> 
wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="body_pose") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_resolver=body_pose_resolver) def body_link_pose_w(self) -> wp.array: """Body link pose ``[pos, quat]`` in simulation world frame. @@ -581,7 +586,7 @@ def body_link_pose_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_source="body_pose") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_resolver=body_pose_resolver) def body_link_vel_w(self) -> wp.array: """Body link velocity ``[lin_vel, ang_vel]`` in simulation world frame. @@ -595,7 +600,7 @@ def body_link_vel_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="body_pose") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_resolver=body_pose_resolver) def body_com_pose_w(self) -> wp.array: """Body center of mass pose ``[pos, quat]`` in simulation world frame. @@ -609,7 +614,7 @@ def body_com_pose_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_source="body_pose") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_resolver=body_pose_resolver) def body_com_vel_w(self) -> wp.array: """Body center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. @@ -644,7 +649,7 @@ def body_com_state_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ACC, element_names_source="body_pose") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ACC, element_names_resolver=body_pose_resolver) def body_com_acc_w(self) -> wp.array: """Acceleration of all bodies center of mass ``[lin_acc, ang_acc]``. 
@@ -657,7 +662,7 @@ def body_com_acc_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="body_pose") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_resolver=body_pose_resolver) def body_com_pose_b(self) -> wp.array: """Center of mass pose ``[pos, quat]`` of all bodies in their respective body's link frames. @@ -693,7 +698,7 @@ def body_incoming_joint_wrench_b(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.JOINT_POSITION, element_names_source="joint_names") + @leapp_tensor_semantics(kind=InputKindEnum.JOINT_POSITION, element_names_resolver=joint_names_resolver) def joint_pos(self) -> wp.array: """Joint positions of all joints. @@ -704,7 +709,7 @@ def joint_pos(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.JOINT_VELOCITY, element_names_source="joint_names") + @leapp_tensor_semantics(kind=InputKindEnum.JOINT_VELOCITY, element_names_resolver=joint_names_resolver) def joint_vel(self) -> wp.array: """Joint velocities of all joints. @@ -715,7 +720,7 @@ def joint_vel(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind="state/joint/acceleration", element_names_source="joint_names") + @leapp_tensor_semantics(kind="state/joint/acceleration", element_names_resolver=joint_names_resolver) def joint_acc(self) -> wp.array: """Joint acceleration of all joints. @@ -906,7 +911,7 @@ def root_com_ang_vel_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_resolver=body_xyz_resolver) def body_link_pos_w(self) -> wp.array: """Positions of all bodies in simulation world frame. 
@@ -919,7 +924,7 @@ def body_link_pos_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_resolver=body_quat_resolver) def body_link_quat_w(self) -> wp.array: """Orientation (x, y, z, w) of all bodies in simulation world frame. @@ -932,7 +937,7 @@ def body_link_quat_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_resolver=body_xyz_resolver) def body_link_lin_vel_w(self) -> wp.array: """Linear velocity of all bodies in simulation world frame. @@ -945,7 +950,7 @@ def body_link_lin_vel_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_resolver=body_xyz_resolver) def body_link_ang_vel_w(self) -> wp.array: """Angular velocity of all bodies in simulation world frame. @@ -958,7 +963,7 @@ def body_link_ang_vel_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_resolver=body_xyz_resolver) def body_com_pos_w(self) -> wp.array: """Positions of all bodies in simulation world frame. @@ -971,7 +976,7 @@ def body_com_pos_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_resolver=body_quat_resolver) def body_com_quat_w(self) -> wp.array: """Orientation (x, y, z, w) of the principal axes of inertia of all bodies in simulation world frame. 
@@ -984,7 +989,7 @@ def body_com_quat_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_resolver=body_xyz_resolver) def body_com_lin_vel_w(self) -> wp.array: """Linear velocity of all bodies in simulation world frame. @@ -997,7 +1002,7 @@ def body_com_lin_vel_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_resolver=body_xyz_resolver) def body_com_ang_vel_w(self) -> wp.array: """Angular velocity of all bodies in simulation world frame. @@ -1010,7 +1015,7 @@ def body_com_ang_vel_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_ACCELERATION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_ACCELERATION, element_names_resolver=body_xyz_resolver) def body_com_lin_acc_w(self) -> wp.array: """Linear acceleration of all bodies in simulation world frame. @@ -1023,7 +1028,7 @@ def body_com_lin_acc_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_ACCELERATION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_ACCELERATION, element_names_resolver=body_xyz_resolver) def body_com_ang_acc_w(self) -> wp.array: """Angular acceleration of all bodies in simulation world frame. 
@@ -1036,7 +1041,7 @@ def body_com_ang_acc_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_resolver=body_xyz_resolver) def body_com_pos_b(self) -> wp.array: """Center of mass position of all of the bodies in their respective link frames. @@ -1049,7 +1054,7 @@ def body_com_pos_b(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_resolver=body_quat_resolver) def body_com_quat_b(self) -> wp.array: """Orientation (x, y, z, w) of the principal axes of inertia of all of the bodies in their respective link frames. @@ -1135,67 +1140,67 @@ def root_ang_vel_b(self) -> wp.array: return self.root_com_ang_vel_b @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="body_pose") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_resolver=body_pose_resolver) def body_pose_w(self) -> wp.array: """Shorthand for :attr:`body_link_pose_w`.""" return self.body_link_pose_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_resolver=body_xyz_resolver) def body_pos_w(self) -> wp.array: """Shorthand for :attr:`body_link_pos_w`.""" return self.body_link_pos_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_resolver=body_quat_resolver) def body_quat_w(self) -> wp.array: """Shorthand for :attr:`body_link_quat_w`.""" return self.body_link_quat_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_source="body_wrench") + 
@leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_resolver=body_wrench_resolver) def body_vel_w(self) -> wp.array: """Shorthand for :attr:`body_com_vel_w`.""" return self.body_com_vel_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_resolver=body_xyz_resolver) def body_lin_vel_w(self) -> wp.array: """Shorthand for :attr:`body_com_lin_vel_w`.""" return self.body_com_lin_vel_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_resolver=body_xyz_resolver) def body_ang_vel_w(self) -> wp.array: """Shorthand for :attr:`body_com_ang_vel_w`.""" return self.body_com_ang_vel_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ACC, element_names_source="body_wrench") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ACC, element_names_resolver=body_wrench_resolver) def body_acc_w(self) -> wp.array: """Shorthand for :attr:`body_com_acc_w`.""" return self.body_com_acc_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_ACCELERATION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_ACCELERATION, element_names_resolver=body_xyz_resolver) def body_lin_acc_w(self) -> wp.array: """Shorthand for :attr:`body_com_lin_acc_w`.""" return self.body_com_lin_acc_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_ACCELERATION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_ACCELERATION, element_names_resolver=body_xyz_resolver) def body_ang_acc_w(self) -> wp.array: """Shorthand for :attr:`body_com_ang_acc_w`.""" return self.body_com_ang_acc_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") + 
@leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_resolver=body_xyz_resolver) def com_pos_b(self) -> wp.array: """Shorthand for :attr:`body_com_pos_b`.""" return self.body_com_pos_b @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_resolver=body_quat_resolver) def com_quat_b(self) -> wp.array: """Shorthand for :attr:`body_com_quat_b`.""" return self.body_com_quat_b diff --git a/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py b/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py index 83cf1be03bf9..1b45637a7b27 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py +++ b/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py @@ -14,6 +14,10 @@ QUAT_WXYZ_ELEMENT_NAMES, WRENCH6_ELEMENT_NAMES, XYZ_ELEMENT_NAMES, + body_pose_resolver, + body_quat_resolver, + body_wrench_resolver, + body_xyz_resolver, leapp_tensor_semantics, ) @@ -179,7 +183,7 @@ def root_com_state_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="body_pose") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_resolver=body_pose_resolver) def body_link_pose_w(self) -> wp.array: """Body link pose ``[pos, quat]`` in simulation world frame. @@ -193,7 +197,7 @@ def body_link_pose_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_source="body_pose") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_resolver=body_pose_resolver) def body_link_vel_w(self) -> wp.array: """Body link velocity ``[lin_vel, ang_vel]`` in simulation world frame. 
@@ -207,7 +211,7 @@ def body_link_vel_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="body_pose") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_resolver=body_pose_resolver) def body_com_pose_w(self) -> wp.array: """Body center of mass pose ``[pos, quat]`` in simulation world frame. @@ -221,7 +225,7 @@ def body_com_pose_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_source="body_pose") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_resolver=body_pose_resolver) def body_com_vel_w(self) -> wp.array: """Body center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. @@ -256,7 +260,7 @@ def body_com_state_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ACC, element_names_source="body_pose") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ACC, element_names_resolver=body_pose_resolver) def body_com_acc_w(self) -> wp.array: """Acceleration of all bodies ``[lin_acc, ang_acc]`` in the simulation world frame. @@ -269,7 +273,7 @@ def body_com_acc_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="body_pose") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_resolver=body_pose_resolver) def body_com_pose_b(self) -> wp.array: """Center of mass pose ``[pos, quat]`` of all bodies in their respective body's link frames. @@ -485,7 +489,7 @@ def root_com_ang_vel_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_resolver=body_xyz_resolver) def body_link_pos_w(self) -> wp.array: """Positions of all bodies in simulation world frame. 
@@ -497,7 +501,7 @@ def body_link_pos_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_resolver=body_quat_resolver) def body_link_quat_w(self) -> wp.array: """Orientation (x, y, z, w) of all bodies in simulation world frame. @@ -509,7 +513,7 @@ def body_link_quat_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_resolver=body_xyz_resolver) def body_link_lin_vel_w(self) -> wp.array: """Linear velocity of all bodies in simulation world frame. @@ -521,7 +525,7 @@ def body_link_lin_vel_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_resolver=body_xyz_resolver) def body_link_ang_vel_w(self) -> wp.array: """Angular velocity of all bodies in simulation world frame. @@ -533,7 +537,7 @@ def body_link_ang_vel_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_resolver=body_xyz_resolver) def body_com_pos_w(self) -> wp.array: """Positions of all bodies' center of mass in simulation world frame. 
@@ -545,7 +549,7 @@ def body_com_pos_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_resolver=body_quat_resolver) def body_com_quat_w(self) -> wp.array: """Orientation (x, y, z, w) of the principal axes of inertia of all bodies in simulation world frame. @@ -557,7 +561,7 @@ def body_com_quat_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_resolver=body_xyz_resolver) def body_com_lin_vel_w(self) -> wp.array: """Linear velocity of all bodies in simulation world frame. @@ -569,7 +573,7 @@ def body_com_lin_vel_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_resolver=body_xyz_resolver) def body_com_ang_vel_w(self) -> wp.array: """Angular velocity of all bodies in simulation world frame. @@ -581,7 +585,7 @@ def body_com_ang_vel_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_ACCELERATION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_ACCELERATION, element_names_resolver=body_xyz_resolver) def body_com_lin_acc_w(self) -> wp.array: """Linear acceleration of all bodies in simulation world frame. 
@@ -593,7 +597,7 @@ def body_com_lin_acc_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_ACCELERATION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_ACCELERATION, element_names_resolver=body_xyz_resolver) def body_com_ang_acc_w(self) -> wp.array: """Angular acceleration of all bodies in simulation world frame. @@ -605,7 +609,7 @@ def body_com_ang_acc_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_resolver=body_xyz_resolver) def body_com_pos_b(self) -> wp.array: """Center of mass position of all of the bodies in their respective link frames. @@ -617,7 +621,7 @@ def body_com_pos_b(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_resolver=body_quat_resolver) def body_com_quat_b(self) -> wp.array: """Orientation (x, y, z, w) of the principal axes of inertia of all of the bodies in their respective link frames. 
@@ -686,67 +690,67 @@ def root_ang_vel_b(self) -> wp.array: return self.root_com_ang_vel_b @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="body_pose") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_resolver=body_pose_resolver) def body_pose_w(self) -> wp.array: """Shorthand for :attr:`body_link_pose_w`.""" return self.body_link_pose_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_resolver=body_xyz_resolver) def body_pos_w(self) -> wp.array: """Shorthand for :attr:`body_link_pos_w`.""" return self.body_link_pos_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_resolver=body_quat_resolver) def body_quat_w(self) -> wp.array: """Shorthand for :attr:`body_link_quat_w`.""" return self.body_link_quat_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_source="body_wrench") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_resolver=body_wrench_resolver) def body_vel_w(self) -> wp.array: """Shorthand for :attr:`body_com_vel_w`.""" return self.body_com_vel_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_resolver=body_xyz_resolver) def body_lin_vel_w(self) -> wp.array: """Shorthand for :attr:`body_com_lin_vel_w`.""" return self.body_com_lin_vel_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_resolver=body_xyz_resolver) def body_ang_vel_w(self) -> wp.array: """Shorthand for :attr:`body_com_ang_vel_w`.""" return 
self.body_com_ang_vel_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ACC, element_names_source="body_wrench") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ACC, element_names_resolver=body_wrench_resolver) def body_acc_w(self) -> wp.array: """Shorthand for :attr:`body_com_acc_w`.""" return self.body_com_acc_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_ACCELERATION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_ACCELERATION, element_names_resolver=body_xyz_resolver) def body_lin_acc_w(self) -> wp.array: """Shorthand for :attr:`body_com_lin_acc_w`.""" return self.body_com_lin_acc_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_ACCELERATION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_ACCELERATION, element_names_resolver=body_xyz_resolver) def body_ang_acc_w(self) -> wp.array: """Shorthand for :attr:`body_com_ang_acc_w`.""" return self.body_com_ang_acc_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_resolver=body_xyz_resolver) def com_pos_b(self) -> wp.array: """Shorthand for :attr:`body_com_pos_b`.""" return self.body_com_pos_b @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_resolver=body_quat_resolver) def com_quat_b(self) -> wp.array: """Shorthand for :attr:`body_com_quat_b`.""" return self.body_com_quat_b diff --git a/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py b/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py index e1e669e1498a..1fd981067e52 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py +++ 
b/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py @@ -10,6 +10,10 @@ from leapp import InputKindEnum from isaaclab.utils.leapp.leapp_semantics import ( + body_pose_resolver, + body_quat_resolver, + body_wrench_resolver, + body_xyz_resolver, leapp_tensor_semantics, ) @@ -103,7 +107,7 @@ def default_body_state(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="body_pose") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_resolver=body_pose_resolver) def body_link_pose_w(self) -> wp.array: """Body link pose ``[pos, quat]`` in simulation world frame. @@ -117,7 +121,7 @@ def body_link_pose_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_source="body_pose") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_resolver=body_pose_resolver) def body_link_vel_w(self) -> wp.array: """Body link velocity ``[lin_vel, ang_vel]`` in simulation world frame. @@ -131,7 +135,7 @@ def body_link_vel_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="body_pose") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_resolver=body_pose_resolver) def body_com_pose_w(self) -> wp.array: """Body center of mass pose ``[pos, quat]`` in simulation world frame. @@ -145,7 +149,7 @@ def body_com_pose_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_source="body_pose") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_resolver=body_pose_resolver) def body_com_vel_w(self) -> wp.array: """Body center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. 
@@ -180,7 +184,7 @@ def body_com_state_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ACC, element_names_source="body_pose") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ACC, element_names_resolver=body_pose_resolver) def body_com_acc_w(self) -> wp.array: """Acceleration of all bodies ``[lin_acc, ang_acc]`` in the simulation world frame. @@ -193,7 +197,7 @@ def body_com_acc_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="body_pose") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_resolver=body_pose_resolver) def body_com_pose_b(self) -> wp.array: """Center of mass pose ``[pos, quat]`` of all bodies in their respective body's link frames. @@ -232,7 +236,7 @@ def body_inertia(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.VECTOR3D, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.VECTOR3D, element_names_resolver=body_xyz_resolver) def projected_gravity_b(self) -> wp.array: """Projection of the gravity direction on base frame. @@ -258,7 +262,7 @@ def heading_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_resolver=body_xyz_resolver) def body_link_lin_vel_b(self) -> wp.array: """Root link linear velocity in base frame. @@ -272,7 +276,7 @@ def body_link_lin_vel_b(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_resolver=body_xyz_resolver) def body_link_ang_vel_b(self) -> wp.array: """Root link angular velocity in base frame. 
@@ -286,7 +290,7 @@ def body_link_ang_vel_b(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_resolver=body_xyz_resolver) def body_com_lin_vel_b(self) -> wp.array: """Root center of mass linear velocity in base frame. @@ -300,7 +304,7 @@ def body_com_lin_vel_b(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_resolver=body_xyz_resolver) def body_com_ang_vel_b(self) -> wp.array: """Root center of mass angular velocity in base frame. @@ -318,7 +322,7 @@ def body_com_ang_vel_b(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_resolver=body_xyz_resolver) def body_link_pos_w(self) -> wp.array: """Positions of all bodies in simulation world frame. @@ -331,7 +335,7 @@ def body_link_pos_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_resolver=body_quat_resolver) def body_link_quat_w(self) -> wp.array: """Orientation (x, y, z, w) of all bodies in simulation world frame. @@ -344,7 +348,7 @@ def body_link_quat_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_resolver=body_xyz_resolver) def body_link_lin_vel_w(self) -> wp.array: """Linear velocity of all bodies in simulation world frame. 
@@ -357,7 +361,7 @@ def body_link_lin_vel_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_resolver=body_xyz_resolver) def body_link_ang_vel_w(self) -> wp.array: """Angular velocity of all bodies in simulation world frame. @@ -370,7 +374,7 @@ def body_link_ang_vel_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_resolver=body_xyz_resolver) def body_com_pos_w(self) -> wp.array: """Positions of all bodies' center of mass in simulation world frame. @@ -383,7 +387,7 @@ def body_com_pos_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_resolver=body_quat_resolver) def body_com_quat_w(self) -> wp.array: """Orientation (x, y, z, w) of the principal axes of inertia of all bodies in simulation world frame. @@ -396,7 +400,7 @@ def body_com_quat_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_resolver=body_xyz_resolver) def body_com_lin_vel_w(self) -> wp.array: """Linear velocity of all bodies in simulation world frame. 
@@ -409,7 +413,7 @@ def body_com_lin_vel_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_resolver=body_xyz_resolver) def body_com_ang_vel_w(self) -> wp.array: """Angular velocity of all bodies in simulation world frame. @@ -422,7 +426,7 @@ def body_com_ang_vel_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_ACCELERATION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_ACCELERATION, element_names_resolver=body_xyz_resolver) def body_com_lin_acc_w(self) -> wp.array: """Linear acceleration of all bodies in simulation world frame. @@ -435,7 +439,7 @@ def body_com_lin_acc_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_ACCELERATION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_ACCELERATION, element_names_resolver=body_xyz_resolver) def body_com_ang_acc_w(self) -> wp.array: """Angular acceleration of all bodies in simulation world frame. @@ -448,7 +452,7 @@ def body_com_ang_acc_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_resolver=body_xyz_resolver) def body_com_pos_b(self) -> wp.array: """Center of mass position of all of the bodies in their respective link frames. 
@@ -461,7 +465,7 @@ def body_com_pos_b(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_resolver=body_quat_resolver) def body_com_quat_b(self) -> wp.array: """Orientation (x, y, z, w) of the principal axes of inertia of all of the bodies in their respective link frames. @@ -478,67 +482,67 @@ def body_com_quat_b(self) -> wp.array: """ @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="body_pose") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_resolver=body_pose_resolver) def body_pose_w(self) -> wp.array: """Shorthand for :attr:`body_link_pose_w`.""" return self.body_link_pose_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_resolver=body_xyz_resolver) def body_pos_w(self) -> wp.array: """Shorthand for :attr:`body_link_pos_w`.""" return self.body_link_pos_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_resolver=body_quat_resolver) def body_quat_w(self) -> wp.array: """Shorthand for :attr:`body_link_quat_w`.""" return self.body_link_quat_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_source="body_wrench") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_resolver=body_wrench_resolver) def body_vel_w(self) -> wp.array: """Shorthand for :attr:`body_com_vel_w`.""" return self.body_com_vel_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_resolver=body_xyz_resolver) def body_lin_vel_w(self) -> 
wp.array: """Shorthand for :attr:`body_com_lin_vel_w`.""" return self.body_com_lin_vel_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_resolver=body_xyz_resolver) def body_ang_vel_w(self) -> wp.array: """Shorthand for :attr:`body_com_ang_vel_w`.""" return self.body_com_ang_vel_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ACC, element_names_source="body_wrench") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ACC, element_names_resolver=body_wrench_resolver) def body_acc_w(self) -> wp.array: """Shorthand for :attr:`body_com_acc_w`.""" return self.body_com_acc_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_ACCELERATION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_ACCELERATION, element_names_resolver=body_xyz_resolver) def body_lin_acc_w(self) -> wp.array: """Shorthand for :attr:`body_com_lin_acc_w`.""" return self.body_com_lin_acc_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_ACCELERATION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_ACCELERATION, element_names_resolver=body_xyz_resolver) def body_ang_acc_w(self) -> wp.array: """Shorthand for :attr:`body_com_ang_acc_w`.""" return self.body_com_ang_acc_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_resolver=body_xyz_resolver) def com_pos_b(self) -> wp.array: """Shorthand for :attr:`body_com_pos_b`.""" return self.body_com_pos_b @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_resolver=body_quat_resolver) def com_quat_b(self) -> wp.array: """Shorthand for 
:attr:`body_com_quat_b`.""" return self.body_com_quat_b @@ -589,7 +593,7 @@ def default_object_state(self) -> wp.array: return self.default_body_state @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="body_pose") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_resolver=body_pose_resolver) def object_link_pose_w(self): """Deprecated property. Please use :attr:`body_link_pose_w` instead.""" warnings.warn( @@ -601,7 +605,7 @@ def object_link_pose_w(self): return self.body_link_pose_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_source="body_pose") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_resolver=body_pose_resolver) def object_link_vel_w(self): """Deprecated property. Please use :attr:`body_link_vel_w` instead.""" warnings.warn( @@ -613,7 +617,7 @@ def object_link_vel_w(self): return self.body_link_vel_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="body_pose") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_resolver=body_pose_resolver) def object_com_pose_w(self): """Deprecated property. Please use :attr:`body_com_pose_w` instead.""" warnings.warn( @@ -625,7 +629,7 @@ def object_com_pose_w(self): return self.body_com_pose_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_source="body_pose") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_resolver=body_pose_resolver) def object_com_vel_w(self): """Deprecated property. Please use :attr:`body_com_vel_w` instead.""" warnings.warn( @@ -672,7 +676,7 @@ def object_com_state_w(self): return self.body_com_state_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ACC, element_names_source="body_pose") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ACC, element_names_resolver=body_pose_resolver) def object_com_acc_w(self): """Deprecated property. 
Please use :attr:`body_com_acc_w` instead.""" warnings.warn( @@ -684,7 +688,7 @@ def object_com_acc_w(self): return self.body_com_acc_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="body_pose") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_resolver=body_pose_resolver) def object_com_pose_b(self): """Deprecated property. Please use :attr:`body_com_pose_b` instead.""" warnings.warn( @@ -696,7 +700,7 @@ def object_com_pose_b(self): return self.body_com_pose_b @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_resolver=body_xyz_resolver) def object_link_pos_w(self) -> wp.array: """Deprecated property. Please use :attr:`body_link_pos_w` instead.""" warnings.warn( @@ -708,7 +712,7 @@ def object_link_pos_w(self) -> wp.array: return self.body_link_pos_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_resolver=body_quat_resolver) def object_link_quat_w(self) -> wp.array: """Deprecated property. Please use :attr:`body_link_quat_w` instead.""" warnings.warn( @@ -720,7 +724,7 @@ def object_link_quat_w(self) -> wp.array: return self.body_link_quat_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_resolver=body_xyz_resolver) def object_link_lin_vel_w(self) -> wp.array: """Deprecated property. 
Please use :attr:`body_link_lin_vel_w` instead.""" warnings.warn( @@ -732,7 +736,7 @@ def object_link_lin_vel_w(self) -> wp.array: return self.body_link_lin_vel_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_resolver=body_xyz_resolver) def object_link_ang_vel_w(self) -> wp.array: """Deprecated property. Please use :attr:`body_link_ang_vel_w` instead.""" warnings.warn( @@ -744,7 +748,7 @@ def object_link_ang_vel_w(self) -> wp.array: return self.body_link_ang_vel_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_resolver=body_xyz_resolver) def object_com_pos_w(self) -> wp.array: """Deprecated property. Please use :attr:`body_com_pos_w` instead.""" warnings.warn( @@ -756,7 +760,7 @@ def object_com_pos_w(self) -> wp.array: return self.body_com_pos_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_resolver=body_quat_resolver) def object_com_quat_w(self) -> wp.array: """Deprecated property. Please use :attr:`body_com_quat_w` instead.""" warnings.warn( @@ -768,7 +772,7 @@ def object_com_quat_w(self) -> wp.array: return self.body_com_quat_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_resolver=body_xyz_resolver) def object_com_lin_vel_w(self) -> wp.array: """Deprecated property. 
Please use :attr:`body_com_lin_vel_w` instead.""" warnings.warn( @@ -780,7 +784,7 @@ def object_com_lin_vel_w(self) -> wp.array: return self.body_com_lin_vel_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_resolver=body_xyz_resolver) def object_com_ang_vel_w(self) -> wp.array: """Deprecated property. Please use :attr:`body_com_ang_vel_w` instead.""" warnings.warn( @@ -792,7 +796,7 @@ def object_com_ang_vel_w(self) -> wp.array: return self.body_com_ang_vel_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_ACCELERATION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_ACCELERATION, element_names_resolver=body_xyz_resolver) def object_com_lin_acc_w(self) -> wp.array: """Deprecated property. Please use :attr:`body_com_lin_acc_w` instead.""" warnings.warn( @@ -804,7 +808,7 @@ def object_com_lin_acc_w(self) -> wp.array: return self.body_com_lin_acc_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_ACCELERATION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_ACCELERATION, element_names_resolver=body_xyz_resolver) def object_com_ang_acc_w(self) -> wp.array: """Deprecated property. Please use :attr:`body_com_ang_acc_w` instead.""" warnings.warn( @@ -816,7 +820,7 @@ def object_com_ang_acc_w(self) -> wp.array: return self.body_com_ang_acc_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_resolver=body_xyz_resolver) def object_com_pos_b(self) -> wp.array: """Deprecated property. 
Please use :attr:`body_com_pos_b` instead.""" warnings.warn( @@ -828,7 +832,7 @@ def object_com_pos_b(self) -> wp.array: return self.body_com_pos_b @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_resolver=body_quat_resolver) def object_com_quat_b(self) -> wp.array: """Deprecated property. Please use :attr:`body_com_quat_b` instead.""" warnings.warn( @@ -840,7 +844,7 @@ def object_com_quat_b(self) -> wp.array: return self.body_com_quat_b @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_resolver=body_xyz_resolver) def object_link_lin_vel_b(self) -> wp.array: """Deprecated property. Please use :attr:`body_link_lin_vel_b` instead.""" warnings.warn( @@ -852,7 +856,7 @@ def object_link_lin_vel_b(self) -> wp.array: return self.body_link_lin_vel_b @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_resolver=body_xyz_resolver) def object_link_ang_vel_b(self) -> wp.array: """Deprecated property. Please use :attr:`body_link_ang_vel_b` instead.""" warnings.warn( @@ -864,7 +868,7 @@ def object_link_ang_vel_b(self) -> wp.array: return self.body_link_ang_vel_b @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_resolver=body_xyz_resolver) def object_com_lin_vel_b(self) -> wp.array: """Deprecated property. 
Please use :attr:`body_com_lin_vel_b` instead.""" warnings.warn( @@ -876,7 +880,7 @@ def object_com_lin_vel_b(self) -> wp.array: return self.body_com_lin_vel_b @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_resolver=body_xyz_resolver) def object_com_ang_vel_b(self) -> wp.array: """Deprecated property. Please use :attr:`body_com_ang_vel_b` instead.""" warnings.warn( @@ -888,7 +892,7 @@ def object_com_ang_vel_b(self) -> wp.array: return self.body_com_ang_vel_b @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="body_pose") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_resolver=body_pose_resolver) def object_pose_w(self) -> wp.array: """Deprecated property. Please use :attr:`body_link_pose_w` instead.""" warnings.warn( @@ -899,7 +903,7 @@ def object_pose_w(self) -> wp.array: return self.body_link_pose_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_resolver=body_xyz_resolver) def object_pos_w(self) -> wp.array: """Deprecated property. Please use :attr:`body_link_pos_w` instead.""" warnings.warn( @@ -910,7 +914,7 @@ def object_pos_w(self) -> wp.array: return self.body_link_pos_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="body_quat") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_resolver=body_quat_resolver) def object_quat_w(self) -> wp.array: """Deprecated property. 
Please use :attr:`body_link_quat_w` instead.""" warnings.warn( @@ -921,7 +925,7 @@ def object_quat_w(self) -> wp.array: return self.body_link_quat_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_source="body_wrench") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_VEL, element_names_resolver=body_wrench_resolver) def object_vel_w(self) -> wp.array: """Deprecated property. Please use :attr:`body_com_vel_w` instead.""" warnings.warn( @@ -932,7 +936,7 @@ def object_vel_w(self) -> wp.array: return self.body_com_vel_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_resolver=body_xyz_resolver) def object_lin_vel_w(self) -> wp.array: """Deprecated property. Please use :attr:`body_com_lin_vel_w` instead.""" warnings.warn( @@ -944,7 +948,7 @@ def object_lin_vel_w(self) -> wp.array: return self.body_com_lin_vel_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_resolver=body_xyz_resolver) def object_ang_vel_w(self) -> wp.array: """Deprecated property. Please use :attr:`body_com_ang_vel_w` instead.""" warnings.warn( @@ -956,7 +960,7 @@ def object_ang_vel_w(self) -> wp.array: return self.body_com_ang_vel_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names_resolver=body_xyz_resolver) def object_lin_vel_b(self) -> wp.array: """Deprecated property. 
Please use :attr:`body_com_lin_vel_b` instead.""" warnings.warn( @@ -968,7 +972,7 @@ def object_lin_vel_b(self) -> wp.array: return self.body_com_lin_vel_b @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names_resolver=body_xyz_resolver) def object_ang_vel_b(self) -> wp.array: """Deprecated property. Please use :attr:`body_com_ang_vel_b` instead.""" warnings.warn( @@ -980,7 +984,7 @@ def object_ang_vel_b(self) -> wp.array: return self.body_com_ang_vel_b @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ACC, element_names_source="body_wrench") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ACC, element_names_resolver=body_wrench_resolver) def object_acc_w(self) -> wp.array: """Deprecated property. Please use :attr:`body_com_acc_w` instead.""" warnings.warn( @@ -991,7 +995,7 @@ def object_acc_w(self) -> wp.array: return self.body_com_acc_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_ACCELERATION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_ACCELERATION, element_names_resolver=body_xyz_resolver) def object_lin_acc_w(self) -> wp.array: """Deprecated property. Please use :attr:`body_com_lin_acc_w` instead.""" warnings.warn( @@ -1003,7 +1007,7 @@ def object_lin_acc_w(self) -> wp.array: return self.body_com_lin_acc_w @property - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_ACCELERATION, element_names_source="body_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_ACCELERATION, element_names_resolver=body_xyz_resolver) def object_ang_acc_w(self) -> wp.array: """Deprecated property. 
Please use :attr:`body_com_ang_acc_w` instead.""" warnings.warn( diff --git a/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer_data.py b/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer_data.py index 15b8ce1356c0..ee125cb7a1cf 100644 --- a/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer_data.py +++ b/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer_data.py @@ -17,6 +17,9 @@ QUAT_WXYZ_ELEMENT_NAMES, XYZ_ELEMENT_NAMES, leapp_tensor_semantics, + target_frame_pose_resolver, + target_frame_quat_resolver, + target_frame_xyz_resolver, ) @@ -38,7 +41,7 @@ def target_frame_names(self) -> list[str]: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="target_frame_pose") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_resolver=target_frame_pose_resolver) def target_pose_source(self) -> wp.array | None: """Pose of the target frame(s) relative to source frame. @@ -49,7 +52,7 @@ def target_pose_source(self) -> wp.array | None: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="target_frame_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_resolver=target_frame_xyz_resolver) def target_pos_source(self) -> wp.array: """Position of the target frame(s) relative to source frame. @@ -60,7 +63,7 @@ def target_pos_source(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="target_frame_quat") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_resolver=target_frame_quat_resolver) def target_quat_source(self) -> wp.array: """Orientation of the target frame(s) relative to source frame. 
@@ -71,7 +74,7 @@ def target_quat_source(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_source="target_frame_pose") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names_resolver=target_frame_pose_resolver) def target_pose_w(self) -> wp.array | None: """Pose of the target frame(s) after offset in world frame. @@ -82,7 +85,7 @@ def target_pose_w(self) -> wp.array | None: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_source="target_frame_xyz") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names_resolver=target_frame_xyz_resolver) def target_pos_w(self) -> wp.array: """Position of the target frame(s) after offset in world frame. @@ -93,7 +96,7 @@ def target_pos_w(self) -> wp.array: @property @abstractmethod - @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_source="target_frame_quat") + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names_resolver=target_frame_quat_resolver) def target_quat_w(self) -> wp.array: """Orientation of the target frame(s) after offset in world frame. 
diff --git a/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_data.py b/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_data.py index 27f461f783da..7b5c3dfe6ab8 100644 --- a/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_data.py +++ b/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_data.py @@ -7,7 +7,11 @@ import torch -from isaaclab.utils.leapp.leapp_semantics import leapp_tensor_semantics +from isaaclab.utils.leapp.leapp_semantics import ( + QUAT_WXYZ_ELEMENT_NAMES, + XYZ_ELEMENT_NAMES, + leapp_tensor_semantics, +) @dataclass @@ -32,13 +36,13 @@ class RayCasterData: """ @property - @leapp_tensor_semantics(kind="state/sensor/position", element_names_source="xyz") + @leapp_tensor_semantics(kind="state/sensor/position", element_names=XYZ_ELEMENT_NAMES) def pos_w(self) -> torch.Tensor: """Position of the sensor origin in world frame.""" return self._pos_w @property - @leapp_tensor_semantics(kind="state/sensor/rotation", element_names_source="quat_wxyz") + @leapp_tensor_semantics(kind="state/sensor/rotation", element_names=QUAT_WXYZ_ELEMENT_NAMES) def quat_w(self) -> torch.Tensor: """Orientation of the sensor origin in quaternion (w, x, y, z) in world frame.""" return self._quat_w diff --git a/source/isaaclab/isaaclab/utils/leapp/export_annotator.py b/source/isaaclab/isaaclab/utils/leapp/export_annotator.py index 265b14248c87..df68f1d0b237 100644 --- a/source/isaaclab/isaaclab/utils/leapp/export_annotator.py +++ b/source/isaaclab/isaaclab/utils/leapp/export_annotator.py @@ -45,23 +45,19 @@ from isaaclab.assets.articulation.base_articulation import BaseArticulation from isaaclab.managers import ManagerTermBase -from .leapp_semantics import resolve_leapp_element_names from .proxy import _ArticulationWriteProxy, _DataProxy, _EnvProxy, _ManagerTermProxy -from .utils import ensure_torch_tensor, patch_warp_to_torch_passthrough +from .utils import ( + build_command_connection, + build_write_connection, + ensure_torch_tensor, + 
patch_warp_to_torch_passthrough, + select_element_names, +) if TYPE_CHECKING: from isaaclab.envs import ManagerBasedEnv -# Reuse the generic joint-name resolver for kp/kd outputs by providing the -# same ``element_names_source`` contract as articulation getters/writers. -_GAIN_JOINT_SEMANTICS = type( - "GainJointSemantics", - (), - {"element_names": None, "element_names_source": "joint_names"}, -)() - - # ══════════════════════════════════════════════════════════════════ # ExportPatcher # ══════════════════════════════════════════════════════════════════ @@ -108,6 +104,7 @@ def __init__(self, task_name: str, export_method: str, required_obs_groups: set[ self._fallback_term_names: set[str] = set() self._pending_action_output_export: bool = False self._uses_last_action_state: bool = False + self._action_term_scene_keys: dict[str, str] = {} def setup(self, env): """Patch observation and action managers on the unwrapped env.""" @@ -266,6 +263,7 @@ def _patch_action_manager(self, action_manager, cache): scene_key = self._resolve_scene_entity_key(scene, real_asset) or "ego" data_proxy = _DataProxy( real_asset.data, + scene_key, self.task_name, self._data_property_resolution_cache, cache, @@ -273,12 +271,14 @@ def _patch_action_manager(self, action_manager, cache): ) term._asset = _ArticulationWriteProxy( real_asset=real_asset, + entity_name=scene_key, term_name=term_name, output_cache=self._action_output_cache, method_resolution_cache=self._write_method_resolution_cache, captured_write_term_names=self._captured_write_term_names, data_proxy=data_proxy, ) + self._action_term_scene_keys[term_name] = scene_key self._patch_action_manager_methods(action_manager) @@ -400,6 +400,7 @@ def wrapped(env, command_name=None, **kwargs): ref=result, kind=getattr(command_cfg, "cmd_kind", None), element_names=getattr(command_cfg, "element_names", None), + extra=build_command_connection(leapp_input_name), ) return annotate.input_tensors(task_name, sem) @@ -408,8 +409,7 @@ def wrapped(env, 
command_name=None, **kwargs): # ── Output collection ───────────────────────────────────────── - @staticmethod - def _collect_action_outputs(action_manager) -> list[TensorSemantics]: + def _collect_action_outputs(self, action_manager) -> list[TensorSemantics]: """Collect non-writer action tensors that should be exported (e.g. OSC dynamic gains).""" tensors: list[TensorSemantics] = [] for term_name, term in action_manager._terms.items(): @@ -418,22 +418,15 @@ def _collect_action_outputs(action_manager) -> list[TensorSemantics]: asset = getattr(term, "_asset", None) real_asset = getattr(asset, "_real_asset", asset) joint_ids = getattr(term, "_joint_ids", None) - joint_name_context = None - if real_asset is not None and hasattr(real_asset, "joint_names"): - joint_name_context = _JointNameContext(real_asset.joint_names, joint_ids) + joint_names = getattr(real_asset, "joint_names", None) if real_asset else None + scene_key = self._action_term_scene_keys.get(term_name, "ego") tensors.append( TensorSemantics( name=f"{term_name}_kp_gains", ref=torch.diagonal(osc._motion_p_gains_task, dim1=-2, dim2=-1), kind="kp", - element_names=( - resolve_leapp_element_names( - _GAIN_JOINT_SEMANTICS, - joint_name_context, - ) - if joint_name_context is not None - else None - ), + element_names=select_element_names(joint_names, joint_ids), + extra=build_write_connection(scene_key, "write_joint_stiffness_to_sim"), ) ) tensors.append( @@ -441,14 +434,8 @@ def _collect_action_outputs(action_manager) -> list[TensorSemantics]: name=f"{term_name}_kd_gains", ref=torch.diagonal(osc._motion_d_gains_task, dim1=-2, dim2=-1), kind="kd", - element_names=( - resolve_leapp_element_names( - _GAIN_JOINT_SEMANTICS, - joint_name_context, - ) - if joint_name_context is not None - else None - ), + element_names=select_element_names(joint_names, joint_ids), + extra=build_write_connection(scene_key, "write_joint_damping_to_sim"), ) ) return tensors @@ -488,8 +475,9 @@ def 
_collect_processed_action_fallbacks(self, action_manager) -> list[TensorSema self._fallback_term_names = fallback_terms return tensors - @staticmethod - def _collect_action_static_outputs(action_manager, skip_terms: set[str] | None = None) -> list[TensorSemantics]: + def _collect_action_static_outputs( + self, action_manager, skip_terms: set[str] | None = None + ) -> list[TensorSemantics]: """Collect static kp/kd gain values from action terms for export metadata. Terms in ``skip_terms`` are excluded — these are terms that fell back @@ -508,9 +496,8 @@ def _collect_action_static_outputs(action_manager, skip_terms: set[str] | None = if real_asset and hasattr(real_asset, "data"): data = real_asset.data joint_ids = getattr(term, "_joint_ids", None) - joint_name_context = None - if hasattr(real_asset, "joint_names"): - joint_name_context = _JointNameContext(real_asset.joint_names, joint_ids) + joint_names = getattr(real_asset, "joint_names", None) + scene_key = self._action_term_scene_keys.get(term_name, "ego") if hasattr(data, "default_joint_stiffness") and data.default_joint_stiffness is not None: gains = ensure_torch_tensor(data.default_joint_stiffness) static_values.append( @@ -518,14 +505,8 @@ def _collect_action_static_outputs(action_manager, skip_terms: set[str] | None = name=f"{term_name}_kp_gains", ref=gains[:, joint_ids] if joint_ids else gains, kind="kp", - element_names=( - resolve_leapp_element_names( - _GAIN_JOINT_SEMANTICS, - joint_name_context, - ) - if joint_name_context is not None - else None - ), + element_names=select_element_names(joint_names, joint_ids), + extra=build_write_connection(scene_key, "write_joint_stiffness_to_sim"), ) ) if hasattr(data, "default_joint_damping") and data.default_joint_damping is not None: @@ -535,14 +516,8 @@ def _collect_action_static_outputs(action_manager, skip_terms: set[str] | None = name=f"{term_name}_kd_gains", ref=gains[:, joint_ids] if joint_ids else gains, kind="kd", - element_names=( - 
resolve_leapp_element_names( - _GAIN_JOINT_SEMANTICS, - joint_name_context, - ) - if joint_name_context is not None - else None - ), + element_names=select_element_names(joint_names, joint_ids), + extra=build_write_connection(scene_key, "write_joint_damping_to_sim"), ) ) return static_values @@ -553,16 +528,6 @@ def _collect_action_static_outputs(action_manager, skip_terms: set[str] | None = # ══════════════════════════════════════════════════════════════════ -class _JointNameContext: - """Lightweight stand-in for resolving runtime joint name subsets in ``resolve_leapp_element_names``.""" - - __slots__ = ("joint_names", "_joint_ids") - - def __init__(self, joint_names: list[str], joint_ids): - self.joint_names = joint_names - self._joint_ids = joint_ids - - # ══════════════════════════════════════════════════════════════════ # Public entry point # ══════════════════════════════════════════════════════════════════ diff --git a/source/isaaclab/isaaclab/utils/leapp/leapp_semantics.py b/source/isaaclab/isaaclab/utils/leapp/leapp_semantics.py index 009d5a289491..eae235e453ca 100644 --- a/source/isaaclab/isaaclab/utils/leapp/leapp_semantics.py +++ b/source/isaaclab/isaaclab/utils/leapp/leapp_semantics.py @@ -8,10 +8,11 @@ from __future__ import annotations from collections.abc import Callable -from contextlib import suppress from dataclasses import dataclass from typing import Any +from .utils import select_element_names + @dataclass(frozen=True) class LeappTensorSemantics: @@ -19,7 +20,7 @@ class LeappTensorSemantics: kind: Any = None element_names: list[str] | list[list[str]] | None = None - element_names_source: str | None = None + element_names_resolver: Callable | None = None const: bool = False @@ -33,7 +34,7 @@ def leapp_tensor_semantics( *, kind: Any = None, element_names: list[str] | list[list[str]] | None = None, - element_names_source: str | None = None, + element_names_resolver: Callable | None = None, const: bool = False, ) -> Callable: """Attach LEAPP 
semantic metadata to a raw tensor-producing function.""" @@ -41,7 +42,7 @@ def leapp_tensor_semantics( semantics = LeappTensorSemantics( kind=kind, element_names=element_names, - element_names_source=element_names_source, + element_names_resolver=element_names_resolver, const=const, ) @@ -52,92 +53,55 @@ def _apply(func: Callable) -> Callable: return _apply -def _select_element_names(names: list[str] | None, indices: Any = None) -> list[str] | None: - """Select element names using optional runtime indices.""" - if names is None: - return None - if indices is None or indices == slice(None): - return list(names) - if isinstance(indices, slice): - return list(names[indices]) - with suppress(AttributeError): - indices = indices.tolist() - if isinstance(indices, (list, tuple)): - return [names[int(index)] for index in indices] - if isinstance(indices, int): - return [names[indices]] - return None - - def resolve_leapp_element_names(semantics: LeappTensorSemantics | None, data_self) -> list | None: """Resolve element names from attached semantics and a tensor-producing object.""" if semantics is None: return None if semantics.element_names is not None: return semantics.element_names - - source = semantics.element_names_source - if source == "xyz": - return XYZ_ELEMENT_NAMES - if source == "quat_wxyz": - return QUAT_WXYZ_ELEMENT_NAMES - if source == "pose7": - return POSE7_ELEMENT_NAMES - if source == "joint_names": - return _select_element_names( - getattr(data_self, "joint_names", getattr(data_self, "_joint_names", None)), - getattr(data_self, "_joint_ids", None), - ) - if source == "body_names": - return _select_element_names( - getattr(data_self, "body_names", getattr(data_self, "_body_names", None)), - getattr(data_self, "_body_ids", None), - ) - if source == "body_xyz": - body_names = _select_element_names( - getattr(data_self, "body_names", getattr(data_self, "_body_names", None)), - getattr(data_self, "_body_ids", None), - ) - if body_names is None: - return None 
- return [body_names, XYZ_ELEMENT_NAMES] - if source == "body_pose": - body_names = _select_element_names( - getattr(data_self, "body_names", getattr(data_self, "_body_names", None)), - getattr(data_self, "_body_ids", None), - ) - if body_names is None: - return None - return [body_names, POSE7_ELEMENT_NAMES] - if source == "body_quat": - body_names = _select_element_names( - getattr(data_self, "body_names", getattr(data_self, "_body_names", None)), - getattr(data_self, "_body_ids", None), - ) - if body_names is None: - return None - return [body_names, QUAT_WXYZ_ELEMENT_NAMES] - if source == "body_wrench": - body_names = _select_element_names( - getattr(data_self, "body_names", getattr(data_self, "_body_names", None)), - getattr(data_self, "_body_ids", None), - ) - if body_names is None: - return None - return [body_names, WRENCH6_ELEMENT_NAMES] - if source == "target_frame_xyz": - frame_names = getattr(data_self, "target_frame_names", None) - if frame_names is None: - return None - return [list(frame_names), XYZ_ELEMENT_NAMES] - if source == "target_frame_quat": - frame_names = getattr(data_self, "target_frame_names", None) - if frame_names is None: - return None - return [list(frame_names), QUAT_WXYZ_ELEMENT_NAMES] - if source == "target_frame_pose": - frame_names = getattr(data_self, "target_frame_names", None) - if frame_names is None: - return None - return [list(frame_names), POSE7_ELEMENT_NAMES] + if semantics.element_names_resolver is not None: + return semantics.element_names_resolver(data_self) return None + + +# ── Predefined element-name resolvers ───────────────────────────── + + +def joint_names_resolver(data_self) -> list[str] | None: + """Resolve joint element names from the data object at trace time.""" + return select_element_names( + getattr(data_self, "joint_names", getattr(data_self, "_joint_names", None)), + getattr(data_self, "_joint_ids", None), + ) + + +def body_names_resolver(data_self) -> list[str] | None: + """Resolve body element names 
from the data object at trace time.""" + return select_element_names( + getattr(data_self, "body_names", getattr(data_self, "_body_names", None)), + getattr(data_self, "_body_ids", None), + ) + + +def _compound_resolver(outer_fn: Callable, inner_names: list[str]) -> Callable: + """Build a 2D resolver: ``[outer_names, inner_constant_names]``.""" + + def resolver(data_self) -> list | None: + outer = outer_fn(data_self) + return [outer, inner_names] if outer else None + + return resolver + + +def _target_frame_names(data_self) -> list[str] | None: + names = getattr(data_self, "target_frame_names", None) + return list(names) if names is not None else None + + +body_xyz_resolver = _compound_resolver(body_names_resolver, XYZ_ELEMENT_NAMES) +body_pose_resolver = _compound_resolver(body_names_resolver, POSE7_ELEMENT_NAMES) +body_quat_resolver = _compound_resolver(body_names_resolver, QUAT_WXYZ_ELEMENT_NAMES) +body_wrench_resolver = _compound_resolver(body_names_resolver, WRENCH6_ELEMENT_NAMES) +target_frame_xyz_resolver = _compound_resolver(_target_frame_names, XYZ_ELEMENT_NAMES) +target_frame_quat_resolver = _compound_resolver(_target_frame_names, QUAT_WXYZ_ELEMENT_NAMES) +target_frame_pose_resolver = _compound_resolver(_target_frame_names, POSE7_ELEMENT_NAMES) diff --git a/source/isaaclab/isaaclab/utils/leapp/proxy.py b/source/isaaclab/isaaclab/utils/leapp/proxy.py index 40d8344a82c5..505e359347b3 100644 --- a/source/isaaclab/isaaclab/utils/leapp/proxy.py +++ b/source/isaaclab/isaaclab/utils/leapp/proxy.py @@ -16,7 +16,7 @@ from isaaclab.managers import ManagerTermBase from .leapp_semantics import resolve_leapp_element_names -from .utils import ensure_torch_tensor +from .utils import build_state_connection, build_write_connection, ensure_torch_tensor def _resolve_annotated_property( @@ -136,12 +136,14 @@ class _DataProxy: def __init__( self, real_data: Any, + entity_name: str, task_name: str, property_resolution_cache: dict[tuple[type, str], tuple[Callable, Any] | None], 
cache: dict, input_name_resolver: Callable, ): object.__setattr__(self, "_real_data", real_data) + object.__setattr__(self, "_entity_name", entity_name) object.__setattr__(self, "_task_name", task_name) object.__setattr__(self, "_property_resolution_cache", property_resolution_cache) object.__setattr__(self, "_cache", cache) @@ -173,6 +175,7 @@ def __getattr__(self, name): ref=result, kind=semantics_meta.kind, element_names=resolve_leapp_element_names(semantics_meta, real_data), + extra=build_state_connection(object.__getattribute__(self, "_entity_name"), name), ) annotated = annotate.input_tensors(object.__getattribute__(self, "_task_name"), sem) cache[cache_key] = annotated @@ -227,6 +230,7 @@ def __getitem__(self, key): return entity data_proxy = _DataProxy( data, + key, object.__getattribute__(self, "_task_name"), object.__getattribute__(self, "_property_resolution_cache"), object.__getattribute__(self, "_cache"), @@ -292,6 +296,7 @@ def _maybe_proxy_entity(self, key: str, entity: Any): cache = object.__getattribute__(self, "_cache") data_proxy = _DataProxy( data, + key, object.__getattribute__(self, "_task_name"), object.__getattribute__(self, "_property_resolution_cache"), cache, @@ -448,6 +453,7 @@ class _ArticulationWriteProxy: def __init__( self, real_asset: Any, + entity_name: str, term_name: str, output_cache: list[TensorSemantics], method_resolution_cache: dict[tuple[type, str], tuple[Callable, Any, inspect.Signature] | None], @@ -455,6 +461,7 @@ def __init__( data_proxy: _DataProxy, ): object.__setattr__(self, "_real_asset", real_asset) + object.__setattr__(self, "_entity_name", entity_name) object.__setattr__(self, "_term_name", term_name) object.__setattr__(self, "_output_cache", output_cache) object.__setattr__(self, "_method_resolution_cache", method_resolution_cache) @@ -501,6 +508,10 @@ def interceptor(*args, **kwargs): semantics_meta, _WriteJointNameContext(real_asset.joint_names, joint_ids), ), + extra=build_write_connection( + 
object.__getattribute__(self, "_entity_name"), + name, + ), ) ) captured_write_term_names.add(term_name) diff --git a/source/isaaclab/isaaclab/utils/leapp/utils.py b/source/isaaclab/isaaclab/utils/leapp/utils.py index 53c86b91fe00..0d20fd14eddb 100644 --- a/source/isaaclab/isaaclab/utils/leapp/utils.py +++ b/source/isaaclab/isaaclab/utils/leapp/utils.py @@ -5,10 +5,30 @@ from __future__ import annotations +from contextlib import suppress +from typing import Any + import torch import warp as wp +def select_element_names(names: list[str] | None, indices: Any = None) -> list[str] | None: + """Select element names using optional runtime indices.""" + if names is None: + return None + if indices is None or indices == slice(None): + return list(names) + if isinstance(indices, slice): + return list(names[indices]) + with suppress(AttributeError): + indices = indices.tolist() + if isinstance(indices, (list, tuple)): + return [names[int(index)] for index in indices] + if isinstance(indices, int): + return [names[indices]] + return None + + def ensure_torch_tensor(value): """Convert Warp arrays to torch tensors while leaving torch tensors unchanged.""" if isinstance(value, torch.Tensor): @@ -33,3 +53,23 @@ def patched_to_torch(value, *args, **kwargs): patched_to_torch._leapp_passthrough_patch = True # type: ignore[attr-defined] wp.to_torch = patched_to_torch + + +# ══════════════════════════════════════════════════════════════════ +# Connection Builders +# ══════════════════════════════════════════════════════════════════ + + +def build_state_connection(entity_name: str, property_name: str) -> dict[str, str]: + """Return a compact deployment connection string for a state property.""" + return {"isaaclab_connection": f"state:{entity_name}:{property_name}"} + + +def build_command_connection(command_name: str) -> dict[str, str]: + """Return a compact deployment connection string for a command term.""" + return {"isaaclab_connection": f"command:{command_name}"} + + +def 
build_write_connection(entity_name: str, method_name: str) -> dict[str, str]: + """Return a compact deployment connection string for an articulation write target.""" + return {"isaaclab_connection": f"write:{entity_name}:{method_name}"} From 2ee62de561def173727802fdeb9d78feae8b84bb Mon Sep 17 00:00:00 2001 From: Frank Lai Date: Tue, 24 Mar 2026 10:19:43 -0700 Subject: [PATCH 08/20] fix direct deployment for new env architecture --- scripts/reinforcement_learning/deploy.py | 22 +- .../isaaclab/envs/direct_deployment_env.py | 269 +++++++++--------- .../isaaclab/utils/leapp/export_annotator.py | 8 +- 3 files changed, 146 insertions(+), 153 deletions(-) diff --git a/scripts/reinforcement_learning/deploy.py b/scripts/reinforcement_learning/deploy.py index cfa05bd549c8..bd6a40aa16db 100644 --- a/scripts/reinforcement_learning/deploy.py +++ b/scripts/reinforcement_learning/deploy.py @@ -3,15 +3,7 @@ # # SPDX-License-Identifier: BSD-3-Clause -"""Deploy a LEAPP-exported policy in an Isaac Lab simulation. 
- -Usage:: - - ./isaaclab.sh -p scripts/reinforcement_learning/deploy.py \ - --task Isaac-Velocity-Flat-Anymal-B-v0 \ - --leapp_model .pretrained_checkpoints/rsl_rl/Isaac-Velocity-Flat-Anymal-B-v0/Isaac-Velocity-Flat-Anymal-B-v0/Isaac-Velocity-Flat-Anymal-B-v0.yaml \ - --headless -""" +"""Deploy a LEAPP-exported policy in an Isaac Lab simulation.""" """Launch Isaac Sim Simulator first.""" @@ -60,11 +52,13 @@ def main(): # ── Run loop ────────────────────────────────────────────────── env.reset() - with torch.inference_mode(): - while simulation_app.is_running(): - env.step() - - env.close() + try: + with torch.inference_mode(): + while simulation_app.is_running(): + env.step() + env.close() + except KeyboardInterrupt: + pass if __name__ == "__main__": diff --git a/source/isaaclab/isaaclab/envs/direct_deployment_env.py b/source/isaaclab/isaaclab/envs/direct_deployment_env.py index 9cc56bdf0174..9d09b24916aa 100644 --- a/source/isaaclab/isaaclab/envs/direct_deployment_env.py +++ b/source/isaaclab/isaaclab/envs/direct_deployment_env.py @@ -6,27 +6,31 @@ """Deployment environment that runs LEAPP-exported policies in simulation. This environment bypasses all Isaac Lab managers (observation, action, reward, etc.) -and instead wires raw ``ArticulationData`` properties and ``CommandManager`` outputs -directly to a LEAPP ``InferenceManager``, then writes the model outputs back to the -articulation. All I/O resolution is driven by the ``kind`` field in the LEAPP YAML. +and instead wires scene entity data properties and ``CommandManager`` outputs directly +to a LEAPP ``InferenceManager``, then writes the model outputs back to the +corresponding scene entities. All I/O resolution is driven by the +``isaaclab_connection`` field in the LEAPP YAML. 
""" from __future__ import annotations +import inspect import logging from dataclasses import dataclass -from typing import Any +from typing import Any, cast import torch import yaml from leapp import InferenceManager -from isaaclab.assets.articulation.articulation import Articulation -from isaaclab.assets.articulation.articulation_data import ArticulationData from isaaclab.managers import CommandManager, EventManager from isaaclab.scene import InteractiveScene from isaaclab.sim import SimulationContext -from isaaclab.sim.utils.stage import attach_stage_to_usd_context, use_stage +from isaaclab.sim.utils.stage import use_stage +from isaaclab.utils.configclass import resolve_cfg_presets +from isaaclab.utils.leapp.utils import ensure_torch_tensor + +from .ui import ViewportCameraController logger = logging.getLogger(__name__) @@ -38,8 +42,9 @@ @dataclass class StateInputSpec: - """Read a property from ``ArticulationData``, optionally sliced by joint.""" + """Read a property from a scene entity's data object.""" + entity_name: str property_name: str joint_ids: list[int] | None = None @@ -52,104 +57,43 @@ class CommandInputSpec: @dataclass -class OutputSpec: - """Write a tensor to an ``Articulation`` method, optionally indexed by joint.""" +class WriteOutputSpec: + """Write a tensor to a scene entity method, optionally indexed by joint.""" + entity_name: str method_name: str + value_param: str joint_ids: list[int] | None = None # ══════════════════════════════════════════════════════════════════ -# Kind → source/target resolution helpers +# Connection-string helpers # ══════════════════════════════════════════════════════════════════ -_JOINT_LEVEL_KIND_PREFIXES = ("state/joint/", "target/joint/") -_JOINT_LEVEL_GAIN_KINDS = ("kp", "kd") - - -def _build_kind_to_property_map() -> dict[str, list[str]]: - """Scan ``ArticulationData`` for ``_leapp_semantics`` properties. 
- - Returns a mapping from ``kind`` string to a list of property names that - carry that kind (there can be more than one, e.g. ``root_lin_vel_b`` and - ``root_lin_vel_w`` both have ``state/body/linear_velocity``). - """ - kind_to_props: dict[str, list[str]] = {} - for prop_name in dir(ArticulationData): - prop = getattr(ArticulationData, prop_name, None) - if isinstance(prop, property) and prop.fget and hasattr(prop.fget, "_leapp_semantics"): - kind = prop.fget._leapp_semantics.kind - if kind is not None: - kind_to_props.setdefault(kind, []).append(prop_name) - return kind_to_props - -def _build_kind_to_write_method_map() -> dict[str, str]: - """Scan ``Articulation`` for ``_leapp_semantics`` methods + hardcoded kp/kd. - - Returns a mapping from output ``kind`` to the method name on ``Articulation``. - """ - kind_to_method: dict[str, str] = {} - for method_name in dir(Articulation): - method = getattr(Articulation, method_name, None) - if callable(method) and hasattr(method, "_leapp_semantics"): - kind = method._leapp_semantics.kind - if kind is not None: - kind_to_method[kind] = method_name - kind_to_method["kp"] = "write_joint_stiffness_to_sim" - kind_to_method["kd"] = "write_joint_damping_to_sim" - return kind_to_method - - -def _disambiguate_property(kind: str, leapp_name: str, kind_to_props: dict[str, list[str]]) -> str: - """Pick the right ``ArticulationData`` property when multiple share a ``kind``. - - The export path uses the property name as the LEAPP input name, so we strip - the ``_in`` / ``_out`` suffix that LEAPP adds for collision avoidance and match. 
- """ - candidates = kind_to_props.get(kind) - if candidates is None: - raise ValueError(f"No ArticulationData property found for kind='{kind}'") - if len(candidates) == 1: - return candidates[0] - base_name = leapp_name.removesuffix("_in").removesuffix("_out") - for prop in candidates: - if prop == base_name: - return prop - return candidates[0] - - -def _resolve_joint_ids(element_names: list | None, asset: Articulation) -> list[int] | None: +def _resolve_joint_ids(element_names: list | None, entity: Any) -> list[int] | None: """Convert ``element_names[0]`` joint names to integer joint indices. - Returns ``None`` when no slicing is needed (all joints or non-joint tensor). + Returns ``None`` when no slicing is needed (all joints, non-joint tensor, + or entity does not support joint lookup). """ - if element_names is None: + if element_names is None or not hasattr(entity, "find_joints"): return None joint_names = element_names[0] if not isinstance(joint_names, list) or not joint_names: return None - if joint_names == list(asset.joint_names): + if joint_names == list(entity.joint_names): return None - joint_ids, _ = asset.find_joints(joint_names, preserve_order=True) + joint_ids, _ = entity.find_joints(joint_names, preserve_order=True) return joint_ids -def _find_command_term_by_hint(kind: str, command_manager: CommandManager) -> str: - """Find the ``CommandTerm`` name whose ``cfg.cmd_kind`` matches ``kind``.""" - for name, term in command_manager._terms.items(): - if getattr(term.cfg, "cmd_kind", None) == kind: - return name - raise ValueError(f"No command term with cmd_kind='{kind}'. 
Available terms: {list(command_manager._terms.keys())}") - - -def _find_robot_asset(scene: InteractiveScene) -> Articulation: - """Return the first ``Articulation`` in the scene (assumed to be the robot).""" - for entity_name in scene.articulations: - entity = scene[entity_name] - if isinstance(entity, Articulation): - return entity - raise RuntimeError("No Articulation found in scene") +def _first_param_name(method: Any) -> str: + """Return the name of the first non-self parameter of *method*.""" + params = list(inspect.signature(method).parameters.values()) + if not params: + raise TypeError(f"{method} has no parameters") + return params[0].name # ══════════════════════════════════════════════════════════════════ @@ -162,7 +106,16 @@ class DirectDeploymentEnv: The environment sets up the simulation scene and physics from a standard Isaac Lab config, then wires raw sensor/command data to a LEAPP - ``InferenceManager`` and writes the model outputs back to the articulation. + ``InferenceManager`` and writes the model outputs back to the corresponding + scene entities. + + I/O wiring is driven entirely by the ``isaaclab_connection`` metadata field + in the LEAPP YAML. Each connection string encodes the type of access, the + scene entity name, and the property or method to call: + + - ``state:{entity}:{property}`` -- read ``scene[entity].data.{property}`` + - ``command:{name}`` -- read ``command_manager.get_command(name)`` + - ``write:{entity}:{method}`` -- call ``scene[entity].{method}(tensor, ...)`` No observation, action, reward, termination, or curriculum managers are used. The LEAPP model already contains all pre/post-processing. 
@@ -178,34 +131,41 @@ def __init__(self, cfg: Any, leapp_yaml_path: str): cfg.scene.num_envs = 1 cfg.validate() + resolve_cfg_presets(cfg) self.cfg = cfg self._is_closed = False self._leapp_yaml_path = leapp_yaml_path self._step_count = 0 + self._sim_step_counter = 0 # ── Simulation + scene ──────────────────────────────────── self.sim = SimulationContext(cfg.sim) if "cuda" in self.sim.device: torch.cuda.set_device(self.sim.device) - with use_stage(self.sim.get_initial_stage()): + with use_stage(self.sim.stage): self.scene = InteractiveScene(cfg.scene) - attach_stage_to_usd_context() - self.sim.reset() + with use_stage(self.sim.stage): + self.sim.reset() self.scene.update(dt=self.physics_dt) + self.has_rtx_sensors = bool(self.sim.get_setting("/isaaclab/render/rtx_sensors")) - # ── Robot asset ─────────────────────────────────────────── - self._asset = _find_robot_asset(self.scene) + # Match the standard env initialization path for viewport camera setup. + has_visualizers = bool(self.sim.get_setting("/isaaclab/visualizer")) + if self.sim.has_gui or has_visualizers: + self.viewport_camera_controller = ViewportCameraController(cast(Any, self), self.cfg.viewer) + else: + self.viewport_camera_controller = None # ── EventManager (optional, for resets) ─────────────────── self.event_manager: EventManager | None = None if hasattr(cfg, "events") and cfg.events is not None: - self.event_manager = EventManager(cfg.events, self) + self.event_manager = EventManager(cfg.events, cast(Any, self)) # ── CommandManager (optional, for command/* inputs) ─────── self.command_manager: CommandManager | None = None if hasattr(cfg, "commands") and cfg.commands is not None: - self.command_manager = CommandManager(cfg.commands, self) + self.command_manager = CommandManager(cfg.commands, cast(Any, self)) # ── LEAPP InferenceManager ──────────────────────────────── self.inference = InferenceManager(leapp_yaml_path) @@ -214,7 +174,7 @@ def __init__(self, cfg: Any, leapp_yaml_path: str): with 
open(leapp_yaml_path) as f: self._leapp_desc = yaml.safe_load(f) self._input_mapping: dict[str, StateInputSpec | CommandInputSpec] = {} - self._output_mapping: dict[str, OutputSpec] = {} + self._output_mapping: dict[str, WriteOutputSpec] = {} self._resolve_io() logger.info( @@ -223,6 +183,11 @@ def __init__(self, cfg: Any, leapp_yaml_path: str): len(self._output_mapping), ) + if self.sim.has_gui and getattr(self.cfg, "ui_window_class_type", None) is not None: + self._window = self.cfg.ui_window_class_type(self, window_name="IsaacLab") + else: + self._window = None + # ── Properties ──────────────────────────────────────────────── @property @@ -244,81 +209,97 @@ def device(self) -> str: # ── I/O Resolution ──────────────────────────────────────────── def _resolve_io(self): - """Build ``_input_mapping`` and ``_output_mapping`` from LEAPP YAML ``kind`` fields.""" - kind_to_props = _build_kind_to_property_map() - kind_to_write = _build_kind_to_write_method_map() + """Build ``_input_mapping`` and ``_output_mapping`` from ``isaaclab_connection`` fields.""" pipeline = self._leapp_desc["pipeline"] - # --- Inputs --- for node_name, input_names in pipeline["inputs"].items(): node = self.inference.nodes[node_name] desc_by_name = {d["name"]: d for d in node.input_descriptions} for input_name in input_names: desc = desc_by_name[input_name] - kind = desc.get("kind") - key = f"{node_name}/{input_name}" - if kind is None: + connection = desc.get("isaaclab_connection") + if connection is None: continue - if kind.startswith("state/"): - prop = _disambiguate_property(kind, input_name, kind_to_props) - needs_joint_slice = kind.startswith("state/joint/") - jids = _resolve_joint_ids(desc.get("element_names"), self._asset) if needs_joint_slice else None - self._input_mapping[key] = StateInputSpec(property_name=prop, joint_ids=jids) - elif kind.startswith("command/"): + key = f"{node_name}/{input_name}" + parts = connection.split(":") + conn_type = parts[0] + + if conn_type == "state": + 
entity_name, prop_name = parts[1], parts[2] + entity = self.scene[entity_name] + jids = _resolve_joint_ids(desc.get("element_names"), entity) + self._input_mapping[key] = StateInputSpec( + entity_name=entity_name, + property_name=prop_name, + joint_ids=jids, + ) + elif conn_type == "command": + command_name = parts[1] if self.command_manager is None: raise RuntimeError( - f"LEAPP input '{key}' has kind='{kind}' but no CommandManager " - "is available (cfg.commands is None)." + f"LEAPP input '{key}' requires command '{command_name}' but no " + "CommandManager is available (cfg.commands is None)." ) - term_name = _find_command_term_by_hint(kind, self.command_manager) - self._input_mapping[key] = CommandInputSpec(command_term_name=term_name) + self._input_mapping[key] = CommandInputSpec(command_term_name=command_name) else: - logger.warning("Unknown input kind '%s' for '%s' — skipping", kind, key) + logger.warning("Unknown connection type '%s' for input '%s'", conn_type, key) - # --- Outputs --- for node_name, output_names in pipeline["outputs"].items(): node = self.inference.nodes[node_name] desc_by_name = {d["name"]: d for d in node.output_descriptions} for output_name in output_names: desc = desc_by_name[output_name] - kind = desc.get("kind") - key = f"{node_name}/{output_name}" - if kind is None: - continue - if kind not in kind_to_write: - logger.warning("Unknown output kind '%s' for '%s' — skipping", kind, key) + connection = desc.get("isaaclab_connection") + if connection is None: continue - method_name = kind_to_write[kind] - needs_joint_ids = kind.startswith("target/joint/") or kind in _JOINT_LEVEL_GAIN_KINDS - jids = _resolve_joint_ids(desc.get("element_names"), self._asset) if needs_joint_ids else None - self._output_mapping[key] = OutputSpec(method_name=method_name, joint_ids=jids) + key = f"{node_name}/{output_name}" + parts = connection.split(":") + conn_type = parts[0] + + if conn_type == "write": + entity_name, method_name = parts[1], parts[2] + entity 
= self.scene[entity_name] + jids = _resolve_joint_ids(desc.get("element_names"), entity) + value_param = _first_param_name(getattr(entity, method_name)) + self._output_mapping[key] = WriteOutputSpec( + entity_name=entity_name, + method_name=method_name, + value_param=value_param, + joint_ids=jids, + ) + else: + logger.warning("Unknown connection type '%s' for output '%s'", conn_type, key) # ── Read / Write ────────────────────────────────────────────── def _read_inputs(self) -> dict[str, torch.Tensor]: - """Read all mapped inputs from the scene and command manager.""" + """Read all mapped inputs from scene entities and command manager.""" inputs: dict[str, torch.Tensor] = {} for key, spec in self._input_mapping.items(): if isinstance(spec, StateInputSpec): - value = getattr(self._asset.data, spec.property_name) + entity = self.scene[spec.entity_name] + value = ensure_torch_tensor(getattr(entity.data, spec.property_name)) if spec.joint_ids is not None: value = value[:, spec.joint_ids] inputs[key] = value elif isinstance(spec, CommandInputSpec): - inputs[key] = self.command_manager.get_command(spec.command_term_name) + command_manager = self.command_manager + assert command_manager is not None + inputs[key] = command_manager.get_command(spec.command_term_name) return inputs def _write_outputs(self, outputs: dict[str, torch.Tensor]): - """Write model outputs to the articulation.""" + """Write model outputs to scene entities.""" for key, tensor in outputs.items(): spec = self._output_mapping.get(key) if spec is None: continue - method = getattr(self._asset, spec.method_name) + entity = self.scene[spec.entity_name] + method = getattr(entity, spec.method_name) if spec.joint_ids is not None: - method(tensor, joint_ids=spec.joint_ids) + method(**{spec.value_param: tensor, "joint_ids": spec.joint_ids}) else: - method(tensor) + method(**{spec.value_param: tensor}) # ── Public API ──────────────────────────────────────────────── @@ -341,12 +322,23 @@ def reset(self) -> 
dict[str, torch.Tensor]: self.sim.forward() self.scene.update(dt=self.physics_dt) + # If RTX sensors are present, rerender after reset to refresh their outputs. + if self.has_rtx_sensors and getattr(self.cfg, "num_rerenders_on_reset", 0) > 0: + for _ in range(self.cfg.num_rerenders_on_reset): + self.sim.render() + + if getattr(self.cfg, "wait_for_textures", False) and self.has_rtx_sensors: + assets_loading = getattr(self.sim.physics_manager, "assets_loading", None) + if callable(assets_loading): + while assets_loading(): + self.sim.render() + self.inference.reset() return self._read_inputs() def step(self, external_inputs: dict[str, torch.Tensor] | None = None) -> dict[str, torch.Tensor]: - """Run one environment step: read → infer → write → physics. + """Run one environment step: read -> infer -> write -> physics. Args: external_inputs: Optional overrides keyed by ``"ModelName/input_name"``. @@ -372,15 +364,16 @@ def step(self, external_inputs: dict[str, torch.Tensor] | None = None) -> dict[s with torch.inference_mode(): outputs = self.inference.run_policy(inputs) - # 5. Write outputs to asset + # 5. Write outputs to scene entities self._write_outputs(outputs) # 6. 
Decimation loop - is_rendering = self.sim.has_gui() or self.sim.has_rtx_sensors() + is_rendering = self.sim.is_rendering for _ in range(self.cfg.decimation): + self._sim_step_counter += 1 self.scene.write_data_to_sim() self.sim.step(render=False) - if is_rendering: + if self._sim_step_counter % self.cfg.sim.render_interval == 0 and is_rendering: self.sim.render() self.scene.update(dt=self.physics_dt) @@ -389,9 +382,15 @@ def step(self, external_inputs: dict[str, torch.Tensor] | None = None) -> dict[s def close(self): """Clean up the environment.""" if not self._is_closed: + self.sim.stop() if self.command_manager is not None: del self.command_manager if self.event_manager is not None: del self.event_manager del self.scene + if self.viewport_camera_controller is not None: + del self.viewport_camera_controller + self.sim.clear_instance() + if self._window is not None: + self._window = None self._is_closed = True diff --git a/source/isaaclab/isaaclab/utils/leapp/export_annotator.py b/source/isaaclab/isaaclab/utils/leapp/export_annotator.py index df68f1d0b237..141e9bb09bdd 100644 --- a/source/isaaclab/isaaclab/utils/leapp/export_annotator.py +++ b/source/isaaclab/isaaclab/utils/leapp/export_annotator.py @@ -426,7 +426,7 @@ def _collect_action_outputs(self, action_manager) -> list[TensorSemantics]: ref=torch.diagonal(osc._motion_p_gains_task, dim1=-2, dim2=-1), kind="kp", element_names=select_element_names(joint_names, joint_ids), - extra=build_write_connection(scene_key, "write_joint_stiffness_to_sim"), + extra=build_write_connection(scene_key, "write_joint_stiffness_to_sim_index"), ) ) tensors.append( @@ -435,7 +435,7 @@ def _collect_action_outputs(self, action_manager) -> list[TensorSemantics]: ref=torch.diagonal(osc._motion_d_gains_task, dim1=-2, dim2=-1), kind="kd", element_names=select_element_names(joint_names, joint_ids), - extra=build_write_connection(scene_key, "write_joint_damping_to_sim"), + extra=build_write_connection(scene_key, 
"write_joint_damping_to_sim_index"), ) ) return tensors @@ -506,7 +506,7 @@ def _collect_action_static_outputs( ref=gains[:, joint_ids] if joint_ids else gains, kind="kp", element_names=select_element_names(joint_names, joint_ids), - extra=build_write_connection(scene_key, "write_joint_stiffness_to_sim"), + extra=build_write_connection(scene_key, "write_joint_stiffness_to_sim_index"), ) ) if hasattr(data, "default_joint_damping") and data.default_joint_damping is not None: @@ -517,7 +517,7 @@ def _collect_action_static_outputs( ref=gains[:, joint_ids] if joint_ids else gains, kind="kd", element_names=select_element_names(joint_names, joint_ids), - extra=build_write_connection(scene_key, "write_joint_damping_to_sim"), + extra=build_write_connection(scene_key, "write_joint_damping_to_sim_index"), ) ) return static_values From e9a87d589c82a2a913e205801ae5f888122603d2 Mon Sep 17 00:00:00 2001 From: Frank Lai Date: Tue, 24 Mar 2026 14:23:51 -0700 Subject: [PATCH 09/20] precommit updates --- source/isaaclab/isaaclab/utils/buffers/circular_buffer.py | 6 ++++-- source/isaaclab_rl/test/test_rsl_rl_export_flow.py | 3 ++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/source/isaaclab/isaaclab/utils/buffers/circular_buffer.py b/source/isaaclab/isaaclab/utils/buffers/circular_buffer.py index 80fbc2d52a03..3066c975a46f 100644 --- a/source/isaaclab/isaaclab/utils/buffers/circular_buffer.py +++ b/source/isaaclab/isaaclab/utils/buffers/circular_buffer.py @@ -80,7 +80,8 @@ def buffer(self) -> torch.Tensor: """Complete circular buffer with most recent entry at the end and oldest entry at the beginning. Returns: - Complete circular buffer with most recent entry at the end and oldest entry at the beginning of dimension 1. The shape is [batch_size, max_length, data.shape[1:]]. + Complete circular buffer with most recent entry at the end and oldest entry at the beginning of + dimension 1. The shape is [batch_size, max_length, data.shape[1:]]. 
""" return torch.transpose(self._buffer, dim0=0, dim1=1) @@ -102,7 +103,8 @@ def reset(self, batch_ids: Sequence[int] | None = None): # reset the number of pushes for the specified batch indices self._num_pushes[batch_ids_resolved] = 0 if self._buffer is not None: - # set buffer at batch_id reset indices to 0.0 so that the buffer() getter returns the cleared circular buffer after reset. + # set buffer at batch_id reset indices to 0.0 so that the buffer() getter returns + # the cleared circular buffer after reset. self._buffer[:, batch_ids_resolved] = 0.0 def append(self, data: torch.Tensor): diff --git a/source/isaaclab_rl/test/test_rsl_rl_export_flow.py b/source/isaaclab_rl/test/test_rsl_rl_export_flow.py index 8a80781f30d1..ba27c3f29fad 100644 --- a/source/isaaclab_rl/test/test_rsl_rl_export_flow.py +++ b/source/isaaclab_rl/test/test_rsl_rl_export_flow.py @@ -12,10 +12,11 @@ """ import os -import pytest import shutil import subprocess +import pytest + # Root of the repository (three levels up from this file). 
_REPO_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "..")) _EXPORT_SCRIPT = os.path.join("scripts", "reinforcement_learning", "rsl_rl", "export.py") From 258772b9744b973f0dcd499c7fcd01d500c65891 Mon Sep 17 00:00:00 2001 From: Frank Lai Date: Tue, 24 Mar 2026 18:46:08 -0700 Subject: [PATCH 10/20] added changelogs and updated versions --- source/isaaclab/config/extension.toml | 2 +- source/isaaclab/docs/CHANGELOG.rst | 9 +++++++++ source/isaaclab_rl/config/extension.toml | 2 +- source/isaaclab_rl/docs/CHANGELOG.rst | 10 +++++++++- source/isaaclab_tasks/config/extension.toml | 2 +- source/isaaclab_tasks/docs/CHANGELOG.rst | 9 +++++++++ 6 files changed, 30 insertions(+), 4 deletions(-) diff --git a/source/isaaclab/config/extension.toml b/source/isaaclab/config/extension.toml index e5aff2e058a1..85b9e265b1b7 100644 --- a/source/isaaclab/config/extension.toml +++ b/source/isaaclab/config/extension.toml @@ -1,7 +1,7 @@ [package] # Note: Semantic Versioning is used: https://semver.org/ -version = "4.6.6" +version = "4.6.7" # Description title = "Isaac Lab framework for Robot Learning" diff --git a/source/isaaclab/docs/CHANGELOG.rst b/source/isaaclab/docs/CHANGELOG.rst index 1fa14995f78e..3f021bc73698 100644 --- a/source/isaaclab/docs/CHANGELOG.rst +++ b/source/isaaclab/docs/CHANGELOG.rst @@ -1,6 +1,15 @@ Changelog --------- +4.6.7 (2026-04-20) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added LEAPP export annotation support for RSL-RL policies. ``export.py`` and + ``deploy.py`` export and deploy respectively. 
+ 4.6.6 (2026-04-17) ~~~~~~~~~~~~~~~~~~~ diff --git a/source/isaaclab_rl/config/extension.toml b/source/isaaclab_rl/config/extension.toml index 6b5ae668f03e..df9fe2b03612 100644 --- a/source/isaaclab_rl/config/extension.toml +++ b/source/isaaclab_rl/config/extension.toml @@ -1,7 +1,7 @@ [package] # Note: Semantic Versioning is used: https://semver.org/ -version = "0.5.1" +version = "0.5.2" # Description title = "Isaac Lab RL" diff --git a/source/isaaclab_rl/docs/CHANGELOG.rst b/source/isaaclab_rl/docs/CHANGELOG.rst index 4b159bb2ad95..d292eee9faab 100644 --- a/source/isaaclab_rl/docs/CHANGELOG.rst +++ b/source/isaaclab_rl/docs/CHANGELOG.rst @@ -1,6 +1,15 @@ Changelog --------- +0.5.2 (2026-04-20) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added a test to verify LEAPP export fidelity against the existing policies built + into Isaac Lab. + 0.5.1 (2026-04-06) ~~~~~~~~~~~~~~~~~~ @@ -9,7 +18,6 @@ Changed * Locked h5py dependency to last stable version 3.15.1 to prevent package import errors on Windows with version 3.16.0. - 0.5.0 (2026-3-04) ~~~~~~~~~~~~~~~~~~ diff --git a/source/isaaclab_tasks/config/extension.toml b/source/isaaclab_tasks/config/extension.toml index 061921978b4e..1a579ed0ef48 100644 --- a/source/isaaclab_tasks/config/extension.toml +++ b/source/isaaclab_tasks/config/extension.toml @@ -1,7 +1,7 @@ [package] # Note: Semantic Versioning is used: https://semver.org/ -version = "1.5.21" +version = "1.5.22" # Description title = "Isaac Lab Environments" diff --git a/source/isaaclab_tasks/docs/CHANGELOG.rst b/source/isaaclab_tasks/docs/CHANGELOG.rst index 0aa31de28c6a..583da529525c 100644 --- a/source/isaaclab_tasks/docs/CHANGELOG.rst +++ b/source/isaaclab_tasks/docs/CHANGELOG.rst @@ -1,6 +1,15 @@ Changelog --------- +1.5.22 (2026-04-20) +~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Changed Dexsuite and in-hand functions to allow tracing without affecting + behavior. 
+ 1.5.21 (2026-04-13) ~~~~~~~~~~~~~~~~~~~ From 68b34c9de85c3cce68b1e5c2fc057bc961be6395 Mon Sep 17 00:00:00 2001 From: Frank Lai Date: Tue, 24 Mar 2026 18:48:23 -0700 Subject: [PATCH 11/20] added myself to contributors --- CONTRIBUTORS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index 252340151d43..afd6fe917fda 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -81,6 +81,7 @@ Guidelines for modifications: * Fabian Jenelten * Felipe Mohr * Felix Yu +* Frank Lai * Gary Lvov * Giulio Romualdi * Grzegorz Malczyk From 4e9a558d99fa4310ceefef036a060920ada34faf Mon Sep 17 00:00:00 2001 From: Frank Lai Date: Tue, 24 Mar 2026 19:30:08 -0700 Subject: [PATCH 12/20] fixed some comments --- scripts/reinforcement_learning/rsl_rl/export.py | 4 ++-- .../isaaclab/isaaclab/utils/buffers/circular_buffer.py | 10 +++++++--- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/scripts/reinforcement_learning/rsl_rl/export.py b/scripts/reinforcement_learning/rsl_rl/export.py index ac555be7fa76..c7c2dc4df5ae 100644 --- a/scripts/reinforcement_learning/rsl_rl/export.py +++ b/scripts/reinforcement_learning/rsl_rl/export.py @@ -286,8 +286,8 @@ def main(env_cfg: ManagerBasedRLEnvCfg, agent_cfg: RslRlBaseRunnerCfg): obs, _, _, _ = env.step(actions) leapp.stop() - vilidate = args_cli.validation_steps > 0 - leapp.compile_graph(visualize=not args_cli.disable_graph_visualization, validate=vilidate) + validate = args_cli.validation_steps > 0 + leapp.compile_graph(visualize=not args_cli.disable_graph_visualization, validate=validate) # close the simulator env.close() diff --git a/source/isaaclab/isaaclab/utils/buffers/circular_buffer.py b/source/isaaclab/isaaclab/utils/buffers/circular_buffer.py index 3066c975a46f..c72907b176b3 100644 --- a/source/isaaclab/isaaclab/utils/buffers/circular_buffer.py +++ b/source/isaaclab/isaaclab/utils/buffers/circular_buffer.py @@ -133,7 +133,8 @@ def append(self, data: torch.Tensor): self._num_pushes += 1 def 
_append(self, data: torch.Tensor): - self._buffer = torch.cat([self._buffer[1:], data.unsqueeze(0)], dim=0) + self._buffer = torch.roll(self._buffer, shifts=-1, dims=0) + self._buffer[-1] = data def __getitem__(self, key: torch.Tensor) -> torch.Tensor: """Retrieve the data from the circular buffer in last-in-first-out (LIFO) fashion. @@ -155,9 +156,12 @@ def __getitem__(self, key: torch.Tensor) -> torch.Tensor: # check the batch size if len(key) != self.batch_size: raise ValueError(f"The argument 'key' has length {key.shape[0]}, while expecting {self.batch_size}") + if self._buffer is None: + raise RuntimeError("The buffer is empty. Please append data before retrieving.") - # admissible lag - valid_keys = torch.minimum(key, self._num_pushes - 1) + # admissible lag — clamp to [0, ..] so batches with _num_pushes == 0 + # return the zeroed-out slot instead of indexing out of bounds. + valid_keys = torch.clamp(torch.minimum(key, self._num_pushes - 1), min=0) # The buffer is stored oldest->newest along dimension 0, so the most # recent item lives at the last index. index_in_buffer = (self.max_length - 1 - valid_keys).to(dtype=torch.long) From 6705d8028f00202f61f0b99ba1e2182e36add87d Mon Sep 17 00:00:00 2001 From: Frank Lai Date: Sat, 28 Mar 2026 17:30:53 -0700 Subject: [PATCH 13/20] added init, made leapp import lazy. leapp is no longer a hard requirement for non-export related tasks. 
--- .../reinforcement_learning/rsl_rl/export.py | 2 +- .../assets/articulation/base_articulation.py | 3 +- .../articulation/base_articulation_data.py | 4 +- .../rigid_object/base_rigid_object_data.py | 4 +- .../base_rigid_object_collection_data.py | 4 +- .../isaaclab/envs/direct_deployment_env.py | 8 ++- .../isaaclab/managers/manager_term_cfg.py | 6 +- .../base_contact_sensor_data.py | 4 +- .../base_frame_transformer_data.py | 4 +- .../isaaclab/sensors/imu/base_imu_data.py | 7 ++- .../sensors/ray_caster/ray_caster_data.py | 2 +- .../isaaclab/isaaclab/utils/leapp/__init__.py | 10 +++ .../isaaclab/utils/leapp/__init__.pyi | 63 +++++++++++++++++++ .../isaaclab/utils/leapp/leapp_semantics.py | 20 ++++++ source/isaaclab/isaaclab/utils/leapp/utils.py | 6 +- 15 files changed, 123 insertions(+), 24 deletions(-) create mode 100644 source/isaaclab/isaaclab/utils/leapp/__init__.py create mode 100644 source/isaaclab/isaaclab/utils/leapp/__init__.pyi diff --git a/scripts/reinforcement_learning/rsl_rl/export.py b/scripts/reinforcement_learning/rsl_rl/export.py index c7c2dc4df5ae..30608d02465d 100644 --- a/scripts/reinforcement_learning/rsl_rl/export.py +++ b/scripts/reinforcement_learning/rsl_rl/export.py @@ -111,7 +111,7 @@ from isaaclab.envs import ManagerBasedRLEnv, ManagerBasedRLEnvCfg from isaaclab.utils.assets import retrieve_file_path -from isaaclab.utils.leapp.export_annotator import patch_env_for_export +from isaaclab.utils.leapp import patch_env_for_export from isaaclab_rl.rsl_rl import RslRlBaseRunnerCfg, RslRlVecEnvWrapper, handle_deprecated_rsl_rl_cfg from isaaclab_rl.utils.pretrained_checkpoint import get_published_pretrained_checkpoint diff --git a/source/isaaclab/isaaclab/assets/articulation/base_articulation.py b/source/isaaclab/isaaclab/assets/articulation/base_articulation.py index 5fb3bd4a72f0..5d45d221abf0 100644 --- a/source/isaaclab/isaaclab/assets/articulation/base_articulation.py +++ b/source/isaaclab/isaaclab/assets/articulation/base_articulation.py @@ 
-15,9 +15,8 @@ import torch import warp as wp -from leapp import OutputKindEnum -from ...utils.leapp.leapp_semantics import joint_names_resolver, leapp_tensor_semantics +from ...utils.leapp.leapp_semantics import OutputKindEnum, joint_names_resolver, leapp_tensor_semantics from ..asset_base import AssetBase if TYPE_CHECKING: diff --git a/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py b/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py index 4c29b736f7df..d3b688582d2e 100644 --- a/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py +++ b/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py @@ -7,13 +7,13 @@ from abc import ABC, abstractmethod import warp as wp -from leapp import InputKindEnum -from isaaclab.utils.leapp.leapp_semantics import ( +from isaaclab.utils.leapp import ( POSE7_ELEMENT_NAMES, QUAT_WXYZ_ELEMENT_NAMES, WRENCH6_ELEMENT_NAMES, XYZ_ELEMENT_NAMES, + InputKindEnum, body_pose_resolver, body_quat_resolver, body_wrench_resolver, diff --git a/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py b/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py index 1b45637a7b27..69d144bfabb5 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py +++ b/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py @@ -7,13 +7,13 @@ from abc import ABC, abstractmethod import warp as wp -from leapp import InputKindEnum -from isaaclab.utils.leapp.leapp_semantics import ( +from isaaclab.utils.leapp import ( POSE7_ELEMENT_NAMES, QUAT_WXYZ_ELEMENT_NAMES, WRENCH6_ELEMENT_NAMES, XYZ_ELEMENT_NAMES, + InputKindEnum, body_pose_resolver, body_quat_resolver, body_wrench_resolver, diff --git a/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py b/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py index 1fd981067e52..020b8aaed6b2 100644 --- 
a/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py +++ b/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py @@ -7,9 +7,9 @@ from abc import ABC, abstractmethod import warp as wp -from leapp import InputKindEnum -from isaaclab.utils.leapp.leapp_semantics import ( +from isaaclab.utils.leapp import ( + InputKindEnum, body_pose_resolver, body_quat_resolver, body_wrench_resolver, diff --git a/source/isaaclab/isaaclab/envs/direct_deployment_env.py b/source/isaaclab/isaaclab/envs/direct_deployment_env.py index 9d09b24916aa..4eb307f6bbc9 100644 --- a/source/isaaclab/isaaclab/envs/direct_deployment_env.py +++ b/source/isaaclab/isaaclab/envs/direct_deployment_env.py @@ -28,7 +28,7 @@ from isaaclab.sim import SimulationContext from isaaclab.sim.utils.stage import use_stage from isaaclab.utils.configclass import resolve_cfg_presets -from isaaclab.utils.leapp.utils import ensure_torch_tensor +from isaaclab.utils.leapp import ensure_torch_tensor from .ui import ViewportCameraController @@ -89,7 +89,11 @@ def _resolve_joint_ids(element_names: list | None, entity: Any) -> list[int] | N def _first_param_name(method: Any) -> str: - """Return the name of the first non-self parameter of *method*.""" + """Return the name of the first non-self parameter of *method*. + + Expects a bound method — ``inspect.signature`` on a bound method + already excludes ``self``, so ``params[0]`` is the first real parameter. 
+ """ params = list(inspect.signature(method).parameters.values()) if not params: raise TypeError(f"{method} has no parameters") diff --git a/source/isaaclab/isaaclab/managers/manager_term_cfg.py b/source/isaaclab/isaaclab/managers/manager_term_cfg.py index d8adbd8eef0e..06f2516324b5 100644 --- a/source/isaaclab/isaaclab/managers/manager_term_cfg.py +++ b/source/isaaclab/isaaclab/managers/manager_term_cfg.py @@ -118,8 +118,10 @@ class CommandTermCfg: debug_vis: bool = False """Whether to visualize debug information. Defaults to False.""" - cmd_kind: str | None = None # type hint for the command for deployment - element_names: list[str] | list[list[str]] | None = None # element names for the command for deployment + cmd_kind: str | None = None + """Type hint for the command for deployment.""" + element_names: list[str] | list[list[str]] | None = None + """Element names for the command for deployment.""" ## diff --git a/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor_data.py b/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor_data.py index 11bf456ae1f5..ac0d2c4fdf23 100644 --- a/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor_data.py +++ b/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor_data.py @@ -10,12 +10,12 @@ from abc import ABC, abstractmethod import warp as wp -from leapp import InputKindEnum -from isaaclab.utils.leapp.leapp_semantics import ( +from isaaclab.utils.leapp import ( POSE7_ELEMENT_NAMES, QUAT_WXYZ_ELEMENT_NAMES, XYZ_ELEMENT_NAMES, + InputKindEnum, leapp_tensor_semantics, ) diff --git a/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer_data.py b/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer_data.py index ee125cb7a1cf..8a1b3ec37853 100644 --- a/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer_data.py +++ b/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer_data.py @@ -10,12 
+10,12 @@ from abc import ABC, abstractmethod import warp as wp -from leapp import InputKindEnum -from isaaclab.utils.leapp.leapp_semantics import ( +from isaaclab.utils.leapp import ( POSE7_ELEMENT_NAMES, QUAT_WXYZ_ELEMENT_NAMES, XYZ_ELEMENT_NAMES, + InputKindEnum, leapp_tensor_semantics, target_frame_pose_resolver, target_frame_quat_resolver, diff --git a/source/isaaclab/isaaclab/sensors/imu/base_imu_data.py b/source/isaaclab/isaaclab/sensors/imu/base_imu_data.py index 996e5bd0b81e..0067a7d2604c 100644 --- a/source/isaaclab/isaaclab/sensors/imu/base_imu_data.py +++ b/source/isaaclab/isaaclab/sensors/imu/base_imu_data.py @@ -10,9 +10,12 @@ from abc import ABC, abstractmethod import warp as wp -from leapp import InputKindEnum -from isaaclab.utils.leapp.leapp_semantics import XYZ_ELEMENT_NAMES, leapp_tensor_semantics +from isaaclab.utils.leapp import ( + InputKindEnum, + XYZ_ELEMENT_NAMES, + leapp_tensor_semantics, +) class BaseImuData(ABC): diff --git a/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_data.py b/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_data.py index 7b5c3dfe6ab8..2b1a5c65e881 100644 --- a/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_data.py +++ b/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_data.py @@ -7,7 +7,7 @@ import torch -from isaaclab.utils.leapp.leapp_semantics import ( +from isaaclab.utils.leapp import ( QUAT_WXYZ_ELEMENT_NAMES, XYZ_ELEMENT_NAMES, leapp_tensor_semantics, diff --git a/source/isaaclab/isaaclab/utils/leapp/__init__.py b/source/isaaclab/isaaclab/utils/leapp/__init__.py new file mode 100644 index 000000000000..f39b0e4d7eea --- /dev/null +++ b/source/isaaclab/isaaclab/utils/leapp/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for LEAPP export annotation and proxy-based policy tracing.""" + +from isaaclab.utils.module import lazy_export + +lazy_export() diff --git a/source/isaaclab/isaaclab/utils/leapp/__init__.pyi b/source/isaaclab/isaaclab/utils/leapp/__init__.pyi new file mode 100644 index 000000000000..5798df6f3769 --- /dev/null +++ b/source/isaaclab/isaaclab/utils/leapp/__init__.pyi @@ -0,0 +1,63 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +__all__ = [ + "ExportPatcher", + "InputKindEnum", + "LeappTensorSemantics", + "OutputKindEnum", + "POSE7_ELEMENT_NAMES", + "QUAT_WXYZ_ELEMENT_NAMES", + "WRENCH6_ELEMENT_NAMES", + "XYZ_ELEMENT_NAMES", + "body_names_resolver", + "body_pose_resolver", + "body_quat_resolver", + "body_wrench_resolver", + "body_xyz_resolver", + "build_command_connection", + "build_state_connection", + "build_write_connection", + "ensure_torch_tensor", + "joint_names_resolver", + "leapp_tensor_semantics", + "patch_env_for_export", + "patch_warp_to_torch_passthrough", + "resolve_leapp_element_names", + "select_element_names", + "target_frame_pose_resolver", + "target_frame_quat_resolver", + "target_frame_xyz_resolver", +] + +from .export_annotator import ExportPatcher, patch_env_for_export +from .leapp_semantics import ( + InputKindEnum, + OutputKindEnum, + POSE7_ELEMENT_NAMES, + QUAT_WXYZ_ELEMENT_NAMES, + WRENCH6_ELEMENT_NAMES, + XYZ_ELEMENT_NAMES, + LeappTensorSemantics, + body_names_resolver, + body_pose_resolver, + body_quat_resolver, + body_wrench_resolver, + body_xyz_resolver, + joint_names_resolver, + leapp_tensor_semantics, + resolve_leapp_element_names, + target_frame_pose_resolver, + target_frame_quat_resolver, + target_frame_xyz_resolver, +) +from .utils import ( + build_command_connection, + build_state_connection, + build_write_connection, + 
ensure_torch_tensor, + patch_warp_to_torch_passthrough, + select_element_names, +) diff --git a/source/isaaclab/isaaclab/utils/leapp/leapp_semantics.py b/source/isaaclab/isaaclab/utils/leapp/leapp_semantics.py index eae235e453ca..b42058067847 100644 --- a/source/isaaclab/isaaclab/utils/leapp/leapp_semantics.py +++ b/source/isaaclab/isaaclab/utils/leapp/leapp_semantics.py @@ -13,6 +13,26 @@ from .utils import select_element_names +try: + from leapp import InputKindEnum, OutputKindEnum +except ImportError: + + class _LeappEnumSentinel: + """Stand-in when leapp is not installed. + + Any attribute access returns ``None`` so that + ``@leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE)`` + silently stores ``kind=None`` instead of crashing at import time. + The real enum values are only needed at export time, when leapp + *is* guaranteed to be available. + """ + + def __getattr__(self, name: str): + return None + + InputKindEnum = _LeappEnumSentinel() # type: ignore[assignment,misc] + OutputKindEnum = _LeappEnumSentinel() # type: ignore[assignment,misc] + @dataclass(frozen=True) class LeappTensorSemantics: diff --git a/source/isaaclab/isaaclab/utils/leapp/utils.py b/source/isaaclab/isaaclab/utils/leapp/utils.py index 0d20fd14eddb..19a11aa0bffd 100644 --- a/source/isaaclab/isaaclab/utils/leapp/utils.py +++ b/source/isaaclab/isaaclab/utils/leapp/utils.py @@ -33,10 +33,8 @@ def ensure_torch_tensor(value): """Convert Warp arrays to torch tensors while leaving torch tensors unchanged.""" if isinstance(value, torch.Tensor): return value - try: - return wp.to_torch(value) - except Exception: - return value + + return wp.to_torch(value) def patch_warp_to_torch_passthrough() -> None: From c9d417f045fc0a07365d981878b39ee600bd6e1d Mon Sep 17 00:00:00 2001 From: Frank Lai Date: Wed, 1 Apr 2026 12:25:43 -0700 Subject: [PATCH 14/20] added docs on exporting managed environments. added direct rl example for tutorial. 
still need to write the tutorial --- .../exporting_policies_with_leapp.rst | 261 ++++++++++++++++++ docs/source/policy_deployment/index.rst | 1 + ...ting_direct_deployment_policies_with_leapp | 0 .../reinforcement_learning/rsl_rl/export.py | 36 ++- scripts/tutorials/06_deploy/anymal_c_env.py | 230 +++++++++++++++ .../isaaclab/utils/leapp/export_annotator.py | 8 +- source/isaaclab/isaaclab/utils/leapp/utils.py | 16 ++ .../export/test_rsl_rl_direct_export_flow.py | 192 +++++++++++++ .../{ => export}/test_rsl_rl_export_flow.py | 0 9 files changed, 721 insertions(+), 23 deletions(-) create mode 100644 docs/source/policy_deployment/05_leapp/exporting_policies_with_leapp.rst create mode 100644 docs/source/tutorials/06_exporting/exporting_direct_deployment_policies_with_leapp create mode 100644 scripts/tutorials/06_deploy/anymal_c_env.py create mode 100644 source/isaaclab_rl/test/export/test_rsl_rl_direct_export_flow.py rename source/isaaclab_rl/test/{ => export}/test_rsl_rl_export_flow.py (100%) diff --git a/docs/source/policy_deployment/05_leapp/exporting_policies_with_leapp.rst b/docs/source/policy_deployment/05_leapp/exporting_policies_with_leapp.rst new file mode 100644 index 000000000000..b560b4f7f511 --- /dev/null +++ b/docs/source/policy_deployment/05_leapp/exporting_policies_with_leapp.rst @@ -0,0 +1,261 @@ +Exporting Policies with LEAPP +============================= + +.. currentmodule:: isaaclab + +This guide covers how to export trained reinforcement learning policies from Isaac Lab using +`LEAPP `_ (Lightweight Export Annotations for Policy Pipelines). +The main goal of the LEAPP export path is to package the policy together with the input and +output semantics needed for deployment, so downstream users do not need to reimplement Isaac Lab +observation preprocessing, action postprocessing, or recurrent-state handling by hand. + +In practice, this makes the exported policy a much better fit for Isaac deployment libraries. 
+Isaac Lab can already consume these exports through :class:`~envs.DirectDeploymentEnv`, and Isaac
+ROS will add direct support for running LEAPP-exported policies in a future release.
+
+.. note::
+
+   This export path currently supports **manager-based RL environments** (``ManagerBasedRLEnv``)
+   trained with **RSL-RL** only. Other environments are not yet supported.
+
+
+Prerequisites
+-------------
+
+.. admonition:: TODO
+   :class: warning
+
+   Installation instructions will be finalized once LEAPP is publicly released.
+
+LEAPP requires Python >= 3.8 and PyTorch >= 2.6. Install it with:
+
+.. code-block:: bash
+
+   # PLACEHOLDER — replace with the actual install command once available
+   pip install leapp
+
+Ensure you have a trained RSL-RL checkpoint before proceeding. The standard Isaac Lab
+training workflow produces checkpoints under ``logs/rsl_rl/<experiment_name>/<run_dir>/``.
+
+
+Why Export with LEAPP
+---------------------
+
+Running the export script generates a self-contained export directory alongside your
+checkpoint (or at a custom path). The directory contains:
+
+- **Exported model files** — ``.onnx`` (default) or ``.pt`` depending on the chosen backend.
+- **Export metadata** — LEAPP records the semantic information and wiring needed by downstream
+  deployment runtimes.
+- **Initial values** — a ``.safetensors`` file for any feedback state, such as recurrent hidden
+  state or last action.
+- **A graph visualization** — a ``.png`` diagram of the pipeline (can be disabled).
+
+The important outcome for Isaac deployment workflows is that the exported artifact preserves the
+same dataflow that was used during training and inference inside Isaac Lab. That means downstream
+consumers can run the policy without reconstructing observation ordering, command wiring, actuator
+targets, or policy feedback loops themselves.
+
+For a detailed description of LEAPP's generated artifacts and APIs, refer to the
+`LEAPP documentation `_.
+
+
+Exporting a Policy
+------------------
+
+Use the RSL-RL export script to export a trained checkpoint:
+
+.. code-block:: bash
+
+   ./isaaclab.sh -p scripts/reinforcement_learning/rsl_rl/export.py \
+       --task <task_name> \
+       --checkpoint <path/to/checkpoint.pt>
+
+For example, to export a UR10 reach policy:
+
+.. code-block:: bash
+
+   ./isaaclab.sh -p scripts/reinforcement_learning/rsl_rl/export.py \
+       --task Isaac-Reach-UR10-v0 \
+       --checkpoint logs/rsl_rl/ur10_reach/2026-03-22_22-35-55/model_4999.pt
+
+By default, the export artifacts are saved in the same directory as the checkpoint. The
+exported graph is named after the task.
+
+
+CLI Options
+^^^^^^^^^^^
+
+The export script accepts the following LEAPP-specific arguments in addition to the standard
+RSL-RL and AppLauncher arguments:
+
+.. list-table::
+   :widths: 30 15 55
+   :header-rows: 1
+
+   * - Argument
+     - Default
+     - Description
+   * - ``--export_task_name``
+     - Task name
+     - Name for the exported graph and output directory.
+   * - ``--export_method``
+     - ``onnx-dynamo``
+     - Export backend. Choices: ``onnx-dynamo``, ``onnx-torchscript``, ``jit-script``,
+       ``jit-trace``.
+   * - ``--export_save_path``
+     - Checkpoint dir
+     - Base directory for export output.
+   * - ``--validation_steps``
+     - ``5``
+     - Number of environment steps to run during the traced rollout. Set to ``0`` to skip
+       validation.
+   * - ``--disable_graph_visualization``
+     - ``False``
+     - Skip generating the pipeline graph PNG.
+
+The script also accepts the standard ``--checkpoint``, ``--load_run``, ``--load_checkpoint``,
+and ``--use_pretrained_checkpoint`` arguments for locating the trained model.
+
+
+How It Works (High Level)
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The export script performs the following steps:
+
+1. **Creates the environment** with ``num_envs=1`` and loads the trained checkpoint.
+2. **Patches the environment** for export. This step injects annotations into the environment
+   so that tensor I/O to the pipeline is identified by LEAPP during execution.
+3.
**Runs a short rollout** (controlled by ``--validation_steps``) with LEAPP tracing + active. During this rollout, LEAPP traces all tensor operations in the pipeline and automatically + builds an onnx file. +4. **Compiles the graph** so the exported model and deployment metadata can be consumed by + downstream runtimes, and optionally validates that the exported model reproduces the traced + outputs. + +The patching is transparent to the policy — no changes to your training code or environment +configuration are needed. + +.. warning:: + + LEAPP is designed to support a broad range of model architectures, but the current + implementation has a few important limitations: + + - **Dynamic control flow** is not supported when the condition depends on runtime tensor + values, such as tensor-dependent ``if``, ``for``, or ``while`` logic. + - **Complex slicing** is not fully supported. Examples include dynamic masked indexing + using multiple traced tensors such as ``tensor[traced1, traced2]``. Slicing with constant values + or with a single traced tensor is supported such as ``tensor[mask]`` or ``tensor[1:5]``. + - **Critical traced operations must be written in PyTorch.** For this release, Warp and + NumPy operations cannot be traced by LEAPP. + + +Verifying an Export +------------------- + +After export, we recommend validating the result in three ways. + +1. **Use LEAPP's automatic verification on seen traced data.** +2. **Inspect the generated graph visualization.** +3. **Read the LEAPP log carefully, especially when the export fails or emits warnings.** + +Automatic Verification on Seen Data +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +By default, Isaac Lab asks LEAPP to validate the exported model after compilation. LEAPP does +this by replaying the data it already saw during the traced rollout and checking that the +exported artifact reproduces the same outputs. 
+ +This is a strong first-line check because it is good at catching export-time issues such as: + +- backend conversion problems +- unsupported or incorrectly lowered operators +- output shape or dtype mismatches +- numerical discrepancies between the original policy and the exported artifact +- recurrent or feedback-state handling mistakes that show up during replay + +This validation is controlled by ``--validation_steps``. Setting it to a positive value gives +LEAPP rollout data to validate against. Setting it to ``0`` skips this automatic check, which +is useful for debugging but not recommended for normal export workflows. + +Inspect the Graph Visualization +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +LEAPP can generate a diagram of the exported pipeline as part of ``compile_graph()``. Even when +automatic verification passes, it is still worth opening the diagram and doing a quick visual +inspection. + +This is especially useful for catching structural issues such as: + +- missing inputs or outputs +- unexpected extra nodes +- incorrect feedback edges +- naming mistakes that make deployment harder to reason about + +You can disable the diagram with ``--disable_graph_visualization``, but we recommend keeping it +enabled while developing and validating a new export path. + +Inspect the LEAPP Log +^^^^^^^^^^^^^^^^^^^^^ + +If something breaks, the LEAPP-generated log is usually the best place to determine exactly what +happened. Read it closely and pay attention to both hard errors and warnings. + +The log is useful for diagnosing issues such as: + +- export backend failures +- warnings about graph construction or validation +- missing metadata +- unsupported model patterns +- file generation problems + +In practice, this should be your first stop when the export does not complete or when the output +artifacts do not look correct. 
+ + +Export Backends +^^^^^^^^^^^^^^^ + +The ``--export_method`` argument controls how the policy network is serialized: + +- **onnx-dynamo** (default) — Uses ``torch.onnx.dynamo_export``. Best compatibility with + modern PyTorch features. +- **onnx-torchscript** — Uses the legacy ``torch.onnx.export`` path. May be needed for + certain model architectures. +- **jit-script** / **jit-trace** — Produces TorchScript ``.pt`` files instead of ONNX. + + +Recurrent Policies +^^^^^^^^^^^^^^^^^^ + +Recurrent policies (e.g., using GRU or LSTM memory) are supported automatically. The export +script detects recurrent hidden state in the RSL-RL policy, registers it as LEAPP feedback +state, and ensures it appears in the ``feedback_flow`` section of the output YAML. The +initial hidden state values are saved in the ``.safetensors`` file. + + +Running the Exported Policy in Simulation +----------------------------------------- + +Isaac Lab provides :class:`~envs.DirectDeploymentEnv` for running exported policies back in +simulation without the training infrastructure. This is the Isaac Lab deployment path for +LEAPP-exported policies and is useful for validating that the packaged policy still behaves +correctly when driven through the deployment stack instead of the training stack. + +.. admonition:: TODO + :class: warning + + A full tutorial on ``DirectDeploymentEnv`` usage will be added in a follow-up guide. + +.. admonition:: TODO + :class: warning + + Add a link to the Isaac ROS feature that directly runs LEAPP-exported policies once that + documentation is available. 
+ + +Further Reading +--------------- + +- `LEAPP documentation and API reference `_ +- :class:`~envs.DirectDeploymentEnv` API reference diff --git a/docs/source/policy_deployment/index.rst b/docs/source/policy_deployment/index.rst index 750ca970df65..70cb7244078e 100644 --- a/docs/source/policy_deployment/index.rst +++ b/docs/source/policy_deployment/index.rst @@ -13,3 +13,4 @@ Below, you'll find detailed examples of various policies for training and deploy 02_gear_assembly/gear_assembly_policy 03_compass_with_NuRec/compass_navigation_policy_with_NuRec 04_reach/reach_policy + 05_leapp/exporting_policies_with_leapp diff --git a/docs/source/tutorials/06_exporting/exporting_direct_deployment_policies_with_leapp b/docs/source/tutorials/06_exporting/exporting_direct_deployment_policies_with_leapp new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/scripts/reinforcement_learning/rsl_rl/export.py b/scripts/reinforcement_learning/rsl_rl/export.py index 30608d02465d..1253c1681ce0 100644 --- a/scripts/reinforcement_learning/rsl_rl/export.py +++ b/scripts/reinforcement_learning/rsl_rl/export.py @@ -112,6 +112,7 @@ from isaaclab.envs import ManagerBasedRLEnv, ManagerBasedRLEnvCfg from isaaclab.utils.assets import retrieve_file_path from isaaclab.utils.leapp import patch_env_for_export +from isaaclab.utils.leapp.utils import ensure_env_spec_id from isaaclab_rl.rsl_rl import RslRlBaseRunnerCfg, RslRlVecEnvWrapper, handle_deprecated_rsl_rl_cfg from isaaclab_rl.utils.pretrained_checkpoint import get_published_pretrained_checkpoint @@ -206,25 +207,22 @@ def main(env_cfg: ManagerBasedRLEnvCfg, agent_cfg: RslRlBaseRunnerCfg): # create isaac environment # Note: observation functions are already patched at module level (before isaaclab_tasks import) env = gym.make(args_cli.task, cfg=env_cfg, render_mode=None) - if not isinstance(env.unwrapped, ManagerBasedRLEnv): - raise NotImplementedError( - "Export currently supports only manager-based environments. 
" - f"Task '{args_cli.task}' created env type '{type(env.unwrapped).__name__}'." - ) + annotation_task_name = ensure_env_spec_id(env) + export_task_name = args_cli.export_task_name if args_cli.export_task_name is not None else task_name - # Patch only the observation groups consumed by the actor policy. - obs_groups_cfg = getattr(agent_cfg, "obs_groups", None) - if isinstance(obs_groups_cfg, Mapping): - required_obs_groups = set(obs_groups_cfg.get("actor", ["policy"])) - else: - required_obs_groups = {"policy"} - patch_env_for_export( - env, - task_name=export_task_name, - export_method=args_cli.export_method, - required_obs_groups=required_obs_groups, - ) + if isinstance(env.unwrapped, ManagerBasedRLEnv): + # Patch only the observation groups consumed by the actor policy. + obs_groups_cfg = getattr(agent_cfg, "obs_groups", None) + if isinstance(obs_groups_cfg, Mapping): + required_obs_groups = set(obs_groups_cfg.get("actor", ["policy"])) + else: + required_obs_groups = {"policy"} + patch_env_for_export( + env, + export_method=args_cli.export_method, + required_obs_groups=required_obs_groups, + ) # wrap around environment for rsl-rl env = RslRlVecEnvWrapper(env, clip_actions=agent_cfg.clip_actions) @@ -264,7 +262,7 @@ def main(env_cfg: ManagerBasedRLEnvCfg, agent_cfg: RslRlBaseRunnerCfg): dtype=next(policy_nn.parameters()).dtype, ) registered_state = annotate.state_tensors( - export_task_name, + annotation_task_name, state_dict_from_actor_hidden(actor_hidden), ) actor_memory = get_actor_memory_module(policy_nn) @@ -278,7 +276,7 @@ def main(env_cfg: ManagerBasedRLEnvCfg, agent_cfg: RslRlBaseRunnerCfg): if policy_nn is not None and getattr(policy_nn, "is_recurrent", False): actor_hidden_after = policy_nn.get_hidden_states()[0] annotate.update_state( - export_task_name, + annotation_task_name, state_dict_from_actor_hidden(actor_hidden_after), ) diff --git a/scripts/tutorials/06_deploy/anymal_c_env.py b/scripts/tutorials/06_deploy/anymal_c_env.py new file mode 100644 
index 000000000000..0186dd18c013 --- /dev/null +++ b/scripts/tutorials/06_deploy/anymal_c_env.py @@ -0,0 +1,230 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# ruff: noqa: I001 + +from __future__ import annotations + +import gymnasium as gym +import torch +import warp as wp + +import isaaclab.sim as sim_utils +from isaaclab.assets import Articulation +from isaaclab.envs import DirectRLEnv +from isaaclab.sensors import ContactSensor, RayCaster + +from .anymal_c_env_cfg import AnymalCFlatEnvCfg, AnymalCRoughEnvCfg +from leapp import annotate # isort: skip + + +class AnymalCEnv(DirectRLEnv): + cfg: AnymalCFlatEnvCfg | AnymalCRoughEnvCfg + + def __init__(self, cfg: AnymalCFlatEnvCfg | AnymalCRoughEnvCfg, render_mode: str | None = None, **kwargs): + super().__init__(cfg, render_mode, **kwargs) + + # Joint position command (deviation from default joint positions) + self._actions = torch.zeros(self.num_envs, gym.spaces.flatdim(self.single_action_space), device=self.device) + self._previous_actions = torch.zeros( + self.num_envs, gym.spaces.flatdim(self.single_action_space), device=self.device + ) + + # X/Y linear velocity and yaw angular velocity commands + self._commands = torch.zeros(self.num_envs, 3, device=self.device) + + # Logging + self._episode_sums = { + key: torch.zeros(self.num_envs, dtype=torch.float, device=self.device) + for key in [ + "track_lin_vel_xy_exp", + "track_ang_vel_z_exp", + "lin_vel_z_l2", + "ang_vel_xy_l2", + "dof_torques_l2", + "dof_acc_l2", + "action_rate_l2", + "feet_air_time", + "undesired_contacts", + "flat_orientation_l2", + ] + } + # Get specific body indices + self._base_id, _ = self._contact_sensor.find_sensors("base") + self._feet_ids, _ = self._contact_sensor.find_sensors(".*FOOT") + self._undesired_contact_body_ids, _ = self._contact_sensor.find_sensors(".*THIGH") + + def 
_setup_scene(self): + self._robot = Articulation(self.cfg.robot) + self.scene.articulations["robot"] = self._robot + self._contact_sensor = ContactSensor(self.cfg.contact_sensor) + self.scene.sensors["contact_sensor"] = self._contact_sensor + if isinstance(self.cfg, AnymalCRoughEnvCfg): + # we add a height scanner for perceptive locomotion + self._height_scanner = RayCaster(self.cfg.height_scanner) + self.scene.sensors["height_scanner"] = self._height_scanner + self.cfg.terrain.num_envs = self.scene.cfg.num_envs + self.cfg.terrain.env_spacing = self.scene.cfg.env_spacing + self._terrain = self.cfg.terrain.class_type(self.cfg.terrain) + # clone and replicate + self.scene.clone_environments(copy_from_source=False) + # we need to explicitly filter collisions for CPU simulation + if self.device == "cpu": + self.scene.filter_collisions(global_prim_paths=[self.cfg.terrain.prim_path]) + # add lights + light_cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.75, 0.75, 0.75)) + light_cfg.func("/World/Light", light_cfg) + + def _pre_physics_step(self, actions: torch.Tensor): + annotate.update_state(self.spec.id, {"previous_actions": actions}) + self._actions = actions.clone() + self._processed_actions = self.cfg.action_scale * self._actions + wp.to_torch( + self._robot.data.default_joint_pos + ) + annotate.output_tensors(self.spec.id, {"processed_actions": self._processed_actions}, export_with="onnx") + + def _apply_action(self): + self._robot.set_joint_position_target_index(target=self._processed_actions) + + def _get_observations(self) -> dict: + self._previous_actions = self._actions.clone() + height_data = None + if isinstance(self.cfg, AnymalCRoughEnvCfg): + height_data = ( + self._height_scanner.data.pos_w[:, 2].unsqueeze(1) - self._height_scanner.data.ray_hits_w[..., 2] - 0.5 + ).clip(-1.0, 1.0) + task_name = self.spec.id + root_lin_vel_b = annotate.input_tensors( + task_name, {"root_lin_vel_b": wp.to_torch(self._robot.data.root_lin_vel_b)} + ) + root_ang_vel_b = 
annotate.input_tensors( + task_name, {"root_ang_vel_b": wp.to_torch(self._robot.data.root_ang_vel_b)} + ) + projected_gravity_b = annotate.input_tensors( + task_name, {"projected_gravity_b": wp.to_torch(self._robot.data.projected_gravity_b)} + ) + commands = annotate.input_tensors(task_name, {"commands": self._commands}) + joint_pos = annotate.input_tensors(task_name, {"joint_pos": wp.to_torch(self._robot.data.joint_pos)}) + default_joint_pos = annotate.input_tensors( + task_name, {"default_joint_pos": wp.to_torch(self._robot.data.default_joint_pos)} + ) + joint_vel = annotate.input_tensors(task_name, {"joint_vel": wp.to_torch(self._robot.data.joint_vel)}) + previous_actions = annotate.state_tensors(task_name, {"previous_actions": self._actions}) + + obs = torch.cat( + [ + tensor + for tensor in ( + root_lin_vel_b, + root_ang_vel_b, + projected_gravity_b, + commands, + joint_pos - default_joint_pos, + joint_vel, + height_data, + previous_actions, + ) + if tensor is not None + ], + dim=-1, + ) + observations = {"policy": obs} + return observations + + def _get_rewards(self) -> torch.Tensor: + # linear velocity tracking + lin_vel_error = torch.sum( + torch.square(self._commands[:, :2] - wp.to_torch(self._robot.data.root_lin_vel_b)[:, :2]), dim=1 + ) + lin_vel_error_mapped = torch.exp(-lin_vel_error / 0.25) + # yaw rate tracking + yaw_rate_error = torch.square(self._commands[:, 2] - wp.to_torch(self._robot.data.root_ang_vel_b)[:, 2]) + yaw_rate_error_mapped = torch.exp(-yaw_rate_error / 0.25) + # z velocity tracking + z_vel_error = torch.square(wp.to_torch(self._robot.data.root_lin_vel_b)[:, 2]) + # angular velocity x/y + ang_vel_error = torch.sum(torch.square(wp.to_torch(self._robot.data.root_ang_vel_b)[:, :2]), dim=1) + # joint torques + joint_torques = torch.sum(torch.square(wp.to_torch(self._robot.data.applied_torque)), dim=1) + # joint acceleration + joint_accel = torch.sum(torch.square(wp.to_torch(self._robot.data.joint_acc)), dim=1) + # action rate + 
action_rate = torch.sum(torch.square(self._actions - self._previous_actions), dim=1) + # feet air time + first_contact = wp.to_torch(self._contact_sensor.compute_first_contact(self.step_dt))[:, self._feet_ids] + last_air_time = wp.to_torch(self._contact_sensor.data.last_air_time)[:, self._feet_ids] + air_time = torch.sum((last_air_time - 0.5) * first_contact, dim=1) * ( + torch.linalg.norm(self._commands[:, :2], dim=1) > 0.1 + ) + # undesired contacts + net_contact_forces = wp.to_torch(self._contact_sensor.data.net_forces_w_history) + is_contact = ( + torch.max(torch.linalg.norm(net_contact_forces[:, :, self._undesired_contact_body_ids], dim=-1), dim=1)[0] + > 1.0 + ) + contacts = torch.sum(is_contact, dim=1) + # flat orientation + flat_orientation = torch.sum(torch.square(wp.to_torch(self._robot.data.projected_gravity_b)[:, :2]), dim=1) + + rewards = { + "track_lin_vel_xy_exp": lin_vel_error_mapped * self.cfg.lin_vel_reward_scale * self.step_dt, + "track_ang_vel_z_exp": yaw_rate_error_mapped * self.cfg.yaw_rate_reward_scale * self.step_dt, + "lin_vel_z_l2": z_vel_error * self.cfg.z_vel_reward_scale * self.step_dt, + "ang_vel_xy_l2": ang_vel_error * self.cfg.ang_vel_reward_scale * self.step_dt, + "dof_torques_l2": joint_torques * self.cfg.joint_torque_reward_scale * self.step_dt, + "dof_acc_l2": joint_accel * self.cfg.joint_accel_reward_scale * self.step_dt, + "action_rate_l2": action_rate * self.cfg.action_rate_reward_scale * self.step_dt, + "feet_air_time": air_time * self.cfg.feet_air_time_reward_scale * self.step_dt, + "undesired_contacts": contacts * self.cfg.undesired_contact_reward_scale * self.step_dt, + "flat_orientation_l2": flat_orientation * self.cfg.flat_orientation_reward_scale * self.step_dt, + } + reward = torch.sum(torch.stack(list(rewards.values())), dim=0) + # Logging + for key, value in rewards.items(): + self._episode_sums[key] += value + return reward + + def _get_dones(self) -> tuple[torch.Tensor, torch.Tensor]: + time_out = 
self.episode_length_buf >= self.max_episode_length - 1 + net_contact_forces = wp.to_torch(self._contact_sensor.data.net_forces_w_history) + died = torch.any( + torch.max(torch.linalg.norm(net_contact_forces[:, :, self._base_id], dim=-1), dim=1)[0] > 1.0, dim=1 + ) + return died, time_out + + def _reset_idx(self, env_ids: torch.Tensor | None): + if env_ids is None or len(env_ids) == self.num_envs: + env_ids = wp.to_torch(self._robot._ALL_INDICES) + self._robot.reset(env_ids) + super()._reset_idx(env_ids) + if len(env_ids) == self.num_envs: + # Spread out the resets to avoid spikes in training when many environments reset at a similar time + self.episode_length_buf[:] = torch.randint_like(self.episode_length_buf, high=int(self.max_episode_length)) + self._actions[env_ids] = 0.0 + self._previous_actions[env_ids] = 0.0 + # Sample new commands + self._commands[env_ids] = torch.zeros_like(self._commands[env_ids]).uniform_(-1.0, 1.0) + # Reset robot state + joint_pos = wp.to_torch(self._robot.data.default_joint_pos)[env_ids] + joint_vel = wp.to_torch(self._robot.data.default_joint_vel)[env_ids] + default_root_pose = wp.to_torch(self._robot.data.default_root_pose)[env_ids] + default_root_vel = wp.to_torch(self._robot.data.default_root_vel)[env_ids] + default_root_pose[:, :3] += self._terrain.env_origins[env_ids] + self._robot.write_root_pose_to_sim_index(root_pose=default_root_pose, env_ids=env_ids) + self._robot.write_root_velocity_to_sim_index(root_velocity=default_root_vel, env_ids=env_ids) + self._robot.write_joint_position_to_sim_index(position=joint_pos, env_ids=env_ids) + self._robot.write_joint_velocity_to_sim_index(velocity=joint_vel, env_ids=env_ids) + # Logging + extras = dict() + for key in self._episode_sums.keys(): + episodic_sum_avg = torch.mean(self._episode_sums[key][env_ids]) + extras["Episode_Reward/" + key] = episodic_sum_avg / self.max_episode_length_s + self._episode_sums[key][env_ids] = 0.0 + self.extras["log"] = dict() + 
self.extras["log"].update(extras) + extras = dict() + extras["Episode_Termination/base_contact"] = torch.count_nonzero(self.reset_terminated[env_ids]).item() + extras["Episode_Termination/time_out"] = torch.count_nonzero(self.reset_time_outs[env_ids]).item() + self.extras["log"].update(extras) diff --git a/source/isaaclab/isaaclab/utils/leapp/export_annotator.py b/source/isaaclab/isaaclab/utils/leapp/export_annotator.py index 141e9bb09bdd..ca5365da3ad7 100644 --- a/source/isaaclab/isaaclab/utils/leapp/export_annotator.py +++ b/source/isaaclab/isaaclab/utils/leapp/export_annotator.py @@ -90,8 +90,8 @@ class ExportPatcher: in as a constant. """ - def __init__(self, task_name: str, export_method: str, required_obs_groups: set[str] | None = None): - self.task_name = task_name + def __init__(self, export_method: str, required_obs_groups: set[str] | None = None): + self.task_name: str | None = None self.export_method = export_method self.required_obs_groups = required_obs_groups self._annotated_tensor_cache: dict[tuple[int, str], torch.Tensor] = {} @@ -109,6 +109,7 @@ def __init__(self, task_name: str, export_method: str, required_obs_groups: set[ def setup(self, env): """Patch observation and action managers on the unwrapped env.""" unwrapped = env.env.unwrapped + self.task_name = unwrapped.spec.id proxy_env = _EnvProxy( unwrapped, @@ -535,7 +536,6 @@ def _collect_action_static_outputs( def patch_env_for_export( env: ManagerBasedEnv, - task_name: str, export_method: str, required_obs_groups: set[str] | None = None, ) -> None: @@ -564,5 +564,5 @@ class list is required. Properties with ``_leapp_semantics`` produce of the pipeline; only the manager call paths are redirected. 
""" patch_warp_to_torch_passthrough() - patcher = ExportPatcher(task_name, export_method, required_obs_groups=required_obs_groups) + patcher = ExportPatcher(export_method, required_obs_groups=required_obs_groups) patcher.setup(env) diff --git a/source/isaaclab/isaaclab/utils/leapp/utils.py b/source/isaaclab/isaaclab/utils/leapp/utils.py index 19a11aa0bffd..9f1f43d4a558 100644 --- a/source/isaaclab/isaaclab/utils/leapp/utils.py +++ b/source/isaaclab/isaaclab/utils/leapp/utils.py @@ -6,6 +6,7 @@ from __future__ import annotations from contextlib import suppress +from types import SimpleNamespace from typing import Any import torch @@ -53,6 +54,21 @@ def patched_to_torch(value, *args, **kwargs): wp.to_torch = patched_to_torch +def ensure_env_spec_id(env, fallback_task_name: str = "policy") -> str: + """Return ``env.unwrapped.spec.id``, creating a fallback spec when needed.""" + spec = getattr(env.unwrapped, "spec", None) + if spec is None: + env.unwrapped.spec = SimpleNamespace(id=fallback_task_name) + return fallback_task_name + + task_name = getattr(spec, "id", None) + if task_name is None: + spec.id = fallback_task_name + return fallback_task_name + + return task_name + + # ══════════════════════════════════════════════════════════════════ # Connection Builders # ══════════════════════════════════════════════════════════════════ diff --git a/source/isaaclab_rl/test/export/test_rsl_rl_direct_export_flow.py b/source/isaaclab_rl/test/export/test_rsl_rl_direct_export_flow.py new file mode 100644 index 000000000000..632a8f54b1e8 --- /dev/null +++ b/source/isaaclab_rl/test/export/test_rsl_rl_direct_export_flow.py @@ -0,0 +1,192 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +"""Direct-env export integration test with subprocess-side gym re-registration.""" + +from __future__ import annotations + +import argparse +import importlib +import importlib.util +import os +import runpy +import shutil +import subprocess +import sys +import tempfile +import types +from pathlib import Path + +import gymnasium as gym +import pytest + +_THIS_FILE = Path(__file__).resolve() +_REPO_ROOT = str(_THIS_FILE.parents[4]) +_EXPORT_SCRIPT = os.path.join(_REPO_ROOT, "scripts", "reinforcement_learning", "rsl_rl", "export.py") +_THIS_SCRIPT = str(_THIS_FILE) +_TASK_NAME = "Isaac-Velocity-Flat-Anymal-C-Direct-v0" +_PACKAGE_NAME = "_isaaclab_test_tutorial_anymal_c" +_MODULE_NAME = f"{_PACKAGE_NAME}.anymal_c_env" +_CFG_MODULE_NAME = f"{_PACKAGE_NAME}.anymal_c_env_cfg" +_RUNTIME_MODULE_NAME = "_isaaclab_test_tutorial_anymal_c_runtime" +_TUTORIAL_ENV_PATH = Path(_REPO_ROOT) / "scripts" / "tutorials" / "06_deploy" / "anymal_c_env.py" + + +def _export_command(task_name: str, export_dir: str) -> list[str]: + """Build a subprocess command that runs this file in helper mode.""" + return [ + sys.executable, + _THIS_SCRIPT, + "--task", + task_name, + "--export_save_path", + export_dir, + "--disable_graph_visualization", + "--headless", + ] + + +def _artifact_dir(export_dir: str, task_name: str) -> str: + """Return the LEAPP artifact directory for the exported task.""" + return os.path.join(export_dir, task_name) + + +def _load_tutorial_env_class(): + """Load the tutorial env through a synthetic package for relative imports.""" + module = sys.modules.get(_MODULE_NAME) + if module is not None: + return module.AnymalCEnv + + package = types.ModuleType(_PACKAGE_NAME) + package.__path__ = [] # type: ignore[attr-defined] + sys.modules.setdefault(_PACKAGE_NAME, package) + + cfg_module = importlib.import_module("isaaclab_tasks.direct.anymal_c.anymal_c_env_cfg") + sys.modules[_CFG_MODULE_NAME] = cfg_module + + spec = 
importlib.util.spec_from_file_location(_MODULE_NAME, _TUTORIAL_ENV_PATH) + if spec is None or spec.loader is None: + raise ImportError(f"Could not create module spec for tutorial env: {_TUTORIAL_ENV_PATH}") + + module = importlib.util.module_from_spec(spec) + sys.modules[_MODULE_NAME] = module + spec.loader.exec_module(module) + return module.AnymalCEnv + + +class _LazyTutorialEnvModule(types.ModuleType): + """Resolve the tutorial env class only when gym imports the entrypoint.""" + + def __getattr__(self, name: str): + if name != "AnymalCEnv": + raise AttributeError(name) + env_class = _load_tutorial_env_class() + setattr(self, name, env_class) + return env_class + + +def _install_lazy_runtime_module() -> str: + """Install a lazy module so gym can defer tutorial env imports.""" + module = sys.modules.get(_RUNTIME_MODULE_NAME) + if module is None: + sys.modules[_RUNTIME_MODULE_NAME] = _LazyTutorialEnvModule(_RUNTIME_MODULE_NAME) + return _RUNTIME_MODULE_NAME + + +def _reregister_task(task_name: str) -> None: + """Override the direct task registration to point at the tutorial env.""" + import isaaclab_tasks.direct.anymal_c # noqa: F401 + + original_spec = gym.spec(task_name) + original_kwargs = dict(original_spec.kwargs) + runtime_module_name = _install_lazy_runtime_module() + + gym.registry.pop(task_name, None) + gym.register( + id=task_name, + entry_point=f"{runtime_module_name}:AnymalCEnv", + disable_env_checker=original_spec.disable_env_checker, + kwargs=original_kwargs, + ) + + +def _run_export_subprocess_entrypoint() -> None: + """Run export.py after re-registering the direct task in-process.""" + parser = argparse.ArgumentParser(add_help=False) + parser.add_argument("--task", required=True) + args, remaining_args = parser.parse_known_args() + + _reregister_task(args.task) + export_script_dir = os.path.dirname(_EXPORT_SCRIPT) + sys.argv = [_EXPORT_SCRIPT, "--task", args.task, *remaining_args] + if export_script_dir not in sys.path: + sys.path.insert(0, 
export_script_dir) + runpy.run_path(_EXPORT_SCRIPT, run_name="__main__") + + +def _build_failure_context(result: subprocess.CompletedProcess[str], artifact_dir: str) -> str: + """Return debug context for subprocess and export artifacts.""" + export_dir = os.path.dirname(artifact_dir) + log_txt_path = os.path.join(artifact_dir, "log.txt") + leapp_tail = "" + if os.path.isfile(log_txt_path): + with open(log_txt_path) as file: + last_lines = file.readlines()[-50:] + leapp_tail = f"\n--- leapp log.txt (last 50 lines) ---\n{''.join(last_lines)}" + + try: + export_dir_contents = sorted(os.listdir(export_dir)) + except FileNotFoundError: + export_dir_contents = [""] + + try: + artifact_dir_contents = sorted(os.listdir(artifact_dir)) + except FileNotFoundError: + artifact_dir_contents = [""] + + return ( + f"--- export_dir ---\n{export_dir}\n" + f"--- export_dir contents ---\n{export_dir_contents}\n" + f"--- artifact_dir ---\n{artifact_dir}\n" + f"--- artifact_dir contents ---\n{artifact_dir_contents}\n" + f"--- stdout ---\n{result.stdout[-3000:]}\n" + f"--- stderr ---\n{result.stderr[-3000:]}" + f"{leapp_tail}" + ) + + +def test_direct_env_export_flow(): + """Run export.py against the tutorial direct env and assert artifacts are created.""" + export_dir = tempfile.mkdtemp(prefix="isaaclab-direct-export-") + artifact_dir = _artifact_dir(export_dir, _TASK_NAME) + shutil.rmtree(artifact_dir, ignore_errors=True) + + # TODO: Switch this test to --use_pretrained_checkpoint when a published + # checkpoint is available for the direct tutorial task. 
+ result = subprocess.run( + _export_command(_TASK_NAME, export_dir), + cwd=_REPO_ROOT, + capture_output=True, + text=True, + timeout=600, + ) + + if result.returncode != 0: + pytest.fail(f"export.py exited with code {result.returncode}.\n{_build_failure_context(result, artifact_dir)}") + + onnx_path = os.path.join(artifact_dir, f"{_TASK_NAME}.onnx") + yaml_path = os.path.join(artifact_dir, f"{_TASK_NAME}.yaml") + log_path = os.path.join(artifact_dir, "log.txt") + + if not os.path.isfile(onnx_path): + pytest.fail(f"Missing .onnx export at {onnx_path}.\n{_build_failure_context(result, artifact_dir)}") + if not os.path.isfile(yaml_path): + pytest.fail(f"Missing .yaml export at {yaml_path}.\n{_build_failure_context(result, artifact_dir)}") + if not os.path.isfile(log_path): + pytest.fail(f"Missing log.txt at {log_path}.\n{_build_failure_context(result, artifact_dir)}") + + +if __name__ == "__main__": + _run_export_subprocess_entrypoint() diff --git a/source/isaaclab_rl/test/test_rsl_rl_export_flow.py b/source/isaaclab_rl/test/export/test_rsl_rl_export_flow.py similarity index 100% rename from source/isaaclab_rl/test/test_rsl_rl_export_flow.py rename to source/isaaclab_rl/test/export/test_rsl_rl_export_flow.py From de9a7ac8743702f4de29e0f0f2d3d2b425578e07 Mon Sep 17 00:00:00 2001 From: Frank Lai Date: Wed, 1 Apr 2026 15:58:54 -0700 Subject: [PATCH 15/20] updated docs for exporting direct deployment policies --- ...ting_direct_deployment_policies_with_leapp | 0 ..._direct_deployment_policies_with_leapp.rst | 176 ++++++++++++++++++ docs/source/tutorials/index.rst | 11 ++ scripts/tutorials/06_deploy/anymal_c_env.py | 28 +-- 4 files changed, 203 insertions(+), 12 deletions(-) delete mode 100644 docs/source/tutorials/06_exporting/exporting_direct_deployment_policies_with_leapp create mode 100644 docs/source/tutorials/06_exporting/exporting_direct_deployment_policies_with_leapp.rst diff --git 
a/docs/source/tutorials/06_exporting/exporting_direct_deployment_policies_with_leapp b/docs/source/tutorials/06_exporting/exporting_direct_deployment_policies_with_leapp deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/docs/source/tutorials/06_exporting/exporting_direct_deployment_policies_with_leapp.rst b/docs/source/tutorials/06_exporting/exporting_direct_deployment_policies_with_leapp.rst new file mode 100644 index 000000000000..994886044f53 --- /dev/null +++ b/docs/source/tutorials/06_exporting/exporting_direct_deployment_policies_with_leapp.rst @@ -0,0 +1,176 @@ +Exporting Direct Deployment Policies with LEAPP +=============================================== + +.. currentmodule:: isaaclab + +This tutorial shows how to prepare a direct deployment policy for export with +LEAPP. If your policy is manager-based, use the +:doc:`manager-based LEAPP export guide ` +instead. + + +Overview +~~~~~~~~ + +To export a direct deployment policy with LEAPP, you add LEAPP annotations to the +environment code. During export, LEAPP traces the annotated tensors and builds an +intermediate representation of the full policy pipeline. These annotations remain +dormant during normal environment execution and only add a small amount of +overhead until export time. They are activated by +``scripts/reinforcement_learning/rsl_rl/export.py`` when you run the export flow. + +This tutorial uses ``scripts/tutorials/06_deploy/anymal_c_env.py`` as the example. +The script is based on the existing ANYmal-C direct environment at +``source/isaaclab_tasks/isaaclab_tasks/direct/anymal_c/anymal_c_env.py`` and adds +the annotations needed to make it compatible with the export script. Once you have added +the annotations to your direct RL environment, you can export a trained policy +with: + +.. 
code-block:: bash + + ./isaaclab.sh -p scripts/reinforcement_learning/rsl_rl/export.py \ + --task \ + --checkpoint \ + --export_save_path + +The ``--task`` argument is the registered task name, such as +``Isaac-Velocity-Rough-Anymal-C-Direct-v0``. The ``--checkpoint`` argument +points to the trained RSL-RL checkpoint to export. The optional +``--export_save_path`` argument selects the output directory for the exported +artifacts. If you omit it, the export is written next to the checkpoint. + +For more information on the export arguments, see the +:doc:`manager-based LEAPP export guide `. + + +.. dropdown:: Full example script + :icon: code + + .. literalinclude:: ../../../../scripts/tutorials/06_deploy/anymal_c_env.py + :language: python + :emphasize-lines: 20, 100-118, 85-88 + :linenos: + + +How the Annotations Work +~~~~~~~~~~~~~~~~~~~~~~~~ + +The main task is to identify the inputs, outputs, and persistent state in the +environment and register them with LEAPP. In this example, the script uses four +annotation helpers: + +- :func:`annotate.input_tensors` marks tensors that enter the policy pipeline. +- :func:`annotate.output_tensors` marks tensors that leave the environment-side + part of the pipeline. +- :func:`annotate.state_tensors` marks tensors that behave like persistent state. +- :func:`annotate.update_state` updates that persistent state after each step. + + +Input Annotations +~~~~~~~~~~~~~~~~~ + +Input annotations usually belong in ``_get_observations()``, because that method +collects the tensors that are passed to the policy. + + +.. literalinclude:: ../../../../scripts/tutorials/06_deploy/anymal_c_env.py + :language: python + :start-at: # start LEAPP annotations for inputs + :end-at: # end LEAPP annotations for inputs + :dedent: 8 + +``annotate.input_tensors()`` wraps a tensor so LEAPP can trace all downstream +operations that depend on it. The function takes two important arguments: + +- ``self.spec.id`` identifies the node that owns the tensor. 
When you use + ``export.py``, this ID matches the exported policy node. +- The second argument is a dictionary that maps a unique tensor name to the + tensor itself. LEAPP uses these names in the exported metadata and for + debugging. + +In this example, the observation tensors are registered one by one for +readability, but ``annotate.input_tensors()`` can also register multiple tensors +in a single call. + +.. note:: + Any inputs not explicitly annotated will be automatically inlined as a constant. + This may be desired for certain values such as constant transforms or default values. + + +Output Annotations +~~~~~~~~~~~~~~~~~~ + +Output annotations should be placed where the environment has finished preparing +the command that will be applied to the robot. In this example, that happens in +``_pre_physics_step()``. + +.. literalinclude:: ../../../../scripts/tutorials/06_deploy/anymal_c_env.py + :language: python + :start-at: # start LEAPP annotations for outputs + :end-at: # end LEAPP annotations for outputs + :dedent: 8 + +``annotate.output_tensors()`` marks the tensors that leave the environment-side +part of the pipeline. As with input annotations, the call uses ``self.spec.id`` +together with a dictionary that maps tensor names to tensors. + +The ``export_with`` argument restricts an output annotation to specific +export backends. The supported backend names are ``onnx-dynamo``, ``onnx-torchscript``, +``jit-script``, and ``jit-trace``. This argument is needed to actually generate the IR +based on the tracing. + +Unlike ``annotate.input_tensors()``, output annotation should happen once for the +final outputs of the pipeline stage. In this example, ``processed_actions`` is +the tensor that should be exported. After calling +``annotate.output_tensors()``, you do not need to use a return value. + +.. note:: + All tensors passed to ``annotate.output_tensors()`` must be traced tensors. + These tensors are created from inputs or tensors derived from inputs. + +.. 
warning:: + + Do not place output annotations in ``_apply_action()``. That method may be + called multiple times per environment step, depending on the decimation + setting, which would make the traced pipeline incorrect. + + +State Annotations +~~~~~~~~~~~~~~~~~ + +If your policy depends on internal state or feedback loops, register that data +explicitly with ``annotate.state_tensors()`` and update it with +``annotate.update_state()``. + +In this example, the environment uses the previous action as part of the +observation. That makes ``previous_actions`` a feedback state: + +- ``annotate.state_tensors()`` is called in ``_get_observations()`` so the state + can participate in the traced observation pipeline. +- ``annotate.update_state()`` is called in ``_pre_physics_step()`` so the stored + value is updated for the next step. + +The state name must match across both calls. Here, both functions use the name +``previous_actions``, which lets LEAPP route the feedback tensor correctly. + + +Semantic Annotations +~~~~~~~~~~~~~~~~~~~~ + +This example covers the minimum annotations needed to trace the pipeline. In +more advanced export workflows, you may also want to attach semantic metadata +so downstream runtimes know what each tensor represents. + +For direct environments, semantic annotations are optional and should be +authored explicitly by the user. Unlike the manager-based export path, Isaac Lab +does not infer tensor semantics automatically for direct environments, instead it +is up to the user to provide this data. LEAPP provides this through +``TensorSemantics``. You can use it to describe the meaning of tensors more +precisely and make the exported pipeline easier to inspect, validate, and integrate +into deployment systems. + +.. note:: + + Refer to the full LEAPP documentation and API reference for details on + authoring semantic annotations once that documentation becomes publicly + available. 
diff --git a/docs/source/tutorials/index.rst b/docs/source/tutorials/index.rst index f1096e6c05b0..8f15bb5df11b 100644 --- a/docs/source/tutorials/index.rst +++ b/docs/source/tutorials/index.rst @@ -108,3 +108,14 @@ tutorials show you how to use motion generators to control the robots at the tas 05_controllers/run_diff_ik 05_controllers/run_osc + +Exporting Policies +------------------ + +The following tutorial shows how to prepare a direct deployment policy for export with LEAPP. + +.. toctree:: + :maxdepth: 1 + :titlesonly: + + 06_exporting/exporting_direct_deployment_policies_with_leapp diff --git a/scripts/tutorials/06_deploy/anymal_c_env.py b/scripts/tutorials/06_deploy/anymal_c_env.py index 0186dd18c013..421d0875944f 100644 --- a/scripts/tutorials/06_deploy/anymal_c_env.py +++ b/scripts/tutorials/06_deploy/anymal_c_env.py @@ -78,12 +78,14 @@ def _setup_scene(self): light_cfg.func("/World/Light", light_cfg) def _pre_physics_step(self, actions: torch.Tensor): - annotate.update_state(self.spec.id, {"previous_actions": actions}) self._actions = actions.clone() self._processed_actions = self.cfg.action_scale * self._actions + wp.to_torch( self._robot.data.default_joint_pos ) - annotate.output_tensors(self.spec.id, {"processed_actions": self._processed_actions}, export_with="onnx") + # start LEAPP annotations for outputs + annotate.update_state(self.spec.id, {"previous_actions": actions}) + annotate.output_tensors(self.spec.id, {"processed_actions": self._processed_actions}, export_with="onnx-dynamo") + # end LEAPP annotations for outputs def _apply_action(self): self._robot.set_joint_position_target_index(target=self._processed_actions) @@ -95,23 +97,25 @@ def _get_observations(self) -> dict: height_data = ( self._height_scanner.data.pos_w[:, 2].unsqueeze(1) - self._height_scanner.data.ray_hits_w[..., 2] - 0.5 ).clip(-1.0, 1.0) - task_name = self.spec.id + # start LEAPP annotations for inputs + # NOTE: height data is not used by the flat policy. 
not needed for this example root_lin_vel_b = annotate.input_tensors( - task_name, {"root_lin_vel_b": wp.to_torch(self._robot.data.root_lin_vel_b)} + self.spec.id, {"root_lin_vel_b": wp.to_torch(self._robot.data.root_lin_vel_b)} ) root_ang_vel_b = annotate.input_tensors( - task_name, {"root_ang_vel_b": wp.to_torch(self._robot.data.root_ang_vel_b)} + self.spec.id, {"root_ang_vel_b": wp.to_torch(self._robot.data.root_ang_vel_b)} ) projected_gravity_b = annotate.input_tensors( - task_name, {"projected_gravity_b": wp.to_torch(self._robot.data.projected_gravity_b)} + self.spec.id, {"projected_gravity_b": wp.to_torch(self._robot.data.projected_gravity_b)} ) - commands = annotate.input_tensors(task_name, {"commands": self._commands}) - joint_pos = annotate.input_tensors(task_name, {"joint_pos": wp.to_torch(self._robot.data.joint_pos)}) + commands = annotate.input_tensors(self.spec.id, {"commands": self._commands}) + joint_pos = annotate.input_tensors(self.spec.id, {"joint_pos": wp.to_torch(self._robot.data.joint_pos)}) default_joint_pos = annotate.input_tensors( - task_name, {"default_joint_pos": wp.to_torch(self._robot.data.default_joint_pos)} + self.spec.id, {"default_joint_pos": wp.to_torch(self._robot.data.default_joint_pos)} ) - joint_vel = annotate.input_tensors(task_name, {"joint_vel": wp.to_torch(self._robot.data.joint_vel)}) - previous_actions = annotate.state_tensors(task_name, {"previous_actions": self._actions}) + joint_vel = annotate.input_tensors(self.spec.id, {"joint_vel": wp.to_torch(self._robot.data.joint_vel)}) + previous_actions = annotate.state_tensors(self.spec.id, {"previous_actions": self._actions}) + # end LEAPP annotations for inputs obs = torch.cat( [ @@ -123,7 +127,7 @@ def _get_observations(self) -> dict: commands, joint_pos - default_joint_pos, joint_vel, - height_data, + height_data, # height data is not used by the flat policy. 
not needed for this example previous_actions, ) if tensor is not None From bc39385d9c5856db3a51b58c86e9c4c0c0bee70b Mon Sep 17 00:00:00 2001 From: Frank Lai Date: Thu, 2 Apr 2026 10:36:21 -0700 Subject: [PATCH 16/20] minor cleanup adding docstrigs --- scripts/reinforcement_learning/rsl_rl/export.py | 10 ++++------ .../isaaclab/isaaclab/utils/leapp/export_annotator.py | 11 +++++++---- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/scripts/reinforcement_learning/rsl_rl/export.py b/scripts/reinforcement_learning/rsl_rl/export.py index 1253c1681ce0..dac38dfda1bf 100644 --- a/scripts/reinforcement_learning/rsl_rl/export.py +++ b/scripts/reinforcement_learning/rsl_rl/export.py @@ -77,12 +77,6 @@ default=False, help="Disable LEAPP graph visualization during compile_graph().", ) -parser.add_argument( - "--disable_automatic_module_annotation", - action="store_true", - default=False, - help="Disables automatic detection and annotation of modules that have internal states", -) # append RSL-RL cli arguments cli_args.add_rsl_rl_args(parser) @@ -123,6 +117,7 @@ def get_actor_memory_module(policy_nn): + """Return the actor-side recurrent memory module when the policy exposes one.""" if hasattr(policy_nn, "memory_a"): return policy_nn.memory_a if hasattr(policy_nn, "memory_s"): @@ -131,6 +126,7 @@ def get_actor_memory_module(policy_nn): def ensure_actor_hidden_state_initialized(policy_nn, batch_size: int, device: torch.device, dtype: torch.dtype): + """Initialize and return the actor hidden state when a recurrent policy has not created it yet.""" actor_state, _ = policy_nn.get_hidden_states() if actor_state is not None: return actor_state @@ -151,6 +147,7 @@ def ensure_actor_hidden_state_initialized(policy_nn, batch_size: int, device: to def state_dict_from_actor_hidden(actor_hidden): + """Convert the actor hidden state into the named tensor mapping expected by LEAPP state APIs.""" if actor_hidden is None: return {} if isinstance(actor_hidden, tuple): @@ -159,6 
+156,7 @@ def state_dict_from_actor_hidden(actor_hidden): def actor_hidden_from_registered(registered_state, original_hidden): + """Restore the registered LEAPP state to the hidden-state structure expected by the actor memory module.""" if isinstance(original_hidden, tuple): if isinstance(registered_state, tuple): return registered_state diff --git a/source/isaaclab/isaaclab/utils/leapp/export_annotator.py b/source/isaaclab/isaaclab/utils/leapp/export_annotator.py index ca5365da3ad7..20a8421bc728 100644 --- a/source/isaaclab/isaaclab/utils/leapp/export_annotator.py +++ b/source/isaaclab/isaaclab/utils/leapp/export_annotator.py @@ -107,7 +107,7 @@ def __init__(self, export_method: str, required_obs_groups: set[str] | None = No self._action_term_scene_keys: dict[str, str] = {} def setup(self, env): - """Patch observation and action managers on the unwrapped env.""" + """Patch the unwrapped env in place for LEAPP-aware observation and action export.""" unwrapped = env.env.unwrapped self.task_name = unwrapped.spec.id @@ -551,9 +551,12 @@ def patch_env_for_export( reads **and** ``Articulation`` write methods. Data properties are resolved lazily through proxies — no hardcoded - class list is required. Properties with ``_leapp_semantics`` produce - rich annotations; properties without it are still traced so that no - tensor is silently baked as a constant. + class list is required. To produce LEAPP input annotations, the + accessed data property getter must carry ``_leapp_semantics``. + Likewise, action-side write methods must be annotated to produce + semantic LEAPP outputs. Undecorated reads and writes are forwarded + as normal runtime access, but they do not gain semantic annotation + metadata through this patching path. 
State reads are deduplicated across observation and action paths via a shared cache, so a property like ``joint_pos`` that is read by both an From 0904877cdbfd8032d0df4e826539906ed5757cdb Mon Sep 17 00:00:00 2001 From: Frank Lai Date: Mon, 20 Apr 2026 12:39:44 -0700 Subject: [PATCH 17/20] added semantic annotations to the new pva dataclass --- scripts/reinforcement_learning/deploy.py | 8 ++++++++ .../isaaclab/sensors/pva/base_pva_data.py | 16 ++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/scripts/reinforcement_learning/deploy.py b/scripts/reinforcement_learning/deploy.py index bd6a40aa16db..8d00842fc390 100644 --- a/scripts/reinforcement_learning/deploy.py +++ b/scripts/reinforcement_learning/deploy.py @@ -47,6 +47,14 @@ def main(): # ── Create deploy env ───────────────────────────────────────── env = DirectDeploymentEnv(env_cfg, args_cli.leapp_model) + if getattr(args_cli, "headless", False): + print( + "[WARN]: Running deploy without a viewport. This happens when headless mode is active, " + "including the default case where no visualizer was selected. The policy may be " + "stepping normally, but no viewport will appear unless you specify the " + "`--visualizer` field." + ) + print(f"[INFO]: Deploying task '{task_name}' with LEAPP model: {args_cli.leapp_model}") print(f"[INFO]: Num envs: {env.num_envs}, decimation: {env.cfg.decimation}, step_dt: {env.step_dt:.4f}s") diff --git a/source/isaaclab/isaaclab/sensors/pva/base_pva_data.py b/source/isaaclab/isaaclab/sensors/pva/base_pva_data.py index 17c56e3f3bb5..12ba18e03975 100644 --- a/source/isaaclab/isaaclab/sensors/pva/base_pva_data.py +++ b/source/isaaclab/isaaclab/sensors/pva/base_pva_data.py @@ -11,6 +11,14 @@ import warp as wp +from isaaclab.utils.leapp import ( + POSE7_ELEMENT_NAMES, + QUAT_WXYZ_ELEMENT_NAMES, + XYZ_ELEMENT_NAMES, + InputKindEnum, + leapp_tensor_semantics, +) + class BasePvaData(ABC): """Data container for the PVA sensor. 
@@ -21,6 +29,7 @@ class BasePvaData(ABC): @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSE, element_names=POSE7_ELEMENT_NAMES) def pose_w(self) -> wp.array | None: """Pose of the sensor origin in world frame [m, unitless]. @@ -31,6 +40,7 @@ def pose_w(self) -> wp.array | None: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_POSITION, element_names=XYZ_ELEMENT_NAMES) def pos_w(self) -> wp.array: """Position of the sensor origin in world frame [m]. @@ -40,6 +50,7 @@ def pos_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ROTATION, element_names=QUAT_WXYZ_ELEMENT_NAMES) def quat_w(self) -> wp.array: """Orientation of the sensor origin in world frame. @@ -50,6 +61,7 @@ def quat_w(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.VECTOR3D, element_names=XYZ_ELEMENT_NAMES) def projected_gravity_b(self) -> wp.array: """Gravity direction unit vector projected on the PVA frame. @@ -59,6 +71,7 @@ def projected_gravity_b(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_VELOCITY, element_names=XYZ_ELEMENT_NAMES) def lin_vel_b(self) -> wp.array: """PVA frame linear velocity relative to the world expressed in PVA frame [m/s]. @@ -68,6 +81,7 @@ def lin_vel_b(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_VELOCITY, element_names=XYZ_ELEMENT_NAMES) def ang_vel_b(self) -> wp.array: """PVA frame angular velocity relative to the world expressed in PVA frame [rad/s]. @@ -77,6 +91,7 @@ def ang_vel_b(self) -> wp.array: @property @abstractmethod + @leapp_tensor_semantics(kind=InputKindEnum.BODY_LINEAR_ACCELERATION, element_names=XYZ_ELEMENT_NAMES) def lin_acc_b(self) -> wp.array: """Linear acceleration (coordinate) in the PVA frame [m/s^2]. 
@@ -88,6 +103,7 @@ def lin_acc_b(self) -> wp.array:
 
     @property
     @abstractmethod
+    @leapp_tensor_semantics(kind=InputKindEnum.BODY_ANGULAR_ACCELERATION, element_names=XYZ_ELEMENT_NAMES)
     def ang_acc_b(self) -> wp.array:
         """PVA frame angular acceleration relative to the world expressed in PVA frame [rad/s^2].

From cb2622c9e3f25b0a1eb1b1ca3f8c5e6f62e437d8 Mon Sep 17 00:00:00 2001
From: Frank Lai
Date: Mon, 20 Apr 2026 13:32:01 -0700
Subject: [PATCH 18/20] added some import guards, improved code readability

---
 .../reinforcement_learning/rsl_rl/export.py   |  8 +++--
 .../isaaclab/envs/direct_deployment_env.py    |  6 +++-
 .../isaaclab/utils/leapp/export_annotator.py  | 32 +++++++++++++------
 .../export/test_rsl_rl_direct_export_flow.py  |  2 +-
 4 files changed, 34 insertions(+), 14 deletions(-)

diff --git a/scripts/reinforcement_learning/rsl_rl/export.py b/scripts/reinforcement_learning/rsl_rl/export.py
index dac38dfda1bf..2cc29d2983fe 100644
--- a/scripts/reinforcement_learning/rsl_rl/export.py
+++ b/scripts/reinforcement_learning/rsl_rl/export.py
@@ -15,9 +15,13 @@
 import time
 from collections.abc import Mapping
 
-import leapp
 import torch
-from leapp import annotate
+
+try:
+    import leapp
+    from leapp import annotate
+except ImportError as e:
+    raise ImportError("LEAPP package is required for policy export. Install with: pip install leapp") from e
 
 # Disable TorchScript before importing task/environment modules so any
 # @torch.jit.script helpers resolve to plain Python functions during export.
diff --git a/source/isaaclab/isaaclab/envs/direct_deployment_env.py b/source/isaaclab/isaaclab/envs/direct_deployment_env.py
index 4eb307f6bbc9..2675b420f185 100644
--- a/source/isaaclab/isaaclab/envs/direct_deployment_env.py
+++ b/source/isaaclab/isaaclab/envs/direct_deployment_env.py
@@ -21,7 +21,11 @@
 import torch
 import yaml
-from leapp import InferenceManager
+
+try:
+    from leapp import InferenceManager
+except ImportError as e:
+    raise ImportError("LEAPP package is required for policy deployment testing. Install with: pip install leapp") from e
 
 from isaaclab.managers import CommandManager, EventManager
 from isaaclab.scene import InteractiveScene
diff --git a/source/isaaclab/isaaclab/utils/leapp/export_annotator.py b/source/isaaclab/isaaclab/utils/leapp/export_annotator.py
index 20a8421bc728..421fe4a5ae1d 100644
--- a/source/isaaclab/isaaclab/utils/leapp/export_annotator.py
+++ b/source/isaaclab/isaaclab/utils/leapp/export_annotator.py
@@ -58,6 +58,9 @@
 from isaaclab.envs import ManagerBasedEnv
 
+VARIABLE_IMPEDANCE_MODES = frozenset({"variable", "variable_kp"})
+
+
 # ══════════════════════════════════════════════════════════════════
 # ExportPatcher
 # ══════════════════════════════════════════════════════════════════
@@ -139,24 +142,33 @@ def _disable_training_managers(unwrapped):
     """
     num_envs = unwrapped.num_envs
     device = unwrapped.device
+    _zero_reward = torch.zeros(num_envs, device=device)
+    _no_termination = torch.zeros(num_envs, dtype=torch.bool, device=device)
+
+    def _noop_curriculum(env_ids=None):
+        return None
+
+    def _zero_reward_compute(dt):
+        return _zero_reward
+
+    def _no_termination_compute():
+        return _no_termination
+
+    def _noop(*args, **kwargs):
+        return None
 
     if hasattr(unwrapped, "curriculum_manager"):
-        unwrapped.curriculum_manager.compute = lambda env_ids=None: None
+        unwrapped.curriculum_manager.compute = _noop_curriculum
 
     if hasattr(unwrapped, "reward_manager"):
-        _zero_reward = torch.zeros(num_envs, device=device)
-        
unwrapped.reward_manager.compute = lambda dt: _zero_reward + unwrapped.reward_manager.compute = _zero_reward_compute if hasattr(unwrapped, "termination_manager"): - _no_termination = torch.zeros(num_envs, dtype=torch.bool, device=device) - unwrapped.termination_manager.compute = lambda: _no_termination + unwrapped.termination_manager.compute = _no_termination_compute if hasattr(unwrapped, "recorder_manager"): rm = unwrapped.recorder_manager - def _noop(*args, **kwargs): - return None - rm.record_pre_step = _noop rm.record_post_step = _noop rm.record_pre_reset = _noop @@ -415,7 +427,7 @@ def _collect_action_outputs(self, action_manager) -> list[TensorSemantics]: tensors: list[TensorSemantics] = [] for term_name, term in action_manager._terms.items(): osc = getattr(term, "_osc", None) - if osc and hasattr(osc, "cfg") and osc.cfg.impedance_mode in ["variable", "variable_kp"]: + if osc and hasattr(osc, "cfg") and osc.cfg.impedance_mode in VARIABLE_IMPEDANCE_MODES: asset = getattr(term, "_asset", None) real_asset = getattr(asset, "_real_asset", asset) joint_ids = getattr(term, "_joint_ids", None) @@ -490,7 +502,7 @@ def _collect_action_static_outputs( if skip_terms and term_name in skip_terms: continue osc = getattr(term, "_osc", None) - if osc and hasattr(osc, "cfg") and osc.cfg.impedance_mode in ["variable", "variable_kp"]: + if osc and hasattr(osc, "cfg") and osc.cfg.impedance_mode in VARIABLE_IMPEDANCE_MODES: continue asset = getattr(term, "_asset", None) real_asset = getattr(asset, "_real_asset", asset) diff --git a/source/isaaclab_rl/test/export/test_rsl_rl_direct_export_flow.py b/source/isaaclab_rl/test/export/test_rsl_rl_direct_export_flow.py index 632a8f54b1e8..ee0f7d1c8c4f 100644 --- a/source/isaaclab_rl/test/export/test_rsl_rl_direct_export_flow.py +++ b/source/isaaclab_rl/test/export/test_rsl_rl_direct_export_flow.py @@ -170,7 +170,7 @@ def test_direct_env_export_flow(): cwd=_REPO_ROOT, capture_output=True, text=True, - timeout=600, + timeout=6000, ) if 
result.returncode != 0: From 98db1721cdcffc2854eef38cfd884c3947e51f0c Mon Sep 17 00:00:00 2001 From: Frank Lai Date: Tue, 21 Apr 2026 14:16:09 -0700 Subject: [PATCH 19/20] added docstrings and addressed comments --- .../reinforcement_learning/rsl_rl/export.py | 1 + .../isaaclab/envs/direct_deployment_env.py | 42 ++++- .../envs/mdp/commands/pose_2d_command.py | 2 + .../envs/mdp/commands/pose_command.py | 2 + .../envs/mdp/commands/velocity_command.py | 2 + .../isaaclab/utils/leapp/export_annotator.py | 158 ++++++++++++++++-- .../dexsuite/mdp/commands/pose_commands.py | 2 + .../mdp/commands/orientation_command.py | 2 + 8 files changed, 191 insertions(+), 20 deletions(-) diff --git a/scripts/reinforcement_learning/rsl_rl/export.py b/scripts/reinforcement_learning/rsl_rl/export.py index 2cc29d2983fe..2633fd09f745 100644 --- a/scripts/reinforcement_learning/rsl_rl/export.py +++ b/scripts/reinforcement_learning/rsl_rl/export.py @@ -215,6 +215,7 @@ def main(env_cfg: ManagerBasedRLEnvCfg, agent_cfg: RslRlBaseRunnerCfg): if isinstance(env.unwrapped, ManagerBasedRLEnv): # Patch only the observation groups consumed by the actor policy. + # This filters out the critic and teacher observation groups. obs_groups_cfg = getattr(agent_cfg, "obs_groups", None) if isinstance(obs_groups_cfg, Mapping): required_obs_groups = set(obs_groups_cfg.get("actor", ["policy"])) diff --git a/source/isaaclab/isaaclab/envs/direct_deployment_env.py b/source/isaaclab/isaaclab/envs/direct_deployment_env.py index 2675b420f185..c560b0a7b4c0 100644 --- a/source/isaaclab/isaaclab/envs/direct_deployment_env.py +++ b/source/isaaclab/isaaclab/envs/direct_deployment_env.py @@ -78,8 +78,16 @@ class WriteOutputSpec: def _resolve_joint_ids(element_names: list | None, entity: Any) -> list[int] | None: """Convert ``element_names[0]`` joint names to integer joint indices. - Returns ``None`` when no slicing is needed (all joints, non-joint tensor, - or entity does not support joint lookup). 
+ Args: + element_names: LEAPP element-name metadata for the tensor, or ``None`` + when the tensor does not define named elements. + entity: Scene entity that may provide ``joint_names`` and + ``find_joints()`` for name-to-index resolution. + + Returns: + Joint indices matching ``element_names[0]``, or ``None`` when no + slicing is needed because all joints are selected, the tensor is not + joint-indexed, or the entity does not support joint lookup. """ if element_names is None or not hasattr(entity, "find_joints"): return None @@ -97,6 +105,13 @@ def _first_param_name(method: Any) -> str: Expects a bound method — ``inspect.signature`` on a bound method already excludes ``self``, so ``params[0]`` is the first real parameter. + + Args: + method: Bound method whose first callable parameter should be + inspected. + + Returns: + The name of the first non-``self`` parameter. """ params = list(inspect.signature(method).parameters.values()) if not params: @@ -217,7 +232,12 @@ def device(self) -> str: # ── I/O Resolution ──────────────────────────────────────────── def _resolve_io(self): - """Build ``_input_mapping`` and ``_output_mapping`` from ``isaaclab_connection`` fields.""" + """Build ``_input_mapping`` and ``_output_mapping`` from LEAPP metadata. + + Parses the ``isaaclab_connection`` field in the loaded LEAPP YAML and + resolves each declared input/output to the corresponding scene entity, + command term, and optional joint index selection. + """ pipeline = self._leapp_desc["pipeline"] for node_name, input_names in pipeline["inputs"].items(): @@ -281,7 +301,12 @@ def _resolve_io(self): # ── Read / Write ────────────────────────────────────────────── def _read_inputs(self) -> dict[str, torch.Tensor]: - """Read all mapped inputs from scene entities and command manager.""" + """Read all mapped inputs from scene entities and command manager. 
+ + Returns: + A mapping from ``"node_name/tensor_name"`` to the tensor value that + should be passed to the LEAPP inference pipeline. + """ inputs: dict[str, torch.Tensor] = {} for key, spec in self._input_mapping.items(): if isinstance(spec, StateInputSpec): @@ -297,7 +322,12 @@ def _read_inputs(self) -> dict[str, torch.Tensor]: return inputs def _write_outputs(self, outputs: dict[str, torch.Tensor]): - """Write model outputs to scene entities.""" + """Write model outputs to scene entities. + + Args: + outputs: Model outputs keyed by ``"node_name/tensor_name"`` as + returned by :meth:`step` and ``InferenceManager.run_policy()``. + """ for key, tensor in outputs.items(): spec = self._output_mapping.get(key) if spec is None: @@ -388,7 +418,7 @@ def step(self, external_inputs: dict[str, torch.Tensor] | None = None) -> dict[s return outputs def close(self): - """Clean up the environment.""" + """Clean up the environment and release simulator-owned resources.""" if not self._is_closed: self.sim.stop() if self.command_manager is not None: diff --git a/source/isaaclab/isaaclab/envs/mdp/commands/pose_2d_command.py b/source/isaaclab/isaaclab/envs/mdp/commands/pose_2d_command.py index 2666774de143..f7c5854b3f1c 100644 --- a/source/isaaclab/isaaclab/envs/mdp/commands/pose_2d_command.py +++ b/source/isaaclab/isaaclab/envs/mdp/commands/pose_2d_command.py @@ -62,6 +62,8 @@ def __init__(self, cfg: UniformPose2dCommandCfg, env: ManagerBasedEnv): self.metrics["error_pos"] = torch.zeros(self.num_envs, device=self.device) self.metrics["error_heading"] = torch.zeros(self.num_envs, device=self.device) + # adds (optional) cmd kind and element names for leapp export + # during export, semantic data about this command will be used to annotate the command input self.cfg.cmd_kind = self.cfg.cmd_kind or "command/body/pose" self.cfg.element_names = self.cfg.element_names or ["x", "y", "z", "heading"] diff --git a/source/isaaclab/isaaclab/envs/mdp/commands/pose_command.py 
b/source/isaaclab/isaaclab/envs/mdp/commands/pose_command.py index 7e2dbd6ac2a4..9ab8db38a439 100644 --- a/source/isaaclab/isaaclab/envs/mdp/commands/pose_command.py +++ b/source/isaaclab/isaaclab/envs/mdp/commands/pose_command.py @@ -69,6 +69,8 @@ def __init__(self, cfg: UniformPoseCommandCfg, env: ManagerBasedEnv): self.metrics["position_error"] = torch.zeros(self.num_envs, device=self.device) self.metrics["orientation_error"] = torch.zeros(self.num_envs, device=self.device) + # adds (optional) cmd kind and element names for leapp export + # during export, semantic data about this command will be used to annotate the command input self.cfg.cmd_kind = self.cfg.cmd_kind or "command/body/pose" self.cfg.element_names = self.cfg.element_names or ["x", "y", "z", "qw", "qx", "qy", "qz"] diff --git a/source/isaaclab/isaaclab/envs/mdp/commands/velocity_command.py b/source/isaaclab/isaaclab/envs/mdp/commands/velocity_command.py index 2e4285ea6bf7..3e3c0959b071 100644 --- a/source/isaaclab/isaaclab/envs/mdp/commands/velocity_command.py +++ b/source/isaaclab/isaaclab/envs/mdp/commands/velocity_command.py @@ -88,6 +88,8 @@ def __init__(self, cfg: UniformVelocityCommandCfg, env: ManagerBasedEnv): self.metrics["error_vel_xy"] = torch.zeros(self.num_envs, device=self.device) self.metrics["error_vel_yaw"] = torch.zeros(self.num_envs, device=self.device) + # adds (optional) cmd kind and element names for leapp export + # during export, semantic data about this command will be used to annotate the command input self.cfg.cmd_kind = self.cfg.cmd_kind or "command/body/velocity" self.cfg.element_names = self.cfg.element_names or ["lin_vel_x", "lin_vel_y", "ang_vel_z"] diff --git a/source/isaaclab/isaaclab/utils/leapp/export_annotator.py b/source/isaaclab/isaaclab/utils/leapp/export_annotator.py index 421fe4a5ae1d..52f2738c748c 100644 --- a/source/isaaclab/isaaclab/utils/leapp/export_annotator.py +++ b/source/isaaclab/isaaclab/utils/leapp/export_annotator.py @@ -78,8 +78,6 @@ class 
ExportPatcher: This lets backends override property implementations without duplicating decorators from the abstract API. - The proxies and a shared dedup cache are wired into both: - - The observation proxy chain (``_EnvProxy`` → ``_SceneProxy`` → ``_EntityProxy`` → ``_DataProxy``) for state reads by observation term functions. @@ -87,13 +85,17 @@ class ExportPatcher: target writes **and** routes ``.data`` reads through the same ``_DataProxy`` / cache. - This ensures that a property like ``joint_pos`` read by both an - observation term and ``RelativeJointPositionAction.apply_actions()`` - resolves to a single LEAPP input edge rather than being silently baked - in as a constant. """ def __init__(self, export_method: str, required_obs_groups: set[str] | None = None): + """Initialize the export patcher. + + Args: + export_method: LEAPP export backend passed to + :func:`annotate.output_tensors`. + required_obs_groups: Observation groups that should be patched, or + ``None`` to patch all groups. + """ self.task_name: str | None = None self.export_method = export_method self.required_obs_groups = required_obs_groups @@ -110,7 +112,12 @@ def __init__(self, export_method: str, required_obs_groups: set[str] | None = No self._action_term_scene_keys: dict[str, str] = {} def setup(self, env): - """Patch the unwrapped env in place for LEAPP-aware observation and action export.""" + """Patch the environment in place for LEAPP-aware export. + + Args: + env: Wrapped manager-based environment whose unwrapped instance + should be patched. + """ unwrapped = env.env.unwrapped self.task_name = unwrapped.spec.id @@ -139,6 +146,10 @@ def _disable_training_managers(unwrapped): managers serve no purpose. Disabling them avoids side-effect crashes (e.g. ADR curriculum terms accessing nullified noise configs) and removes unnecessary computation. + + Args: + unwrapped: Unwrapped environment whose training-only managers + should be disabled. 
""" num_envs = unwrapped.num_envs device = unwrapped.device @@ -177,7 +188,15 @@ def _noop(*args, **kwargs): @staticmethod def _resolve_scene_entity_key(scene, entity: Any) -> str | None: - """Return the scene dictionary key for a given entity, if present.""" + """Return the scene dictionary key for an entity. + + Args: + scene: Scene object that stores entity dictionaries. + entity: Entity instance to locate. + + Returns: + The scene key for ``entity`` if found, otherwise ``None``. + """ for attr_value in vars(scene).values(): if not isinstance(attr_value, dict): continue @@ -189,7 +208,12 @@ def _resolve_scene_entity_key(scene, entity: Any) -> str | None: # ── Observation manager patches ─────────────────────────────── def _patch_history_buffers(self, obs_manager): - """Patch history-enabled observation buffers to export as LEAPP state.""" + """Patch history-enabled observation buffers to export as LEAPP state. + + Args: + obs_manager: Observation manager whose history buffers should be + wrapped. + """ history_buffers = getattr(obs_manager, "_group_obs_term_history_buffer", {}) term_names_by_group = getattr(obs_manager, "_group_obs_term_names", {}) @@ -216,7 +240,12 @@ def _patch_history_buffers(self, obs_manager): self._patch_history_buffer_append(circular_buffer, state_name) def _patch_history_buffer_append(self, circular_buffer, state_name: str): - """Wrap ``_append`` so history buffers become explicit LEAPP state.""" + """Wrap ``_append`` so history buffers become explicit LEAPP state. + + Args: + circular_buffer: Circular buffer instance to patch. + state_name: LEAPP state tensor name for the buffer contents. + """ if hasattr(circular_buffer, "_leapp_original_append"): return @@ -224,6 +253,14 @@ def _patch_history_buffer_append(self, circular_buffer, state_name: str): original_append = circular_buffer._append def patched_append(data: torch.Tensor): + """Annotate history buffer updates as LEAPP state transitions. 
+ + Args: + data: New observation slice appended to the buffer. + + Returns: + ``None``. + """ if circular_buffer._buffer is not None: circular_buffer._buffer = annotate.state_tensors(task_name, {state_name: circular_buffer._buffer}) @@ -236,7 +273,12 @@ def patched_append(data: torch.Tensor): circular_buffer._append = patched_append def _patch_observation_manager(self, obs_manager, proxy_env): - """Patch observation terms to use annotating proxies and disable noise.""" + """Patch observation terms to use annotating proxies and disable noise. + + Args: + obs_manager: Observation manager instance to patch. + proxy_env: Proxy environment routed into observation terms. + """ for group_name, term_cfgs in obs_manager._group_obs_term_cfgs.items(): if self.required_obs_groups is not None and group_name not in self.required_obs_groups: continue @@ -267,7 +309,12 @@ def patched_compute(*args, **kwargs): # ── Action manager patches ──────────────────────────────────── def _patch_action_manager(self, action_manager, cache): - """Patch action terms with write+read proxies and patch manager methods.""" + """Patch action terms with write/read proxies and manager hooks. + + Args: + action_manager: Action manager instance to patch. + cache: Shared tensor dedup cache for annotated state reads. + """ scene = action_manager._env.scene for term_name, term in action_manager._terms.items(): asset = getattr(term, "_asset", None) @@ -314,6 +361,10 @@ def _patch_action_manager_methods(self, action_manager): subsequent iterations): The cache is cleared **before** running action terms so every ``.data`` read returns the current simulator value, preserving simulation correctness. + + Args: + action_manager: Action manager whose instance methods should be + wrapped. 
""" original_process = action_manager.process_action original_apply = action_manager.apply_action @@ -361,12 +412,29 @@ def patched_apply_action(): @staticmethod def _wrap_with_proxy(original_func, proxy_env): - """Wrap a term function so it receives the proxy env instead of the real env.""" + """Wrap a term function so it receives the proxy env. + + Args: + original_func: Original observation term function or manager term. + proxy_env: Proxy environment routed into the wrapped callable. + + Returns: + Wrapped callable that substitutes ``proxy_env`` for the real env. + """ if isinstance(original_func, ManagerTermBase): return _ManagerTermProxy(original_func, proxy_env) def wrapped(*args, **kwargs): + """Invoke the original function with the proxy environment. + + Args: + *args: Original positional arguments. + **kwargs: Original keyword arguments. + + Returns: + Result of the wrapped observation term. + """ if args: args = (proxy_env, *args[1:]) else: @@ -383,10 +451,26 @@ def _wrap_last_action(self, original_func): therefore register it through ``annotate.state_tensors(...)`` on the observation side and update it through ``annotate.update_state(...)`` after the traced action pass. + + Args: + original_func: Original ``last_action`` observation term. + + Returns: + Wrapped callable that exports ``last_action`` as LEAPP state. """ task_name = self.task_name def wrapped(env, action_name=None, **kwargs): + """Run the wrapped ``last_action`` term and annotate its output. + + Args: + env: Environment passed by the observation manager. + action_name: Optional action term name. + **kwargs: Additional keyword arguments for the term. + + Returns: + Annotated last-action tensor. 
+ """ result = original_func(env, action_name, **kwargs) return annotate.state_tensors(task_name, {"last_action": result}) @@ -398,11 +482,28 @@ def _wrap_generated_commands(self, original_func, term_cfg): Resolves command semantics (kind, element_names) from the command manager configuration when available. + + Args: + original_func: Original ``generated_commands`` observation term. + term_cfg: Observation term config used to resolve the command name. + + Returns: + Wrapped callable that exports generated commands as LEAPP inputs. """ task_name = self.task_name command_name_from_cfg = term_cfg.params.get("command_name") def wrapped(env, command_name=None, **kwargs): + """Run the wrapped command term and annotate its output. + + Args: + env: Environment passed by the observation manager. + command_name: Optional command term name override. + **kwargs: Additional keyword arguments for the term. + + Returns: + Annotated command tensor. + """ result = original_func(env, command_name, **kwargs) leapp_input_name = command_name or command_name_from_cfg or "commands" command_cfg = None @@ -423,7 +524,15 @@ def wrapped(env, command_name=None, **kwargs): # ── Output collection ───────────────────────────────────────── def _collect_action_outputs(self, action_manager) -> list[TensorSemantics]: - """Collect non-writer action tensors that should be exported (e.g. OSC dynamic gains).""" + """Collect non-writer action tensors that should be exported. + + Args: + action_manager: Action manager whose terms should be inspected. + + Returns: + Exportable tensor semantics for dynamic action outputs such as OSC + gains. + """ tensors: list[TensorSemantics] = [] for term_name, term in action_manager._terms.items(): osc = getattr(term, "_osc", None) @@ -459,6 +568,12 @@ def _collect_processed_action_fallbacks(self, action_manager) -> list[TensorSema When an action term does not call any ``_leapp_semantics``-decorated write method (e.g. 
``PreTrainedPolicyAction`` which delegates writes to a nested sub-policy), we fall back to capturing ``term.processed_actions`` as the output tensor. + + Args: + action_manager: Action manager whose terms should be inspected. + + Returns: + Fallback tensor semantics built from ``processed_actions``. """ logger = logging.getLogger(__name__) fallback_terms: set[str] = set() @@ -496,6 +611,14 @@ def _collect_action_static_outputs( Terms in ``skip_terms`` are excluded — these are terms that fell back to ``processed_actions`` and whose static gains (kp/kd) belong to a lower abstraction level that is not part of the exported policy. + + Args: + action_manager: Action manager whose terms should be inspected. + skip_terms: Action term names whose static outputs should be + skipped. + + Returns: + Static tensor semantics for action gains exported as metadata. """ static_values: list[TensorSemantics] = [] for term_name, term in action_manager._terms.items(): @@ -577,6 +700,13 @@ class list is required. To produce LEAPP input annotations, the The underlying env, scene, assets, and tensors remain shared with the rest of the pipeline; only the manager call paths are redirected. + + Args: + env: Manager-based environment to patch in place. + export_method: LEAPP export backend passed to + :func:`annotate.output_tensors`. + required_obs_groups: Observation groups that should be patched, or + ``None`` to patch all groups. 
""" patch_warp_to_torch_passthrough() patcher = ExportPatcher(export_method, required_obs_groups=required_obs_groups) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/commands/pose_commands.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/commands/pose_commands.py index fb8b71e5e6ac..d64e938d91e7 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/commands/pose_commands.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/mdp/commands/pose_commands.py @@ -83,6 +83,8 @@ def __init__(self, cfg: dex_cmd_cfgs.ObjectUniformPoseCommandCfg, env: ManagerBa self.success_visualizer = VisualizationMarkers(self.cfg.success_visualizer_cfg) self.success_visualizer.set_visibility(True) + # adds (optional) cmd kind and element names for leapp export + # during export, semantic data about this command will be used to annotate the command input self.cfg.cmd_kind = self.cfg.cmd_kind or "command/body/pose" self.cfg.element_names = self.cfg.element_names or ["x", "y", "z", "qw", "qx", "qy", "qz"] diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/commands/orientation_command.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/commands/orientation_command.py index 946ff908bc55..d131e03e4aa1 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/commands/orientation_command.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/inhand/mdp/commands/orientation_command.py @@ -75,6 +75,8 @@ def __init__(self, cfg: InHandReOrientationCommandCfg, env: ManagerBasedRLEnv): self.metrics["position_error"] = torch.zeros(self.num_envs, device=self.device) self.metrics["consecutive_success"] = torch.zeros(self.num_envs, device=self.device) + # adds (optional) cmd kind and element names for leapp export + # during export, semantic data about this 
command will be used to annotate the command input self.cfg.cmd_kind = self.cfg.cmd_kind or "command/body/pose" self.cfg.element_names = self.cfg.element_names or ["x", "y", "z", "qw", "qx", "qy", "qz"] From bb76135b78663a606ca0fc816928225eb43bbdc1 Mon Sep 17 00:00:00 2001 From: Frank Lai Date: Tue, 21 Apr 2026 15:11:25 -0700 Subject: [PATCH 20/20] moved file location, updated docs, fixed deploy.py bug with mapping element names --- .../05_leapp/exporting_policies_with_leapp.rst | 15 ++++++++------- ...ting_direct_deployment_policies_with_leapp.rst | 10 ++++++++-- .../reinforcement_learning/{ => leapp}/deploy.py | 12 ++++++++++-- .../{ => leapp}/rsl_rl/export.py | 4 ++++ .../isaaclab/envs/direct_deployment_env.py | 13 ++++++++++++- 5 files changed, 42 insertions(+), 12 deletions(-) rename scripts/reinforcement_learning/{ => leapp}/deploy.py (83%) rename scripts/reinforcement_learning/{ => leapp}/rsl_rl/export.py (98%) diff --git a/docs/source/policy_deployment/05_leapp/exporting_policies_with_leapp.rst b/docs/source/policy_deployment/05_leapp/exporting_policies_with_leapp.rst index b560b4f7f511..565510be2b64 100644 --- a/docs/source/policy_deployment/05_leapp/exporting_policies_with_leapp.rst +++ b/docs/source/policy_deployment/05_leapp/exporting_policies_with_leapp.rst @@ -67,7 +67,7 @@ Use the RSL-RL export script to export a trained checkpoint: .. code-block:: bash - ./isaaclab.sh -p scripts/reinforcement_learning/rsl_rl/export.py \ + ./isaaclab.sh -p scripts/reinforcement_learning/leapp/rsl_rl/export.py \ --task \ --checkpoint @@ -75,9 +75,9 @@ For example, to export a UR10 reach policy: .. 
code-block:: bash - ./isaaclab.sh -p scripts/reinforcement_learning/rsl_rl/export.py \ + ./isaaclab.sh -p scripts/reinforcement_learning/leapp/rsl_rl/export.py \ --task Isaac-Reach-UR10-v0 \ - --checkpoint logs/rsl_rl/ur10_reach/2026-03-22_22-35-55/model_4999.pt + --checkpoint logs/rsl_rl/ur10_reach/< date timestamp >/model_4999.pt By default, the export artifacts are saved in the same directory as the checkpoint. The exported graph is named after the task. @@ -242,10 +242,11 @@ simulation without the training infrastructure. This is the Isaac Lab deployment LEAPP-exported policies and is useful for validating that the packaged policy still behaves correctly when driven through the deployment stack instead of the training stack. -.. admonition:: TODO - :class: warning - - A full tutorial on ``DirectDeploymentEnv`` usage will be added in a follow-up guide. +For direct deployment policies, see the +:doc:`direct deployment LEAPP export tutorial `. +That guide shows how to add LEAPP annotations to a direct RL environment so it can be +exported with ``scripts/reinforcement_learning/leapp/rsl_rl/export.py``. Direct +deployment policies are not currently supported by ``scripts/reinforcement_learning/leapp/deploy.py``. .. admonition:: TODO :class: warning diff --git a/docs/source/tutorials/06_exporting/exporting_direct_deployment_policies_with_leapp.rst b/docs/source/tutorials/06_exporting/exporting_direct_deployment_policies_with_leapp.rst index 994886044f53..f7ed37b077d9 100644 --- a/docs/source/tutorials/06_exporting/exporting_direct_deployment_policies_with_leapp.rst +++ b/docs/source/tutorials/06_exporting/exporting_direct_deployment_policies_with_leapp.rst @@ -17,7 +17,7 @@ environment code. During export, LEAPP traces the annotated tensors and builds a intermediate representation of the full policy pipeline. These annotations remain dormant during normal environment execution and only add a small amount of overhead until export time. 
They are activated by -``scripts/reinforcement_learning/rsl_rl/export.py`` when you run the export flow. +``scripts/reinforcement_learning/leapp/rsl_rl/export.py`` when you run the export flow. This tutorial uses ``scripts/tutorials/06_deploy/anymal_c_env.py`` as the example. The script is based on the existing ANYmal-C direct environment at @@ -28,7 +28,7 @@ with: .. code-block:: bash - ./isaaclab.sh -p scripts/reinforcement_learning/rsl_rl/export.py \ + ./isaaclab.sh -p scripts/reinforcement_learning/leapp/rsl_rl/export.py \ --task \ --checkpoint \ --export_save_path @@ -39,6 +39,12 @@ points to the trained RSL-RL checkpoint to export. The optional ``--export_save_path`` argument selects the output directory for the exported artifacts. If you omit it, the export is written next to the checkpoint. +.. warning:: + + This tutorial covers exporting direct deployment policies only. Direct deployment + policies are not currently supported by + ``scripts/reinforcement_learning/leapp/deploy.py``. + For more information on the export arguments, see the :doc:`manager-based LEAPP export guide `. 
diff --git a/scripts/reinforcement_learning/deploy.py b/scripts/reinforcement_learning/leapp/deploy.py similarity index 83% rename from scripts/reinforcement_learning/deploy.py rename to scripts/reinforcement_learning/leapp/deploy.py index 8d00842fc390..b5fdbf3bf3d6 100644 --- a/scripts/reinforcement_learning/deploy.py +++ b/scripts/reinforcement_learning/leapp/deploy.py @@ -13,12 +13,20 @@ from isaaclab.app import AppLauncher parser = argparse.ArgumentParser(description="Deploy a LEAPP-exported policy in simulation.") -parser.add_argument("--task", type=str, required=True, help="Name of the registered Isaac Lab task.") -parser.add_argument("--leapp_model", type=str, required=True, help="Path to the LEAPP .yaml pipeline description.") +parser.add_argument("--task", type=str, default=None, help="Name of the registered Isaac Lab task.") +parser.add_argument("--leapp_model", type=str, default=None, help="Path to the LEAPP .yaml pipeline description.") parser.add_argument("--seed", type=int, default=None, help="Seed for the environment.") AppLauncher.add_app_launcher_args(parser) args_cli, hydra_args = parser.parse_known_args() +if args_cli.task is None or args_cli.leapp_model is None: + missing_args = [] + if args_cli.task is None: + missing_args.append("--task") + if args_cli.leapp_model is None: + missing_args.append("--leapp_model") + parser.error(f"the following arguments are required: {', '.join(missing_args)}") + sys.argv = [sys.argv[0]] + hydra_args app_launcher = AppLauncher(args_cli) diff --git a/scripts/reinforcement_learning/rsl_rl/export.py b/scripts/reinforcement_learning/leapp/rsl_rl/export.py similarity index 98% rename from scripts/reinforcement_learning/rsl_rl/export.py rename to scripts/reinforcement_learning/leapp/rsl_rl/export.py index 2633fd09f745..991f6da82064 100644 --- a/scripts/reinforcement_learning/rsl_rl/export.py +++ b/scripts/reinforcement_learning/leapp/rsl_rl/export.py @@ -14,6 +14,7 @@ import sys import time from collections.abc import 
Mapping +from pathlib import Path import torch @@ -30,6 +31,9 @@ from isaaclab.app import AppLauncher # local imports +_RSL_RL_SCRIPTS_DIR = Path(__file__).resolve().parents[2] / "rsl_rl" +if str(_RSL_RL_SCRIPTS_DIR) not in sys.path: + sys.path.insert(0, str(_RSL_RL_SCRIPTS_DIR)) import cli_args # isort: skip diff --git a/source/isaaclab/isaaclab/envs/direct_deployment_env.py b/source/isaaclab/isaaclab/envs/direct_deployment_env.py index c560b0a7b4c0..6d9a29dc5852 100644 --- a/source/isaaclab/isaaclab/envs/direct_deployment_env.py +++ b/source/isaaclab/isaaclab/envs/direct_deployment_env.py @@ -94,7 +94,18 @@ def _resolve_joint_ids(element_names: list | None, entity: Any) -> list[int] | N joint_names = element_names[0] if not isinstance(joint_names, list) or not joint_names: return None - if joint_names == list(entity.joint_names): + entity_joint_names = list(entity.joint_names) + # Only resolve indices when the leading element-name axis actually refers + # to a subset of this articulation's joints. Other tensors can use axis + # labels like ["x", "y", "z"] or body names in the first axis. + matching_joint_names = [name for name in joint_names if name in entity_joint_names] + if not matching_joint_names: + return None + if len(matching_joint_names) != len(joint_names): + raise ValueError( + f"LEAPP element names mix joint and non-joint labels for an articulation-backed tensor: {joint_names}" + ) + if joint_names == entity_joint_names: return None joint_ids, _ = entity.find_joints(joint_names, preserve_order=True) return joint_ids