diff --git a/scripts/benchmarks/benchmark_newton_xform_prim_view.py b/scripts/benchmarks/benchmark_newton_xform_prim_view.py new file mode 100644 index 000000000000..eae43159c78a --- /dev/null +++ b/scripts/benchmarks/benchmark_newton_xform_prim_view.py @@ -0,0 +1,145 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Benchmark script for Newton XformPrimView performance. + +Tests the performance of batched transform operations using Newton's GPU-backed +XformPrimView (sites / body_q) without requiring Isaac Sim Kit. + +Usage: + VIRTUAL_ENV=env_isaaclab ./isaaclab.sh -p scripts/benchmarks/benchmark_newton_xform_prim_view.py --num_envs 4096 + VIRTUAL_ENV=env_isaaclab ./isaaclab.sh -p scripts/benchmarks/benchmark_newton_xform_prim_view.py --num_envs 4096 --num_iterations 200 +""" + +from __future__ import annotations + +import argparse +import time + +import torch +import warp as wp + +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg +from isaaclab_newton.sim.views import XformPrimView + +import isaaclab.sim as sim_utils +from isaaclab.assets import RigidObjectCfg +from isaaclab.scene import InteractiveScene, InteractiveSceneCfg +from isaaclab.sim import SimulationCfg, build_simulation_context +from isaaclab.utils import configclass + +NEWTON_SIM_CFG = SimulationCfg( + physics=NewtonCfg(solver_cfg=MJWarpSolverCfg()), +) + + +@configclass +class BenchSceneCfg(InteractiveSceneCfg): + cube: RigidObjectCfg = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/Cube", + spawn=sim_utils.CuboidCfg( + size=(0.2, 0.2, 0.2), + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + mass_props=sim_utils.MassPropertiesCfg(mass=1.0), + collision_props=sim_utils.CollisionPropertiesCfg(), + ), + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, 0.0, 1.0)), + ) + + +@torch.no_grad() +def benchmark(num_envs: int, num_iterations: 
int, device: str) -> dict[str, float]: + """Run the benchmark and return timing results in seconds.""" + NEWTON_SIM_CFG.device = device + results: dict[str, float] = {} + + with build_simulation_context(device=device, sim_cfg=NEWTON_SIM_CFG, add_ground_plane=True) as sim: + sim._app_control_on_stop_handle = None + scene = InteractiveScene(BenchSceneCfg(num_envs=num_envs, env_spacing=2.0)) + sim.reset() + + view = XformPrimView("/World/envs/env_.*/Cube", device=device) + print(f" View count: {view.count}") + + # -- warmup (compile Warp kernel, first torch allocs) -- + for _ in range(5): + view.get_world_poses() + + # -- get_world_poses (full) -- + torch.cuda.synchronize() + t0 = time.perf_counter() + for _ in range(num_iterations): + pos, quat = view.get_world_poses() + torch.cuda.synchronize() + results["get_world_poses"] = (time.perf_counter() - t0) / num_iterations + + # -- get_world_poses (indexed, 50 %) -- + half = list(range(0, num_envs, 2)) + torch.cuda.synchronize() + t0 = time.perf_counter() + for _ in range(num_iterations): + pos, quat = view.get_world_poses(half) + torch.cuda.synchronize() + results["get_world_poses_indexed_50pct"] = (time.perf_counter() - t0) / num_iterations + + # -- set_world_poses (full) -- + new_pos = torch.rand((num_envs, 3), device=device) + new_quat = torch.tensor([[0.0, 0.0, 0.0, 1.0]] * num_envs, device=device) + torch.cuda.synchronize() + t0 = time.perf_counter() + for _ in range(num_iterations): + view.set_world_poses(new_pos, new_quat) + torch.cuda.synchronize() + results["set_world_poses"] = (time.perf_counter() - t0) / num_iterations + + # -- interleaved set -> get -- + torch.cuda.synchronize() + t0 = time.perf_counter() + for _ in range(num_iterations): + view.set_world_poses(new_pos, new_quat) + pos, quat = view.get_world_poses() + torch.cuda.synchronize() + results["interleaved_set_get"] = (time.perf_counter() - t0) / num_iterations + + return results + + +def print_results(results: dict[str, float], num_envs: int, 
num_iterations: int): + """Print benchmark results in a formatted table.""" + print("\n" + "=" * 70) + print(f"Newton XformPrimView Benchmark: {num_envs} envs, {num_iterations} iters") + print("=" * 70) + print(f"{'Operation':<40} {'Time (ms)':>12} {'us/env':>12}") + print("-" * 70) + for op, t in results.items(): + ms = t * 1000 + us_per_env = t * 1e6 / num_envs + print(f"{op:<40} {ms:>12.4f} {us_per_env:>12.4f}") + total = sum(results.values()) * 1000 + print("-" * 70) + print(f"{'Total':<40} {total:>12.4f}") + print("=" * 70) + print() + + +def main(): + parser = argparse.ArgumentParser(description="Benchmark Newton XformPrimView performance.") + parser.add_argument("--num_envs", type=int, default=4096) + parser.add_argument("--num_iterations", type=int, default=100) + parser.add_argument("--device", type=str, default="cuda:0") + args = parser.parse_args() + + print("=" * 70) + print("Newton XformPrimView Benchmark") + print("=" * 70) + print(f" Envs: {args.num_envs} Iterations: {args.num_iterations} Device: {args.device}") + print() + + results = benchmark(args.num_envs, args.num_iterations, args.device) + print_results(results, args.num_envs, args.num_iterations) + + +if __name__ == "__main__": + main() diff --git a/scripts/benchmarks/benchmark_xform_prim_view.py b/scripts/benchmarks/benchmark_xform_prim_view.py index e76796e20271..a3fb7f1368cb 100644 --- a/scripts/benchmarks/benchmark_xform_prim_view.py +++ b/scripts/benchmarks/benchmark_xform_prim_view.py @@ -67,15 +67,27 @@ import torch -from isaacsim.core.prims import XFormPrim as IsaacSimXformPrimView -from isaacsim.core.utils.extensions import enable_extension +try: + from isaacsim.core.prims import XFormPrim as IsaacSimXformPrimView + from isaacsim.core.utils.extensions import enable_extension -# compare against latest Isaac Sim implementation -enable_extension("isaacsim.core.experimental.prims") -from isaacsim.core.experimental.prims import XformPrim as IsaacSimExperimentalXformPrimView + 
enable_extension("isaacsim.core.experimental.prims") + from isaacsim.core.experimental.prims import XformPrim as IsaacSimExperimentalXformPrimView + + _HAS_ISAACSIM = True +except (ImportError, ModuleNotFoundError): + _HAS_ISAACSIM = False import isaaclab.sim as sim_utils from isaaclab.sim.views import XformPrimView as IsaacLabXformPrimView +from isaaclab_physx.sim.views import XformPrimView as IsaacLabFabricXformPrimView + +from isaaclab.assets import RigidObjectCfg +from isaaclab.scene import InteractiveScene, InteractiveSceneCfg +from isaaclab.sim import SimulationCfg, build_simulation_context +from isaaclab.utils import configclass +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg +from isaaclab_newton.sim.views import XformPrimView as IsaacLabNewtonXformPrimView @torch.no_grad() @@ -133,8 +145,10 @@ def benchmark_xform_prim_view( # noqa: C901 # Create view start_time = time.perf_counter() - if api == "isaaclab-usd" or api == "isaaclab-fabric": + if api == "isaaclab-usd": xform_view = IsaacLabXformPrimView(pattern, device=args_cli.device, validate_xform_ops=False) + elif api == "isaaclab-fabric": + xform_view = IsaacLabFabricXformPrimView(pattern, device=args_cli.device, validate_xform_ops=False) elif api == "isaacsim-usd": xform_view = IsaacSimXformPrimView(pattern, reset_xform_properties=False, usd=True) elif api == "isaacsim-fabric": @@ -279,6 +293,109 @@ def benchmark_xform_prim_view( # noqa: C901 return timing_results, computed_results +@configclass +class _NewtonBenchSceneCfg(InteractiveSceneCfg): + cube: RigidObjectCfg = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/Object", + spawn=sim_utils.CuboidCfg( + size=(0.2, 0.2, 0.2), + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + mass_props=sim_utils.MassPropertiesCfg(mass=1.0), + collision_props=sim_utils.CollisionPropertiesCfg(), + ), + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, 0.0, 1.0)), + ) + + +@torch.no_grad() +def benchmark_newton_xform_prim_view( + num_iterations: int, +) -> 
tuple[dict[str, float], dict[str, torch.Tensor]]: + """Benchmark Newton XformPrimView (GPU sites / body_q).""" + timing_results = {} + computed_results = {} + + print(" Setting up Newton scene") + newton_sim_cfg = SimulationCfg(device=args_cli.device, physics=NewtonCfg(solver_cfg=MJWarpSolverCfg())) + + with build_simulation_context(device=args_cli.device, sim_cfg=newton_sim_cfg, add_ground_plane=True) as sim: + sim._app_control_on_stop_handle = None + scene = InteractiveScene(_NewtonBenchSceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0)) + sim.reset() + + start_time = time.perf_counter() + xform_view = IsaacLabNewtonXformPrimView("/World/envs/env_.*/Object", device=args_cli.device) + timing_results["init"] = time.perf_counter() - start_time + + num_prims = xform_view.count + print(f" Newton XformView managing {num_prims} prims") + + # warmup + for _ in range(5): + xform_view.get_world_poses() + + # get_world_poses + torch.cuda.synchronize() + start_time = time.perf_counter() + for _ in range(num_iterations): + positions, orientations = xform_view.get_world_poses() + torch.cuda.synchronize() + timing_results["get_world_poses"] = (time.perf_counter() - start_time) / num_iterations + + computed_results["initial_world_positions"] = positions.clone() + computed_results["initial_world_orientations"] = orientations.clone() + + # set_world_poses + new_positions = positions.clone() + new_positions[:, 2] += 0.1 + torch.cuda.synchronize() + start_time = time.perf_counter() + for _ in range(num_iterations): + xform_view.set_world_poses(new_positions, orientations) + torch.cuda.synchronize() + timing_results["set_world_poses"] = (time.perf_counter() - start_time) / num_iterations + + positions_after, orientations_after = xform_view.get_world_poses() + computed_results["world_positions_after_set"] = positions_after.clone() + computed_results["world_orientations_after_set"] = orientations_after.clone() + + # get_local_poses (delegates to world for Newton) + 
torch.cuda.synchronize() + start_time = time.perf_counter() + for _ in range(num_iterations): + translations, orientations_local = xform_view.get_local_poses() + torch.cuda.synchronize() + timing_results["get_local_poses"] = (time.perf_counter() - start_time) / num_iterations + + computed_results["initial_local_translations"] = translations.clone() + computed_results["initial_local_orientations"] = orientations_local.clone() + + # set_local_poses + new_translations = translations.clone() + new_translations[:, 2] += 0.1 + torch.cuda.synchronize() + start_time = time.perf_counter() + for _ in range(num_iterations): + xform_view.set_local_poses(new_translations, orientations_local) + torch.cuda.synchronize() + timing_results["set_local_poses"] = (time.perf_counter() - start_time) / num_iterations + + translations_after, orientations_local_after = xform_view.get_local_poses() + computed_results["local_translations_after_set"] = translations_after.clone() + computed_results["local_orientations_after_set"] = orientations_local_after.clone() + + # interleaved set -> get + torch.cuda.synchronize() + start_time = time.perf_counter() + for _ in range(num_iterations): + xform_view.set_world_poses(new_positions, orientations) + positions, orientations = xform_view.get_world_poses() + torch.cuda.synchronize() + timing_results["interleaved_world_set_get"] = (time.perf_counter() - start_time) / num_iterations + + return timing_results, computed_results + + def compare_results( results_dict: dict[str, dict[str, torch.Tensor]], tolerance: float = 1e-4 ) -> dict[str, dict[str, dict[str, float]]]: @@ -566,10 +683,14 @@ def main(): apis_to_test = [ ("isaaclab-usd", "Isaac Lab XformPrimView (USD)"), ("isaaclab-fabric", "Isaac Lab XformPrimView (Fabric)"), - ("isaacsim-usd", "Isaac Sim XformPrimView (USD)"), - ("isaacsim-fabric", "Isaac Sim XformPrimView (Fabric)"), - ("isaacsim-exp", "Isaac Sim Experimental XformPrim"), ] + # Uncomment to include Isaac Sim APIs in the comparison 
(requires isaacsim package): + # if _HAS_ISAACSIM: + # apis_to_test += [ + # ("isaacsim-usd", "Isaac Sim XformPrimView (USD)"), + # ("isaacsim-fabric", "Isaac Sim XformPrimView (Fabric)"), + # ("isaacsim-exp", "Isaac Sim Experimental XformPrim"), + # ] # Benchmark each API for api_key, api_name in apis_to_test: @@ -579,7 +700,6 @@ def main(): profiler = cProfile.Profile() profiler.enable() - # Cast api_key to Literal type for type checker timing, computed = benchmark_xform_prim_view( api=api_key, # type: ignore[arg-type] num_iterations=args_cli.num_iterations, @@ -598,6 +718,17 @@ def main(): print(" Done!") print() + # Benchmark Newton (separate setup path) + if "cuda" in args_cli.device: + print("Benchmarking Isaac Lab XformPrimView (Newton)...") + timing, computed = benchmark_newton_xform_prim_view(num_iterations=args_cli.num_iterations) + all_timing_results["isaaclab-newton"] = timing + all_computed_results["isaaclab-newton"] = computed + print(" Done!") + print() + else: + print("Note: Skipping Newton benchmark (requires CUDA).\n") + # Print timing results print_results(all_timing_results, args_cli.num_envs, args_cli.num_iterations) diff --git a/scripts/newton_repro/envs/rough_anymal_d/cloner_info.json b/scripts/newton_repro/envs/rough_anymal_d/cloner_info.json new file mode 100644 index 000000000000..4e274a9ce0ce --- /dev/null +++ b/scripts/newton_repro/envs/rough_anymal_d/cloner_info.json @@ -0,0 +1,16 @@ +{ + "sources": [ + "/World/envs/env_0" + ], + "destinations": [ + "/World/envs/env_{}" + ], + "ignore_paths": [ + "/World/envs", + "/World/envs/env_0" + ], + "up_axis": "Z", + "simplify_meshes": true, + "num_envs": 256, + "env_spacing": 2.5 +} \ No newline at end of file diff --git a/scripts/newton_repro/envs/rough_anymal_d/env.yaml b/scripts/newton_repro/envs/rough_anymal_d/env.yaml new file mode 100644 index 000000000000..e9a4974882d5 --- /dev/null +++ b/scripts/newton_repro/envs/rough_anymal_d/env.yaml @@ -0,0 +1,1010 @@ +viewer: + eye: !!python/tuple + 
- 7.5 + - 7.5 + - 7.5 + lookat: !!python/tuple + - 0.0 + - 0.0 + - 0.0 + cam_prim_path: /OmniverseKit_Persp + resolution: !!python/tuple + - 1280 + - 720 + origin_type: world + env_index: 0 + asset_name: null + body_name: null +sim: + device: cuda:0 + dt: 0.005 + gravity: !!python/tuple + - 0.0 + - 0.0 + - -9.81 + physics_prim_path: /physicsScene + physics_material: + func: isaaclab.sim.spawners.materials.physics_materials:spawn_rigid_body_material + static_friction: 1.0 + dynamic_friction: 1.0 + restitution: 0.0 + friction_combine_mode: multiply + restitution_combine_mode: multiply + compliant_contact_stiffness: 0.0 + compliant_contact_damping: 0.0 + use_fabric: true + render_interval: 4 + enable_scene_query_support: false + physics: + class_type: isaaclab_newton.physics.newton_manager:NewtonManager + num_substeps: 1 + debug_mode: false + use_cuda_graph: true + nan_replay: + buffer_size: 200 + export_path: ./nan_debug/ + max_exports: 5 + solver_cfg: + solver_type: mujoco_warp + njmax: 400 + nconmax: 200 + iterations: 100 + ls_iterations: 50 + solver: newton + integrator: implicitfast + use_mujoco_cpu: false + disable_contacts: false + default_actuator_gear: null + actuator_gears: null + update_data_interval: 1 + save_to_mjcf: null + impratio: 1.0 + cone: pyramidal + ccd_iterations: 35 + ls_parallel: false + use_mujoco_contacts: false + tolerance: 1.0e-06 + collision_cfg: + broad_phase: explicit + reduce_contacts: true + rigid_contact_max: null + max_triangle_pairs: 4000000 + soft_contact_max: null + soft_contact_margin: 0.01 + requires_grad: null + sdf_hydroelastic_config: null + render: + enable_translucency: null + enable_reflections: null + enable_global_illumination: null + antialiasing_mode: null + enable_dlssg: null + enable_dl_denoiser: null + dlss_mode: null + enable_direct_lighting: null + samples_per_pixel: null + enable_shadows: null + enable_ambient_occlusion: null + dome_light_upper_lower_strategy: null + carb_settings: null + rendering_mode: null + 
create_stage_in_memory: false + logging_level: WARNING + save_logs_to_file: true + log_dir: null + visualizer_cfgs: [] +ui_window_class_type: isaaclab.envs.ui.manager_based_rl_env_window:ManagerBasedRLEnvWindow +seed: 42 +decimation: 4 +scene: + num_envs: 8192 + env_spacing: 2.5 + lazy_sensor_update: true + replicate_physics: true + filter_collisions: true + clone_in_fabric: false + robot: + class_type: isaaclab.assets.articulation.articulation:Articulation + prim_path: /World/envs/env_.*/Robot + spawn: + func: isaaclab.sim.spawners.from_files.from_files:spawn_from_usd + visible: true + semantic_tags: null + copy_from_source: true + spawn_path: /World/template/Robot/proto_asset_.* + mass_props: null + deformable_props: null + rigid_props: + rigid_body_enabled: null + kinematic_enabled: null + disable_gravity: false + linear_damping: 0.0 + angular_damping: 0.0 + max_linear_velocity: 1000.0 + max_angular_velocity: 1000.0 + max_depenetration_velocity: 1.0 + max_contact_impulse: null + enable_gyroscopic_forces: null + retain_accelerations: false + solver_position_iteration_count: null + solver_velocity_iteration_count: null + sleep_threshold: null + stabilization_threshold: null + collision_props: null + activate_contact_sensors: true + scale: null + articulation_props: + articulation_enabled: null + enabled_self_collisions: true + solver_position_iteration_count: 4 + solver_velocity_iteration_count: 0 + sleep_threshold: null + stabilization_threshold: null + fix_root_link: null + fixed_tendons_props: null + spatial_tendons_props: null + joint_drive_props: null + visual_material_path: material + visual_material: null + randomizable_visual_materials: false + randomizable_visual_materials_mode: full + randomizable_visual_materials_path: RandomizableMaterials + usd_path: https://omniverse-content-staging.s3-us-west-2.amazonaws.com/Assets/Isaac/6.0/Isaac/IsaacLab/Robots/ANYbotics/ANYmal-D/anymal_d.usd + variants: null + init_state: + pos: !!python/tuple + - 0.0 + - 0.0 + - 
0.6 + rot: !!python/tuple + - 0.0 + - 0.0 + - 0.0 + - 1.0 + lin_vel: !!python/tuple + - 0.0 + - 0.0 + - 0.0 + ang_vel: !!python/tuple + - 0.0 + - 0.0 + - 0.0 + joint_pos: + .*HAA: 0.0 + .*F_HFE: 0.4 + .*H_HFE: -0.4 + .*F_KFE: -0.8 + .*H_KFE: 0.8 + joint_vel: + .*: 0.0 + collision_group: 0 + debug_vis: false + articulation_root_prim_path: null + soft_joint_pos_limit_factor: 0.95 + actuators: + legs: + class_type: isaaclab.actuators.actuator_net:ActuatorNetLSTM + joint_names_expr: + - .*HAA + - .*HFE + - .*KFE + effort_limit: 80.0 + velocity_limit: 7.5 + effort_limit_sim: null + velocity_limit_sim: null + stiffness: null + damping: null + armature: 0.01 + friction: null + dynamic_friction: null + viscous_friction: null + saturation_effort: 120.0 + network_file: https://omniverse-content-staging.s3-us-west-2.amazonaws.com/Assets/Isaac/6.0/Isaac/IsaacLab/ActuatorNets/ANYbotics/anydrive_3_lstm_jit.pt + actuator_value_resolution_debug_print: false + terrain: + class_type: isaaclab.terrains.terrain_importer:TerrainImporter + collision_group: -1 + prim_path: /World/ground + num_envs: 8192 + terrain_type: generator + terrain_generator: + class_type: isaaclab.terrains.terrain_generator:TerrainGenerator + seed: null + curriculum: true + size: !!python/tuple + - 8.0 + - 8.0 + border_width: 20.0 + border_height: 1.0 + num_rows: 10 + num_cols: 20 + color_scheme: none + horizontal_scale: 0.1 + vertical_scale: 0.005 + slope_threshold: 0.75 + sub_terrains: + pyramid_stairs: + function: isaaclab.terrains.trimesh.mesh_terrains:pyramid_stairs_terrain + proportion: 0.2 + size: !!python/tuple + - 8.0 + - 8.0 + flat_patch_sampling: null + border_width: 1.0 + step_height_range: !!python/tuple + - 0.05 + - 0.23 + step_width: 0.3 + platform_width: 3.0 + holes: false + pyramid_stairs_inv: + function: isaaclab.terrains.trimesh.mesh_terrains:inverted_pyramid_stairs_terrain + proportion: 0.2 + size: !!python/tuple + - 8.0 + - 8.0 + flat_patch_sampling: null + border_width: 1.0 + 
step_height_range: !!python/tuple + - 0.05 + - 0.23 + step_width: 0.3 + platform_width: 3.0 + holes: false + boxes: + function: isaaclab.terrains.trimesh.mesh_terrains:random_grid_terrain + proportion: 0.2 + size: !!python/tuple + - 8.0 + - 8.0 + flat_patch_sampling: null + grid_width: 0.45 + grid_height_range: !!python/tuple + - 0.05 + - 0.2 + platform_width: 2.0 + holes: false + random_rough: + function: isaaclab.terrains.height_field.hf_terrains:random_uniform_terrain + proportion: 0.2 + size: !!python/tuple + - 8.0 + - 8.0 + flat_patch_sampling: null + border_width: 0.25 + horizontal_scale: 0.1 + vertical_scale: 0.005 + slope_threshold: 0.75 + noise_range: !!python/tuple + - 0.02 + - 0.1 + noise_step: 0.02 + downsampled_scale: null + hf_pyramid_slope: + function: isaaclab.terrains.height_field.hf_terrains:pyramid_sloped_terrain + proportion: 0.1 + size: !!python/tuple + - 8.0 + - 8.0 + flat_patch_sampling: null + border_width: 0.25 + horizontal_scale: 0.1 + vertical_scale: 0.005 + slope_threshold: 0.75 + slope_range: !!python/tuple + - 0.0 + - 0.4 + platform_width: 2.0 + inverted: false + hf_pyramid_slope_inv: + function: isaaclab.terrains.height_field.hf_terrains:pyramid_sloped_terrain + proportion: 0.1 + size: !!python/tuple + - 8.0 + - 8.0 + flat_patch_sampling: null + border_width: 0.25 + horizontal_scale: 0.1 + vertical_scale: 0.005 + slope_threshold: 0.75 + slope_range: !!python/tuple + - 0.0 + - 0.4 + platform_width: 2.0 + inverted: true + difficulty_range: !!python/tuple + - 0.0 + - 1.0 + use_cache: false + cache_dir: /tmp/isaaclab/terrains + usd_path: null + env_spacing: 2.5 + use_terrain_origins: true + visual_material: + func: isaaclab.sim.spawners.materials.visual_materials:spawn_from_mdl_file + mdl_path: https://omniverse-content-staging.s3-us-west-2.amazonaws.com/Assets/Isaac/6.0/Isaac/IsaacLab/Materials/TilesMarbleSpiderWhiteBrickBondHoned/TilesMarbleSpiderWhiteBrickBondHoned.mdl + project_uvw: true + albedo_brightness: null + texture_scale: 
!!python/tuple + - 0.25 + - 0.25 + physics_material: + func: isaaclab.sim.spawners.materials.physics_materials:spawn_rigid_body_material + static_friction: 1.0 + dynamic_friction: 1.0 + restitution: 0.0 + friction_combine_mode: multiply + restitution_combine_mode: multiply + compliant_contact_stiffness: 0.0 + compliant_contact_damping: 0.0 + collision_props: null + max_init_terrain_level: 5 + debug_vis: false + height_scanner: null + contact_forces: + class_type: isaaclab_newton.sensors.contact_sensor.contact_sensor:ContactSensor + prim_path: /World/envs/env_.*/Robot/.* + update_period: 0.0 + debug_vis: false + track_pose: false + track_contact_points: false + track_friction_forces: false + max_contact_data_count_per_prim: null + track_air_time: true + force_threshold: null + history_length: 3 + filter_prim_paths_expr: [] + visualizer_cfg: + prim_path: /Visuals/ContactSensor + markers: + contact: + func: isaaclab.sim.spawners.shapes.shapes:spawn_sphere + visible: true + semantic_tags: null + copy_from_source: true + spawn_path: null + mass_props: null + rigid_props: null + collision_props: null + activate_contact_sensors: false + visual_material_path: material + visual_material: + func: isaaclab.sim.spawners.materials.visual_materials:spawn_preview_surface + diffuse_color: !!python/tuple + - 1.0 + - 0.0 + - 0.0 + emissive_color: !!python/tuple + - 0.0 + - 0.0 + - 0.0 + roughness: 0.5 + metallic: 0.0 + opacity: 1.0 + physics_material_path: material + physics_material: null + radius: 0.02 + no_contact: + func: isaaclab.sim.spawners.shapes.shapes:spawn_sphere + visible: false + semantic_tags: null + copy_from_source: true + spawn_path: null + mass_props: null + rigid_props: null + collision_props: null + activate_contact_sensors: false + visual_material_path: material + visual_material: + func: isaaclab.sim.spawners.materials.visual_materials:spawn_preview_surface + diffuse_color: !!python/tuple + - 0.0 + - 1.0 + - 0.0 + emissive_color: !!python/tuple + - 0.0 + - 0.0 
+ - 0.0 + roughness: 0.5 + metallic: 0.0 + opacity: 1.0 + physics_material_path: material + physics_material: null + radius: 0.02 + sensor_shape_prim_expr: [] + filter_shape_prim_expr: [] + sky_light: + class_type: null + prim_path: /World/skyLight + spawn: + func: isaaclab.sim.spawners.lights.lights:spawn_light + visible: true + semantic_tags: null + copy_from_source: true + spawn_path: /World/skyLight + prim_type: DomeLight + color: !!python/tuple + - 1.0 + - 1.0 + - 1.0 + enable_color_temperature: false + color_temperature: 6500.0 + normalize: false + exposure: 0.0 + intensity: 750.0 + texture_file: https://omniverse-content-staging.s3-us-west-2.amazonaws.com/Assets/Isaac/6.0/Isaac/Materials/Textures/Skies/PolyHaven/kloofendal_43d_clear_puresky_4k.hdr + texture_format: automatic + visible_in_primary_ray: true + init_state: + pos: !!python/tuple + - 0.0 + - 0.0 + - 0.0 + rot: !!python/tuple + - 0.0 + - 0.0 + - 0.0 + - 1.0 + collision_group: 0 + debug_vis: false +recorders: + dataset_file_handler_class_type: isaaclab.utils.datasets.hdf5_dataset_file_handler:HDF5DatasetFileHandler + dataset_export_dir_path: /tmp/isaaclab/logs + dataset_filename: dataset + dataset_export_mode: + _value_: 1 + _name_: EXPORT_ALL + _sort_order_: 1 + export_in_record_pre_reset: true + export_in_close: false +observations: + policy: + concatenate_terms: true + concatenate_dim: -1 + enable_corruption: true + history_length: null + flatten_history_dim: true + base_lin_vel: + func: isaaclab.envs.mdp.observations:base_lin_vel + params: {} + modifiers: null + noise: + func: isaaclab.utils.noise.noise_model:uniform_noise + operation: add + n_min: -0.1 + n_max: 0.1 + clip: null + scale: null + history_length: 0 + flatten_history_dim: true + base_ang_vel: + func: isaaclab.envs.mdp.observations:base_ang_vel + params: {} + modifiers: null + noise: + func: isaaclab.utils.noise.noise_model:uniform_noise + operation: add + n_min: -0.2 + n_max: 0.2 + clip: null + scale: null + history_length: 0 + 
flatten_history_dim: true + projected_gravity: + func: isaaclab.envs.mdp.observations:projected_gravity + params: {} + modifiers: null + noise: + func: isaaclab.utils.noise.noise_model:uniform_noise + operation: add + n_min: -0.05 + n_max: 0.05 + clip: null + scale: null + history_length: 0 + flatten_history_dim: true + velocity_commands: + func: isaaclab.envs.mdp.observations:generated_commands + params: + command_name: base_velocity + modifiers: null + noise: null + clip: null + scale: null + history_length: 0 + flatten_history_dim: true + joint_pos: + func: isaaclab.envs.mdp.observations:joint_pos_rel + params: {} + modifiers: null + noise: + func: isaaclab.utils.noise.noise_model:uniform_noise + operation: add + n_min: -0.01 + n_max: 0.01 + clip: null + scale: null + history_length: 0 + flatten_history_dim: true + joint_vel: + func: isaaclab.envs.mdp.observations:joint_vel_rel + params: {} + modifiers: null + noise: + func: isaaclab.utils.noise.noise_model:uniform_noise + operation: add + n_min: -1.5 + n_max: 1.5 + clip: null + scale: null + history_length: 0 + flatten_history_dim: true + actions: + func: isaaclab.envs.mdp.observations:last_action + params: {} + modifiers: null + noise: null + clip: null + scale: null + history_length: 0 + flatten_history_dim: true + height_scan: null +actions: + joint_pos: + class_type: isaaclab.envs.mdp.actions.joint_actions:JointPositionAction + asset_name: robot + debug_vis: false + clip: null + joint_names: + - .* + scale: 0.5 + offset: 0.0 + preserve_order: false + use_default_offset: true +events: + base_external_force_torque: + func: isaaclab.envs.mdp.events:apply_external_force_torque + params: + asset_cfg: + name: robot + joint_names: null + joint_ids: !!python/object/apply:builtins.slice + - null + - null + - null + fixed_tendon_names: null + fixed_tendon_ids: !!python/object/apply:builtins.slice + - null + - null + - null + body_names: base + body_ids: !!python/object/apply:builtins.slice + - null + - null + - null 
+ object_collection_names: null + object_collection_ids: !!python/object/apply:builtins.slice + - null + - null + - null + preserve_order: false + force_range: !!python/tuple + - 0.0 + - 0.0 + torque_range: !!python/tuple + - -0.0 + - 0.0 + mode: reset + interval_range_s: null + is_global_time: false + min_step_count_between_reset: 0 + reset_base: + func: isaaclab.envs.mdp.events:reset_root_state_uniform + params: + pose_range: + x: !!python/tuple + - -0.5 + - 0.5 + y: !!python/tuple + - -0.5 + - 0.5 + yaw: !!python/tuple + - -3.14 + - 3.14 + velocity_range: + x: !!python/tuple + - -0.5 + - 0.5 + y: !!python/tuple + - -0.5 + - 0.5 + z: !!python/tuple + - -0.5 + - 0.5 + roll: !!python/tuple + - -0.5 + - 0.5 + pitch: !!python/tuple + - -0.5 + - 0.5 + yaw: !!python/tuple + - -0.5 + - 0.5 + mode: reset + interval_range_s: null + is_global_time: false + min_step_count_between_reset: 0 + reset_robot_joints: + func: isaaclab.envs.mdp.events:reset_joints_by_scale + params: + position_range: !!python/tuple + - 0.5 + - 1.5 + velocity_range: !!python/tuple + - 0.0 + - 0.0 + mode: reset + interval_range_s: null + is_global_time: false + min_step_count_between_reset: 0 + push_robot: + func: isaaclab.envs.mdp.events:push_by_setting_velocity + params: + velocity_range: + x: !!python/tuple + - -0.5 + - 0.5 + y: !!python/tuple + - -0.5 + - 0.5 + mode: interval + interval_range_s: !!python/tuple + - 10.0 + - 15.0 + is_global_time: false + min_step_count_between_reset: 0 + collider_offsets: + func: isaaclab.envs.mdp.events:randomize_rigid_body_collider_offsets + params: + asset_cfg: + name: robot + joint_names: null + joint_ids: !!python/object/apply:builtins.slice + - null + - null + - null + fixed_tendon_names: null + fixed_tendon_ids: !!python/object/apply:builtins.slice + - null + - null + - null + body_names: .* + body_ids: !!python/object/apply:builtins.slice + - null + - null + - null + object_collection_names: null + object_collection_ids: !!python/object/apply:builtins.slice 
+ - null + - null + - null + preserve_order: false + contact_offset_distribution_params: !!python/tuple + - 0.025 + - 0.025 + rest_offset_distribution_params: !!python/tuple + - 0.001 + - 0.001 + mode: startup + interval_range_s: null + is_global_time: false + min_step_count_between_reset: 0 +rerender_on_reset: false +num_rerenders_on_reset: 0 +wait_for_textures: true +xr: null +teleop_devices: + devices: {} +isaac_teleop: null +export_io_descriptors: false +log_dir: /home/zhengyuz/Projects/IsaacLab/logs/rsl_rl/anymal_d_rough/2026-04-03_16-16-04 +is_finite_horizon: false +episode_length_s: 20.0 +rewards: + track_lin_vel_xy_exp: + func: isaaclab.envs.mdp.rewards:track_lin_vel_xy_exp + params: + command_name: base_velocity + std: 0.5 + weight: 1.0 + track_ang_vel_z_exp: + func: isaaclab.envs.mdp.rewards:track_ang_vel_z_exp + params: + command_name: base_velocity + std: 0.5 + weight: 0.5 + lin_vel_z_l2: + func: isaaclab.envs.mdp.rewards:lin_vel_z_l2 + params: {} + weight: -2.0 + ang_vel_xy_l2: + func: isaaclab.envs.mdp.rewards:ang_vel_xy_l2 + params: {} + weight: -0.05 + dof_torques_l2: + func: isaaclab.envs.mdp.rewards:joint_torques_l2 + params: {} + weight: -1.0e-05 + dof_acc_l2: + func: isaaclab.envs.mdp.rewards:joint_acc_l2 + params: {} + weight: -2.5e-07 + action_rate_l2: + func: isaaclab.envs.mdp.rewards:action_rate_l2 + params: {} + weight: -0.01 + feet_air_time: + func: isaaclab_tasks.manager_based.locomotion.velocity.mdp.rewards:feet_air_time + params: + sensor_cfg: + name: contact_forces + joint_names: null + joint_ids: !!python/object/apply:builtins.slice + - null + - null + - null + fixed_tendon_names: null + fixed_tendon_ids: !!python/object/apply:builtins.slice + - null + - null + - null + body_names: .*FOOT + body_ids: !!python/object/apply:builtins.slice + - null + - null + - null + object_collection_names: null + object_collection_ids: !!python/object/apply:builtins.slice + - null + - null + - null + preserve_order: false + command_name: base_velocity 
+ threshold: 0.5 + weight: 0.125 + undesired_contacts: + func: isaaclab.envs.mdp.rewards:undesired_contacts + params: + sensor_cfg: + name: contact_forces + joint_names: null + joint_ids: !!python/object/apply:builtins.slice + - null + - null + - null + fixed_tendon_names: null + fixed_tendon_ids: !!python/object/apply:builtins.slice + - null + - null + - null + body_names: .*THIGH + body_ids: !!python/object/apply:builtins.slice + - null + - null + - null + object_collection_names: null + object_collection_ids: !!python/object/apply:builtins.slice + - null + - null + - null + preserve_order: false + threshold: 1.0 + weight: -1.0 + flat_orientation_l2: + func: isaaclab.envs.mdp.rewards:flat_orientation_l2 + params: {} + weight: 0.0 + dof_pos_limits: + func: isaaclab.envs.mdp.rewards:joint_pos_limits + params: {} + weight: 0.0 +terminations: + time_out: + func: isaaclab.envs.mdp.terminations:time_out + params: {} + time_out: true + base_contact: + func: isaaclab.envs.mdp.terminations:illegal_contact + params: + sensor_cfg: + name: contact_forces + joint_names: null + joint_ids: !!python/object/apply:builtins.slice + - null + - null + - null + fixed_tendon_names: null + fixed_tendon_ids: !!python/object/apply:builtins.slice + - null + - null + - null + body_names: base + body_ids: !!python/object/apply:builtins.slice + - null + - null + - null + object_collection_names: null + object_collection_ids: !!python/object/apply:builtins.slice + - null + - null + - null + preserve_order: false + threshold: 1.0 + time_out: false + body_lin_vel: + func: isaaclab.envs.mdp.terminations:body_lin_vel_out_of_limit + params: + max_velocity: 20.0 + asset_cfg: + name: robot + joint_names: null + joint_ids: !!python/object/apply:builtins.slice + - null + - null + - null + fixed_tendon_names: null + fixed_tendon_ids: !!python/object/apply:builtins.slice + - null + - null + - null + body_names: .* + body_ids: !!python/object/apply:builtins.slice + - null + - null + - null + 
object_collection_names: null + object_collection_ids: !!python/object/apply:builtins.slice + - null + - null + - null + preserve_order: false + time_out: false + body_ang_vel: + func: isaaclab.envs.mdp.terminations:body_ang_vel_out_of_limit + params: + max_velocity: 100.0 + asset_cfg: + name: robot + joint_names: null + joint_ids: !!python/object/apply:builtins.slice + - null + - null + - null + fixed_tendon_names: null + fixed_tendon_ids: !!python/object/apply:builtins.slice + - null + - null + - null + body_names: .* + body_ids: !!python/object/apply:builtins.slice + - null + - null + - null + object_collection_names: null + object_collection_ids: !!python/object/apply:builtins.slice + - null + - null + - null + preserve_order: false + time_out: false +curriculum: + terrain_levels: + func: isaaclab_tasks.manager_based.locomotion.velocity.mdp.curriculums:terrain_levels_vel + params: {} +commands: + base_velocity: + class_type: isaaclab.envs.mdp.commands.velocity_command:UniformVelocityCommand + resampling_time_range: !!python/tuple + - 10.0 + - 10.0 + debug_vis: true + asset_name: robot + heading_command: true + heading_control_stiffness: 0.5 + rel_standing_envs: 0.02 + rel_heading_envs: 1.0 + ranges: + lin_vel_x: !!python/tuple + - -1.0 + - 1.0 + lin_vel_y: !!python/tuple + - -1.0 + - 1.0 + ang_vel_z: !!python/tuple + - -1.0 + - 1.0 + heading: !!python/tuple + - -3.141592653589793 + - 3.141592653589793 + goal_vel_visualizer_cfg: + prim_path: /Visuals/Command/velocity_goal + markers: + arrow: + func: isaaclab.sim.spawners.from_files.from_files:spawn_from_usd + visible: true + semantic_tags: null + copy_from_source: true + spawn_path: null + mass_props: null + deformable_props: null + rigid_props: null + collision_props: null + activate_contact_sensors: false + scale: !!python/tuple + - 0.5 + - 0.5 + - 0.5 + articulation_props: null + fixed_tendons_props: null + spatial_tendons_props: null + joint_drive_props: null + visual_material_path: material + 
visual_material: + func: isaaclab.sim.spawners.materials.visual_materials:spawn_preview_surface + diffuse_color: !!python/tuple + - 0.0 + - 1.0 + - 0.0 + emissive_color: !!python/tuple + - 0.0 + - 0.0 + - 0.0 + roughness: 0.5 + metallic: 0.0 + opacity: 1.0 + randomizable_visual_materials: false + randomizable_visual_materials_mode: full + randomizable_visual_materials_path: RandomizableMaterials + usd_path: https://omniverse-content-staging.s3-us-west-2.amazonaws.com/Assets/Isaac/6.0/Isaac/Props/UIElements/arrow_x.usd + variants: null + current_vel_visualizer_cfg: + prim_path: /Visuals/Command/velocity_current + markers: + arrow: + func: isaaclab.sim.spawners.from_files.from_files:spawn_from_usd + visible: true + semantic_tags: null + copy_from_source: true + spawn_path: null + mass_props: null + deformable_props: null + rigid_props: null + collision_props: null + activate_contact_sensors: false + scale: !!python/tuple + - 0.5 + - 0.5 + - 0.5 + articulation_props: null + fixed_tendons_props: null + spatial_tendons_props: null + joint_drive_props: null + visual_material_path: material + visual_material: + func: isaaclab.sim.spawners.materials.visual_materials:spawn_preview_surface + diffuse_color: !!python/tuple + - 0.0 + - 0.0 + - 1.0 + emissive_color: !!python/tuple + - 0.0 + - 0.0 + - 0.0 + roughness: 0.5 + metallic: 0.0 + opacity: 1.0 + randomizable_visual_materials: false + randomizable_visual_materials_mode: full + randomizable_visual_materials_path: RandomizableMaterials + usd_path: https://omniverse-content-staging.s3-us-west-2.amazonaws.com/Assets/Isaac/6.0/Isaac/Props/UIElements/arrow_x.usd + variants: null diff --git a/scripts/newton_repro/envs/rough_anymal_d/mdp.py b/scripts/newton_repro/envs/rough_anymal_d/mdp.py new file mode 100644 index 000000000000..b539521f54c6 --- /dev/null +++ b/scripts/newton_repro/envs/rough_anymal_d/mdp.py @@ -0,0 +1,206 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers. 
+# SPDX-License-Identifier: BSD-3-Clause + +"""MDP for ANYmal-D NaN repro using Newton raw API.""" + +import math +import os + +import numpy as np +import torch +import warp as wp + +import newton + +NUM_DOFS = 12 +ACTION_SCALE = 0.5 +# Newton joint order: LF_HAA, LF_HFE, LF_KFE, LH_HAA, LH_HFE, LH_KFE, +# RF_HAA, RF_HFE, RF_KFE, RH_HAA, RH_HFE, RH_KFE +ANYMAL_DEFAULT_JOINTS = [0.0, 0.4, -0.8, 0.0, -0.4, 0.8, + 0.0, 0.4, -0.8, 0.0, -0.4, 0.8] +LSTM_URL = "https://omniverse-content-staging.s3-us-west-2.amazonaws.com/Assets/Isaac/6.0/Isaac/IsaacLab/ActuatorNets/ANYbotics/anydrive_3_lstm_jit.pt" + + +def _quat_rotate_inverse(q, v): + q_w = q[..., 3] + q_vec = q[..., :3] + a = v * (2.0 * q_w**2 - 1.0).unsqueeze(-1) + b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0 + c = q_vec * torch.bmm(q_vec.view(q.shape[0], 1, 3), v.view(q.shape[0], 3, 1)).squeeze(-1) * 2.0 + return a - b + c + + +class AnymalMDP: + """ANYmal-D MDP: observations, actions, termination, reset, LSTM actuator. + + Usage:: + + mdp = AnymalMDP(...) + obs = mdp.get_observations() + while running: + actions = policy(obs) + mdp.set_actions(actions) + for _ in range(decimation): + mdp.apply_lstm_torques() + sim_step() + obs, terminated, truncated = mdp.forward() + reset_ids = (terminated | truncated).nonzero(...) 
+ if len(reset_ids) > 0: + mdp.reset(reset_ids) + """ + + def __init__(self, model, state, control, env_origins, + num_envs, jc_per, jd_per, physics_dt, episode_length_s, + decimation, device): + self.model = model + self.state = state + self.control = control + self.num_envs = num_envs + self.jc_per = jc_per + self.jd_per = jd_per + self.decimation = decimation + self.device = device + + step_dt = physics_dt * decimation + self.max_episode_length = int(math.ceil(episode_length_s / step_dt)) + self.command_change_steps = int(10.0 / step_dt) + + self.default_joints = torch.tensor( + ANYMAL_DEFAULT_JOINTS, device=device, dtype=torch.float32 + ).unsqueeze(0) + self.gravity_vec = torch.tensor([[0.0, 0.0, -1.0]], device=device) + self.env_origins = env_origins + terrain_file = os.path.join(os.path.dirname(__file__), "terrain_origins.npy") + raw = np.load(terrain_file) + self.terrain_origins = torch.tensor(raw.reshape(-1, 3), device=device, dtype=torch.float32) + + + # LSTM actuator + self.lstm = torch.hub.load_state_dict_from_url( + LSTM_URL, map_location=device, check_hash=False, + file_name="anydrive_3_lstm_jit.pt", + ) + self.lstm_h = torch.zeros(2, num_envs * NUM_DOFS, 8, device=device) + self.lstm_c = torch.zeros(2, num_envs * NUM_DOFS, 8, device=device) + + # Buffers + self.last_action = torch.zeros(num_envs, NUM_DOFS, device=device) + self.commands = torch.zeros(num_envs, 3, device=device) + self.commands[:, 0] = 1.0 + self.episode_length = torch.zeros(num_envs, device=device, dtype=torch.long) + self.targets = self.default_joints.expand(num_envs, -1).clone() + self.timestep = 0 + + # Match training's Articulation._resolve_actuator_values() for explicit + # (LSTM) actuators: zero stiffness/damping, set armature, effort limit, + # velocity limit, and friction on the Newton model arrays. 
+ wp.to_torch(model.joint_armature).fill_(0.01) + wp.to_torch(model.joint_target_ke).fill_(0.0) + wp.to_torch(model.joint_target_kd).fill_(0.0) + wp.to_torch(model.joint_friction).fill_(0.0) + wp.to_torch(model.joint_effort_limit).fill_(120.0) + + # Set default standing pose + initial reset + jq = wp.to_torch(state.joint_q).reshape(num_envs, jc_per) + jq[:, 7:] = self.default_joints + newton.eval_fk(model, state.joint_q, state.joint_qd, state) + all_ids = torch.arange(num_envs, dtype=torch.int32, device=device) + self.reset(all_ids) + + def get_observations(self): + """Compute observations from current state.""" + jq = wp.to_torch(self.state.joint_q).reshape(self.num_envs, self.jc_per) + jqd = wp.to_torch(self.state.joint_qd).reshape(self.num_envs, self.jd_per) + root_quat = jq[:, 3:7] + base_lin_vel = _quat_rotate_inverse(root_quat, jqd[:, :3]) + base_ang_vel = _quat_rotate_inverse(root_quat, jqd[:, 3:6]) + projected_gravity = _quat_rotate_inverse(root_quat, self.gravity_vec.expand(self.num_envs, -1)) + + obs = torch.cat([base_lin_vel, base_ang_vel, projected_gravity, + self.commands, jq[:, 7:] - self.default_joints, + jqd[:, 6:], self.last_action], dim=1) + return obs + + def set_actions(self, actions): + """Store actions and compute joint position targets.""" + self.last_action = actions.clone() + self.targets = self.default_joints + ACTION_SCALE * actions + + def apply_lstm_torques(self): + """Compute LSTM actuator torques and write to control.joint_f.""" + jq = wp.to_torch(self.state.joint_q).reshape(self.num_envs, self.jc_per) + jqd = wp.to_torch(self.state.joint_qd).reshape(self.num_envs, self.jd_per) + sea_in = torch.stack([ + (self.targets - jq[:, 7:]).flatten(), + jqd[:, 6:].flatten(), + ], dim=-1).unsqueeze(1) + torques, (self.lstm_h[:], self.lstm_c[:]) = self.lstm(sea_in, (self.lstm_h, self.lstm_c)) + torques = torques.reshape(self.num_envs, NUM_DOFS).clamp(-80.0, 80.0) + + jf = wp.to_torch(self.control.joint_f).reshape(self.num_envs, self.jd_per) + 
jf[:, :6] = 0.0 + jf[:, 6:] = torques + + def forward(self): + """Compute observations, termination, and resample commands after physics step. + + Returns: + Tuple of (obs, terminated, truncated). + """ + obs = self.get_observations() + + self.episode_length += 1 + jq = wp.to_torch(self.state.joint_q).reshape(self.num_envs, self.jc_per) + root_quat = jq[:, 3:7] + projected_gravity = _quat_rotate_inverse(root_quat, self.gravity_vec.expand(self.num_envs, -1)) + jqd = wp.to_torch(self.state.joint_qd).reshape(self.num_envs, self.jd_per) + lin_vel = jqd[:, :3] + ang_vel = jqd[:, 3:6] + terminated = ( + (projected_gravity[:, 2] > -0.1736) + | (lin_vel.norm(dim=1) > 20.0) + | (ang_vel.norm(dim=1) > 100.0) + ) + truncated = self.episode_length >= self.max_episode_length + + if self.timestep % self.command_change_steps == 0: + self.commands[:, 0].uniform_(-1.0, 1.0) + self.commands[:, 1].uniform_(-0.5, 0.5) + self.commands[:, 2].uniform_(-0.5, 0.5) + + + self.timestep += 1 + return obs, terminated, truncated + + def reset(self, reset_ids): + """Reset specified environments to randomized standing pose.""" + n = len(reset_ids) + if n == 0: + return + + jq = wp.to_torch(self.state.joint_q).reshape(self.num_envs, self.jc_per) + jqd = wp.to_torch(self.state.joint_qd).reshape(self.num_envs, self.jd_per) + + # Pick random terrain origins for reset position + rand_idx = torch.randint(0, len(self.terrain_origins), (n,), device=self.device) + origins = self.terrain_origins[rand_idx] + jq[reset_ids, 0] = origins[:, 0] + torch.empty(n, device=self.device).uniform_(-0.5, 0.5) + jq[reset_ids, 1] = origins[:, 1] + torch.empty(n, device=self.device).uniform_(-0.5, 0.5) + jq[reset_ids, 2] = origins[:, 2] + 0.6 + yaw = torch.empty(n, device=self.device).uniform_(-3.14, 3.14) + jq[reset_ids, 3] = 0.0 + jq[reset_ids, 4] = 0.0 + jq[reset_ids, 5] = torch.sin(yaw * 0.5) + jq[reset_ids, 6] = torch.cos(yaw * 0.5) + jq[reset_ids, 7:] = self.default_joints * torch.empty(n, NUM_DOFS, 
device=self.device).uniform_(0.5, 1.5) + + jqd[reset_ids] = 0.0 + + self.last_action[reset_ids] = 0.0 + offsets = (reset_ids.unsqueeze(1) * NUM_DOFS + + torch.arange(NUM_DOFS, device=self.device).unsqueeze(0)).flatten() + self.lstm_h[:, offsets] = 0.0 + self.lstm_c[:, offsets] = 0.0 + + newton.eval_fk(self.model, self.state.joint_q, self.state.joint_qd, self.state) + self.episode_length[reset_ids] = 0 diff --git a/scripts/newton_repro/envs/rough_anymal_d/policy.pt b/scripts/newton_repro/envs/rough_anymal_d/policy.pt new file mode 100644 index 000000000000..5cc4e7cc0ac5 --- /dev/null +++ b/scripts/newton_repro/envs/rough_anymal_d/policy.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:531a6bda6e2bae18de092d39aad98a46476f0ad25fe300713effb6ca5a2f756c +size 776854 diff --git a/scripts/newton_repro/envs/rough_anymal_d/terrain_origins.npy b/scripts/newton_repro/envs/rough_anymal_d/terrain_origins.npy new file mode 100644 index 000000000000..4a1ae053c25d Binary files /dev/null and b/scripts/newton_repro/envs/rough_anymal_d/terrain_origins.npy differ diff --git a/scripts/newton_repro/newton_manager.py b/scripts/newton_repro/newton_manager.py new file mode 100644 index 000000000000..6cccac0b50e5 --- /dev/null +++ b/scripts/newton_repro/newton_manager.py @@ -0,0 +1,105 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers. 
+# SPDX-License-Identifier: BSD-3-Clause + +"""Standalone Newton physics manager — no IsaacLab dependencies.""" + +import inspect + +import warp as wp + +import newton +from newton import CollisionPipeline +from newton.solvers import SolverMuJoCo, SolverNotifyFlags + + +def _to_kwargs(cfg, target_cls): + """Extract valid __init__ kwargs from a namespace/dict config.""" + d = dict(vars(cfg)) if hasattr(cfg, "__dict__") else dict(cfg) + valid = set(inspect.signature(target_cls.__init__).parameters.keys()) - {"self", "model"} + return {k: v for k, v in d.items() if k in valid} + + +class NewtonSim: + """Minimal Newton simulation: model + solver + collision + viewer sync.""" + + def __init__(self, builder, solver_cfg, collision_cfg, physics_dt, num_substeps=1, device="cuda:0"): + """Initialize Newton simulation from a finalized builder. + + Args: + builder: Newton ModelBuilder (already populated with worlds). + solver_cfg: Namespace/dict with SolverMuJoCo kwargs (njmax, nconmax, cone, etc.). + collision_cfg: Namespace/dict with CollisionPipeline kwargs, or None for MuJoCo contacts. + physics_dt: Physics timestep [s]. + num_substeps: Number of solver substeps per step. + device: Torch device string. 
+ """ + self.device = device + self._graph = None + self._num_substeps = num_substeps + self._solver_dt = physics_dt / num_substeps + + # Finalize model + self.model = builder.finalize(device=device) + self.state = self.model.state() + self.control = self.model.control() + newton.eval_fk(self.model, self.state.joint_q, self.state.joint_qd, self.state) + + # Solver + solver_kwargs = _to_kwargs(solver_cfg, SolverMuJoCo) + solver_kwargs.pop("solver_type", None) + self.solver = SolverMuJoCo(self.model, **solver_kwargs) + + # Collision pipeline (None = MuJoCo handles contacts internally) + use_mujoco_contacts = getattr(solver_cfg, "use_mujoco_contacts", True) + self.collision_pipeline = None + self.contacts = None + + if not use_mujoco_contacts and collision_cfg is not None: + col_kwargs = _to_kwargs(collision_cfg, CollisionPipeline) + col_kwargs.pop("sdf_hydroelastic_config", None) + self.collision_pipeline = CollisionPipeline(self.model, **col_kwargs) + self.contacts = self.collision_pipeline.contacts() + + def _simulate(self): + """One physics step: collide → solve × substeps → clear forces.""" + if self.collision_pipeline is not None: + self.collision_pipeline.collide(self.state, self.contacts) + for _ in range(self._num_substeps): + self.solver.step(self.state, self.state, self.control, self.contacts, self._solver_dt) + self.state.clear_forces() + + def step(self): + """Step physics. Uses CUDA graph if captured, otherwise eager.""" + with wp.ScopedDevice(self.device): + if self._graph is not None: + wp.capture_launch(self._graph) + else: + self._simulate() + + def forward(self): + """Sync body transforms from joint state (call before rendering).""" + newton.eval_fk(self.model, self.state.joint_q, self.state.joint_qd, self.state) + + def notify_model_changed(self, flags=None): + """Sync Newton model changes to the MuJoCo Warp solver. 
+ + Must be called after modifying model arrays (joint_armature, + joint_target_ke, joint_target_kd, joint_friction, joint_effort_limit, + etc.) so the solver picks up the new values. + + Args: + flags: SolverNotifyFlags bitmask. Defaults to JOINT_DOF_PROPERTIES. + """ + if flags is None: + flags = SolverNotifyFlags.JOINT_DOF_PROPERTIES + self.solver.notify_model_changed(flags) + + def capture_graph(self): + """Capture CUDA graph for faster stepping. Falls back to eager on failure.""" + with wp.ScopedDevice(self.device): + try: + with wp.ScopedCapture() as capture: + self._simulate() + self._graph = capture.graph + except Exception: + self._graph = None diff --git a/scripts/newton_repro/newton_replicate.py b/scripts/newton_repro/newton_replicate.py new file mode 100644 index 000000000000..b6774bcd3d24 --- /dev/null +++ b/scripts/newton_repro/newton_replicate.py @@ -0,0 +1,182 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import torch +import warp as wp +from newton import ModelBuilder, solvers +from newton._src.usd.schemas import SchemaResolverNewton, SchemaResolverPhysx + +from pxr import Usd + + +def _build_and_label( + stage: Usd.Stage, + sources: list[str], + destinations: list[str], + env_ids: torch.Tensor, + mapping: torch.Tensor, + positions: torch.Tensor | None = None, + quaternions: torch.Tensor | None = None, + up_axis: str = "Z", + simplify_meshes: bool = True, +) -> tuple[ModelBuilder, object]: + """Build a Newton model builder from clone mapping inputs and rename labels. + + Args: + stage: USD stage containing source assets. + sources: Source prim paths used for cloning. + destinations: Destination prim path templates. + env_ids: Environment ids for destination worlds. + mapping: Boolean source-to-environment mapping matrix. 
+ positions: Optional per-environment world positions. + quaternions: Optional per-environment orientations in xyzw order. + up_axis: Up axis for the Newton model builder. + simplify_meshes: Whether to run convex-hull mesh approximation. + + Returns: + Tuple of the populated Newton model builder and stage metadata returned by ``add_usd``. + """ + if positions is None: + positions = torch.zeros((mapping.size(1), 3), device=mapping.device, dtype=torch.float32) + if quaternions is None: + quaternions = torch.zeros((mapping.size(1), 4), device=mapping.device, dtype=torch.float32) + quaternions[:, 3] = 1.0 + + schema_resolvers = [SchemaResolverNewton(), SchemaResolverPhysx()] + + builder = ModelBuilder(up_axis=up_axis) + stage_info = builder.add_usd( + stage, + ignore_paths=["/World/envs"] + sources, + schema_resolvers=schema_resolvers, + ) + + env0_pos = positions[0] + protos: dict[str, ModelBuilder] = {} + for src_path in sources: + p = ModelBuilder(up_axis=up_axis) + solvers.SolverMuJoCo.register_custom_attributes(p) + p.add_usd( + stage, + root_path=src_path, + load_visual_shapes=True, + skip_mesh_approximation=True, + schema_resolvers=schema_resolvers, + ) + if simplify_meshes: + p.approximate_meshes("convex_hull", keep_visual_shapes=True) + protos[src_path] = p + + # create a separate world for each environment (heterogeneous spawning) + # Newton assigns sequential world IDs (0, 1, 2, ...), so we need to track the mapping + for col, _ in enumerate(env_ids.tolist()): + # begin a new world context (Newton assigns world ID = col) + builder.begin_world() + # add all active sources for this world + delta_pos = (positions[col] - env0_pos).tolist() + for row in torch.nonzero(mapping[:, col], as_tuple=True)[0].tolist(): + builder.add_builder( + protos[sources[row]], + xform=wp.transform(delta_pos, quaternions[col].tolist()), + ) + # end the world context + builder.end_world() + + # Rename builder labels from source roots to destination roots + for i, src_path in 
enumerate(sources): + n = len(src_path.rstrip("/")) + world_cols = torch.nonzero(mapping[i], as_tuple=True)[0].tolist() + world_roots = {int(env_ids[c]): destinations[i].format(int(env_ids[c])) for c in world_cols} + for t in ("body", "joint", "shape", "articulation"): + labels = getattr(builder, f"{t}_label", None) or getattr(builder, f"{t}_key") + for k, w in enumerate(getattr(builder, f"{t}_world")): + wid = int(w) + if wid in world_roots and labels[k].startswith(src_path): + labels[k] = world_roots[wid] + labels[k][n:] + + return builder, stage_info + + +def newton_physics_replicate( + stage: Usd.Stage, + sources: list[str], + destinations: list[str], + env_ids: torch.Tensor, + mapping: torch.Tensor, + positions: torch.Tensor | None = None, + quaternions: torch.Tensor | None = None, + device: str = "cpu", + up_axis: str = "Z", + simplify_meshes: bool = True, +): + """Replicate prims into a Newton ``ModelBuilder`` using a per-source mapping. + + Args: + stage: USD stage containing source assets. + sources: Source prim paths used for cloning. + destinations: Destination prim path templates. + env_ids: Environment ids for destination worlds. + mapping: Boolean source-to-environment mapping matrix. + positions: Optional per-environment world positions. + quaternions: Optional per-environment orientations in xyzw order. + device: Device used by the finalized Newton model builder. + up_axis: Up axis for the Newton model builder. + simplify_meshes: Whether to run convex-hull mesh approximation. + + Returns: + Tuple of the populated Newton model builder and stage metadata. 
+ """ + from isaaclab_newton.physics import NewtonManager + + builder, stage_info = _build_and_label( + stage, sources, destinations, env_ids, mapping, + positions, quaternions, up_axis, simplify_meshes, + ) + NewtonManager.set_builder(builder) + NewtonManager._num_envs = mapping.size(1) + return builder, stage_info + + +def newton_visualizer_prebuild( + stage: Usd.Stage, + sources: list[str], + destinations: list[str], + env_ids: torch.Tensor, + mapping: torch.Tensor, + positions: torch.Tensor | None = None, + quaternions: torch.Tensor | None = None, + device: str = "cpu", + up_axis: str = "Z", + simplify_meshes: bool = True, +): + """Replicate a clone plan into a finalized Newton model/state for visualization. + + Unlike :func:`newton_physics_replicate`, this path does not mutate ``NewtonManager`` and is intended + for prebuilding visualizer-only artifacts that can be consumed by scene data providers. + + Args: + stage: USD stage containing source assets. + sources: Source prim paths used for cloning. + destinations: Destination prim path templates. + env_ids: Environment ids for destination worlds. + mapping: Boolean source-to-environment mapping matrix. + positions: Optional per-environment world positions. + quaternions: Optional per-environment orientations in xyzw order. + device: Device used by the finalized Newton model. + up_axis: Up axis for the Newton model builder. + simplify_meshes: Whether to run convex-hull mesh approximation. + + Returns: + Tuple of finalized Newton model and state. 
+ """ + builder, _ = _build_and_label( + stage, sources, destinations, env_ids, mapping, + positions, quaternions, up_axis, simplify_meshes, + ) + model = builder.finalize(device=device) + state = model.state() + return model, state diff --git a/scripts/newton_repro/repro.py b/scripts/newton_repro/repro.py new file mode 100644 index 000000000000..7f1a2f4f36b7 --- /dev/null +++ b/scripts/newton_repro/repro.py @@ -0,0 +1,201 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""NaN reproduction script — standalone, no IsaacLab runtime dependencies.""" + +import argparse +import json +import math +import os +import sys +from types import SimpleNamespace + +import newton.viewer +import torch +import warp as wp +import yaml +from pxr import Usd + +_repro_dir = os.path.dirname(os.path.abspath(__file__)) +sys.path.insert(0, _repro_dir) +from newton_replicate import _build_and_label +from newton_manager import NewtonSim + + +def main(): + parser = argparse.ArgumentParser(description="NaN repro — standalone") + parser.add_argument("--env", type=str, required=True, help="Env name under envs/ (e.g. rough_anymal_d)") + parser.add_argument("--num_envs", type=int, default=None) + parser.add_argument("--policy", type=str, default=None) + parser.add_argument("--headless", action="store_true") + parser.add_argument("--record", type=str, default=None, help="Record to file (e.g. 
recording.bin)") + args = parser.parse_args() + + device = "cuda:0" + torch.cuda.set_device(0) + + env_dir = os.path.join(_repro_dir, "envs", args.env) + sys.path.insert(0, env_dir) + import mdp + + with open(os.path.join(env_dir, "env.yaml")) as f: + cfg = yaml.unsafe_load(f) + + num_envs = args.num_envs or cfg["scene"]["num_envs"] + decimation = cfg["decimation"] + physics_dt = cfg["sim"]["dt"] + episode_length_s = cfg["episode_length_s"] + phys = cfg["sim"]["physics"] + solver_cfg = SimpleNamespace(**phys["solver_cfg"]) + collision_cfg = SimpleNamespace(**phys["collision_cfg"]) if phys.get("collision_cfg") else None + num_substeps = phys.get("num_substeps", 1) + + stage = Usd.Stage.Open(os.path.join(env_dir, "stage.usd")) + info = json.load(open(os.path.join(env_dir, "cloner_info.json"))) + + env_ids = torch.arange(num_envs, dtype=torch.long, device=device) + mapping = torch.ones(len(info["sources"]), num_envs, dtype=torch.bool, device=device) + env_spacing = info.get("env_spacing", cfg["scene"]["env_spacing"]) * 2 + num_rows = int(math.ceil(num_envs / math.sqrt(num_envs))) + num_cols = int(math.ceil(num_envs / num_rows)) + ii, jj = torch.meshgrid( + torch.arange(num_rows, dtype=torch.float32, device=device), + torch.arange(num_cols, dtype=torch.float32, device=device), + indexing="ij", + ) + positions = torch.stack([ + -(ii.flatten()[:num_envs] - (num_rows - 1) / 2) * env_spacing, + (jj.flatten()[:num_envs] - (num_cols - 1) / 2) * env_spacing, + torch.zeros(num_envs, device=device), + ], dim=1) + + builder, _ = _build_and_label( + stage, + info["sources"], + info["destinations"], + env_ids, + mapping, + positions=positions, + up_axis=info.get("up_axis", "Z"), + simplify_meshes=info.get("simplify_meshes", True), + ) + sim = NewtonSim(builder, solver_cfg, collision_cfg, physics_dt, num_substeps, device) + + viewer = newton.viewer.ViewerNull() if args.headless else newton.viewer.ViewerGL() + viewer.set_model(sim.model) + viewer.set_world_offsets((0.0, 0.0, 0.0)) + 
recorder = None + if args.record: + from newton._src.viewer.viewer_file import ViewerFile + recorder = ViewerFile(args.record, max_history_size=200) + recorder.set_model(sim.model) + + jc_starts = sim.model.joint_coord_world_start.numpy() + jd_starts = sim.model.joint_dof_world_start.numpy() + jc_per = int(jc_starts[1]) - int(jc_starts[0]) + jd_per = int(jd_starts[1]) - int(jd_starts[0]) + + mdp_env = mdp.AnymalMDP( + model=sim.model, + state=sim.state, + control=sim.control, + env_origins=positions, + num_envs=num_envs, + jc_per=jc_per, + jd_per=jd_per, + physics_dt=physics_dt, + episode_length_s=episode_length_s, + decimation=decimation, + device=device, + ) + + # AnymalMDP modifies model arrays (armature, stiffness, damping, etc.) + # Notify the solver so it syncs these into the MuJoCo Warp model. + sim.notify_model_changed() + + policy_path = args.policy or os.path.join(env_dir, "policy.pt") + print(f"[INFO] Loading JIT policy: {policy_path}") + policy = torch.jit.load(policy_path, map_location=device).eval() + + obs = mdp_env.get_observations() + timestep = 0 + prev = {} + try: + while args.headless or viewer.is_running(): + with torch.inference_mode(): + actions = policy(obs) + mdp_env.set_actions(actions) + for _ in range(decimation): + mdp_env.apply_lstm_torques() + sim.step() + obs, terminated, truncated = mdp_env.forward() + + # NaN detection + jq = wp.to_torch(sim.state.joint_q).reshape(num_envs, jc_per) + nan_mask = torch.isnan(jq).any(dim=1) + if nan_mask.any(): + nan_envs = nan_mask.nonzero(as_tuple=False).squeeze(-1) + jqd = wp.to_torch(sim.state.joint_qd).reshape(num_envs, jd_per) + jf = wp.to_torch(sim.control.joint_f).reshape(num_envs, jd_per) + mjw = sim.solver.mjw_data + for eid in nan_envs[:3].tolist(): + print(f"\n{'='*80}") + print(f"[NaN] timestep={timestep} env={eid}") + print(f" jq: {jq[eid].tolist()}") + print(f" jqd: {jqd[eid].tolist()}") + print(f" jf: {jf[eid].tolist()}") + if mjw is not None: + for name in ["qpos", "qvel", "qacc", 
"qfrc_applied", "qfrc_constraint", "qfrc_bias"]: + arr = getattr(mjw, name, None) + if arr is not None: + val = wp.to_torch(arr)[eid] + print(f" mjw.{name}: {val.tolist()}") + for name in ["ncon", "nefc"]: + arr = getattr(mjw, name, None) + if arr is not None: + print(f" mjw.{name}: {wp.to_torch(arr)[eid].item()}") + if prev: + print(f"\n[Pre-NaN] env={eid}") + print(f" prev_jq: {prev['jq'][eid].tolist()}") + print(f" prev_jqd: {prev['jqd'][eid].tolist()}") + print(f" prev_jf: {prev['jf'][eid].tolist()}") + print(f" |prev_jq|_max: {prev['jq'][eid].abs().max().item():.6f}") + print(f" |prev_jqd|_max: {prev['jqd'][eid].abs().max().item():.6f}") + print(f" |prev_jf|_max: {prev['jf'][eid].abs().max().item():.6f}") + print(f"{'='*80}\n") + if recorder: + recorder.save() + print(f"[INFO] Recording saved to {args.record}") + break + + prev = { + "jq": jq.clone(), + "jqd": wp.to_torch(sim.state.joint_qd).reshape(num_envs, jd_per).clone(), + "jf": wp.to_torch(sim.control.joint_f).reshape(num_envs, jd_per).clone(), + } + + reset_ids = (terminated | truncated).nonzero(as_tuple=False).squeeze(-1) + if len(reset_ids) > 0: + mdp_env.reset(reset_ids) + policy.reset() + + if timestep % 100 == 0: + print(f"Timestep: {timestep}") + + sim.forward() + viewer.begin_frame(0.0) + viewer.log_state(sim.state) + viewer.end_frame() + if recorder: + recorder.begin_frame(timestep * physics_dt * decimation) + recorder.log_state(sim.state) + recorder.end_frame() + timestep += 1 + except KeyboardInterrupt: + pass + + +if __name__ == "__main__": + main() diff --git a/scripts/tools/inspect_nan_reset.py b/scripts/tools/inspect_nan_reset.py new file mode 100644 index 000000000000..7d915852e19e --- /dev/null +++ b/scripts/tools/inspect_nan_reset.py @@ -0,0 +1,282 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers. +# SPDX-License-Identifier: BSD-3-Clause + +"""Visualize the exact robot state at NaN frame 0 (post-reset, pre-step). 
+ +Loads one or more NaN debug npz files, extracts the last valid frame +(the state written by reset, just before the solver step that NaN'd), +builds a multi-world Newton model with one world per NaN'd env, and +displays them side by side in the viewer. + +Usage: + python scripts/tools/inspect_nan_reset.py nan_debug/nan_replay_*.npz + python scripts/tools/inspect_nan_reset.py nan_debug/nan_replay_A.npz nan_debug/nan_replay_B.npz +""" + +from __future__ import annotations + +import argparse +import re +import sys +import time as _time + +import numpy as np +import torch +import warp as wp + +from newton import CollisionPipeline, ModelBuilder, eval_fk +from newton._src.usd.schemas import SchemaResolverNewton, SchemaResolverPhysx +from newton.solvers import SolverMuJoCo +from newton.viewer import ViewerGL + + +def _extract_env_state(npz_path: str): + """Extract the post-reset (pre-step) state for the NaN'd env from an npz. + + Prefers ``pre_step_*`` arrays (the exact solver input that caused NaN). + Falls back to the last valid ring frame if ``pre_step_*`` is absent. 
+ """ + d = np.load(npz_path, allow_pickle=True) + eid = int(d["exported_env_ids"][0]) + wc = int(d["world_count"]) + jq_all = d["joint_q"] + jqd_all = d["joint_qd"] + bq_all = d["body_q"] + bqd_all = d.get("body_qd") + + jc_per = jq_all.shape[1] // wc + jd_per = jqd_all.shape[1] // wc + bodies_per = bq_all.shape[1] // wc + + # Prefer pre_step snapshot (exact post-reset state before solver NaN'd) + has_pre_step = "pre_step_joint_q" in d + if has_pre_step: + ps_jq = d["pre_step_joint_q"] + ps_jqd = d["pre_step_joint_qd"] + ps_bq = d.get("pre_step_body_q") + ps_bqd = d.get("pre_step_body_qd") + + jq = ps_jq[eid * jc_per : (eid + 1) * jc_per] + jqd = ps_jqd[eid * jd_per : (eid + 1) * jd_per] + bq = ps_bq.reshape(-1, 7)[eid * bodies_per : (eid + 1) * bodies_per] if ps_bq is not None else None + bqd = ps_bqd.reshape(-1, 6)[eid * bodies_per : (eid + 1) * bodies_per] if ps_bqd is not None else None + source = "pre_step (post-reset)" + else: + # Fallback: last valid ring frame (pre-reset, not ideal) + for f in range(jq_all.shape[0] - 1, -1, -1): + q = jq_all[f, eid * jc_per : (eid + 1) * jc_per] + if np.isfinite(q).all(): + break + jq = jq_all[f, eid * jc_per : (eid + 1) * jc_per] + jqd = jqd_all[f, eid * jd_per : (eid + 1) * jd_per] + bq = bq_all[f, eid * bodies_per : (eid + 1) * bodies_per] + bqd = bqd_all[f, eid * bodies_per : (eid + 1) * bodies_per] if bqd_all is not None else None + source = f"ring frame {f} (pre-reset fallback)" + + # NaN frame forces + forces = None + fh = d.get("diag_qfrc_applied") + for f in range(jq_all.shape[0] - 1, -1, -1): + q = jq_all[f, eid * jc_per : (eid + 1) * jc_per] + if np.isfinite(q).all(): + break + nan_frame = min(f + 1, jq_all.shape[0] - 1) + if fh is not None and fh.ndim == 3: + forces = fh[nan_frame, eid] + + root_pos = jq[:3] if np.isfinite(jq[:3]).all() else (bq[0, :3] if bq is not None else np.zeros(3)) + cdist = d["diag_contact_dist_min"][nan_frame, eid] if "diag_contact_dist_min" in d else None + niter = 
def main():
    """CLI entry point: rebuild the scene and visualize the pre-NaN state.

    One Newton world is built per NaN'd env, each translated to that env's
    recorded root XY position so broad-phase collision covers the correct
    terrain patch. With ``--step`` the recorded forces are applied and the
    solver is stepped once to try to reproduce the NaN.

    Fix vs. original: removed the unused local ``spacing`` (worlds are placed
    at their recorded positions, never by a fixed spacing).
    """
    parser = argparse.ArgumentParser(description="Inspect NaN reset states visually.")
    parser.add_argument("npz_paths", nargs="+", help="NaN debug npz files")
    parser.add_argument("--device", default="cuda:0")
    parser.add_argument("--step", action="store_true",
                        help="After showing pre-NaN state, step physics once to reproduce")
    args = parser.parse_args()

    envs = [_extract_env_state(p) for p in args.npz_paths]
    num_worlds = len(envs)

    for i, e in enumerate(envs):
        print(f" [{i}] env {e['env_id']} from {e['npz_path']}")
        print(f" source: {e['source']}")
        print(f" episode_step={e['episode_step']}, contact_dist={e['contact_dist_min']}, niter={e['solver_niter']}")
        print(f" root_pos={e['root_pos']}")
        print(f" root_quat={e['jq'][3:7]}")
        print(f" joint_pos={e['jq'][7:]}")

    # Build model: one world per NaN'd env, each at its root position.
    # All exports are assumed to share the same scene USD (first path used).
    usd_path = args.npz_paths[0].replace(".npz", ".usd")
    print(f"\nBuilding {num_worlds} worlds from {usd_path}...")

    from pxr import Sdf, Usd, UsdGeom, UsdPhysics

    stage = Usd.Stage.Open(usd_path)
    layer = stage.GetRootLayer()
    # Exported ancestors are authored as ``over``; promote to ``def`` so the
    # env subtree composes and can be traversed.
    for sp in ("/World", "/World/envs"):
        spec = layer.GetPrimAtPath(sp)
        if spec and spec.specifier == Sdf.SpecifierOver:
            spec.specifier = Sdf.SpecifierDef

    env_root = None

    def _v(path):
        # Record the first /World/envs/env_<N> prim found in the layer.
        nonlocal env_root
        if env_root is None and re.match(r"/World/envs/(env_\d+)$", str(path)):
            env_root = str(path)

    layer.Traverse(Sdf.Path.absoluteRootPath, _v)

    up_axis = UsdGeom.GetStageUpAxis(stage)
    resolvers = [SchemaResolverNewton(), SchemaResolverPhysx()]

    # Prototype builder for a single env; stamped once per world below.
    proto = ModelBuilder(up_axis=up_axis)
    SolverMuJoCo.register_custom_attributes(proto)
    if env_root:
        proto.add_usd(stage, root_path=env_root, schema_resolvers=resolvers,
                      load_visual_shapes=True, skip_mesh_approximation=True)
        proto.approximate_meshes("convex_hull", keep_visual_shapes=True)

    builder = ModelBuilder(up_axis=up_axis)
    # Load terrain/global collision geometry (under /World, outside /World/envs).
    world_prim = stage.GetPrimAtPath("/World")
    if world_prim and world_prim.IsValid():
        for child in world_prim.GetChildren():
            cp = child.GetPath().pathString
            if cp.startswith("/World/envs"):
                continue
            for p in Usd.PrimRange(child):
                if p.HasAPI(UsdPhysics.CollisionAPI):
                    builder.add_usd(stage, root_path=cp, load_visual_shapes=True,
                                    schema_resolvers=resolvers)
                    print(f" Loaded collision: {cp}")
                    break

    # One world per env, translated to the recorded root XY.
    for i, e in enumerate(envs):
        rx, ry = float(e["root_pos"][0]), float(e["root_pos"][1])
        builder.begin_world()
        builder.add_builder(proto, xform=wp.transform((rx, ry, 0.0), wp.quat_identity()))
        builder.end_world()

    model = builder.finalize(device=args.device)
    state = model.state()
    control = model.control()

    # Read solver config from first env's cfg_* export.
    cfg = dict(envs[0]["cfg"])
    _INTEGRATOR_MAP = {0: "euler", 1: "implicit", 2: "implicit", 3: "implicitfast"}
    _CONE_MAP = {0: "pyramidal", 1: "elliptic"}
    if "integrator" in cfg and isinstance(cfg["integrator"], (int, float, np.integer)):
        cfg["integrator"] = _INTEGRATOR_MAP.get(int(cfg["integrator"]), "implicitfast")
    if "cone" in cfg and isinstance(cfg["cone"], (int, float, np.integer)):
        cfg["cone"] = _CONE_MAP.get(int(cfg["cone"]), "pyramidal")
    dt = float(cfg.pop("dt", 0.005))
    cfg.pop("num_substeps", None)
    max_triangle_pairs = cfg.pop("max_triangle_pairs", None)

    solver = SolverMuJoCo(model, **cfg)

    pipeline_kwargs = {}
    if max_triangle_pairs is not None:
        pipeline_kwargs["max_triangle_pairs"] = int(max_triangle_pairs)
    pipeline = CollisionPipeline(model, **pipeline_kwargs)
    contacts = pipeline.contacts()

    print(f" Model: {model.body_count} bodies, {model.joint_count} joints, {model.world_count} worlds")

    # Write each world's recorded joint state and recompute body transforms.
    jc_starts = model.joint_coord_world_start.numpy()
    jd_starts = model.joint_dof_world_start.numpy()

    for w, e in enumerate(envs):
        jc0 = int(jc_starts[w])
        jd0 = int(jd_starts[w])
        jq_t = wp.to_torch(state.joint_q)
        jqd_t = wp.to_torch(state.joint_qd)
        jq_t[jc0 : jc0 + e["jc_per"]] = torch.tensor(e["jq"], dtype=torch.float32, device=args.device)
        jqd_t[jd0 : jd0 + e["jd_per"]] = torch.tensor(e["jqd"], dtype=torch.float32, device=args.device)

    eval_fk(model, state.joint_q, state.joint_qd, state, None)

    # Viewer setup, camera aimed at the first env's root.
    center = envs[0]["root_pos"]
    viewer = ViewerGL(width=1920, height=1080, headless=False)
    viewer.set_model(model)
    viewer.set_world_offsets((0.0, 0.0, 0.0))
    viewer.up_axis = 2
    viewer.camera.pos = wp.vec3(center[0] + 3.0, center[1] - 3.0, 2.0)
    viewer.camera.yaw, viewer.camera.pitch = 90.0, -15.0

    print(f"\nDisplaying {num_worlds} robot(s) at their pre-NaN (post-reset) state.")
    print("Close the viewer to exit, or use --step to step physics.\n")

    if args.step:
        # NOTE(review): no input is actually read; the state is shown for
        # ~1 s and then stepped — consider an input() prompt if interactive
        # pacing is wanted.
        print("Press Enter to step physics (reproduce the NaN)...")
        # Show initial state first
        for _ in range(60):
            if not viewer.is_running():
                sys.exit(0)
            viewer.begin_frame(0.0)
            viewer.log_state(state)
            viewer.end_frame()
            _time.sleep(1.0 / 60.0)

        # Apply the recorded forces and step the solver once.
        for w, e in enumerate(envs):
            if e["forces"] is not None:
                jd0 = int(jd_starts[w])
                jf = wp.to_torch(control.joint_f)
                jf[jd0 : jd0 + e["jd_per"]] = torch.tensor(
                    e["forces"], dtype=torch.float32, device=args.device
                )

        pipeline.collide(state, contacts)
        solver.step(state, state, control, contacts, dt)
        state.clear_forces()
        eval_fk(model, state.joint_q, state.joint_qd, state, None)

        has_nan = bool(wp.to_torch(state.body_q).isnan().any().item())
        niter = int(wp.to_torch(solver.mjw_data.solver_niter).max().item())
        print(f"After step: has_nan={has_nan}, max_solver_niter={niter}")

    sim_time = 0.0
    while viewer.is_running():
        viewer.begin_frame(sim_time)
        viewer.log_state(state)
        viewer.end_frame()
        _time.sleep(1.0 / 60.0)


if __name__ == "__main__":
    main()
def _print_summary(npz_path: str) -> None:
    """Print a text summary of the npz contents."""
    try:
        data = np.load(npz_path, allow_pickle=True)
    except Exception as err:
        print(f"Failed to load {npz_path}: {err}", file=sys.stderr)
        sys.exit(1)

    # Step count: explicit buffer_size when present and non-zero, otherwise
    # fall back to the joint_q frame count.
    n = int(data.get("buffer_size", data["joint_q"].shape[0] if "joint_q" in data else 0))
    if n == 0 and "joint_q" in data:
        n = data["joint_q"].shape[0]
    sim_time = float(data.get("sim_time", 0.0))

    print("=== NaN replay summary ===")
    print(f"File: {npz_path}")
    print(f"Buffer size (steps): {n}")
    print(f"Sim time at export: {sim_time}")
    if "exported_env_ids" in data:
        env_ids = data["exported_env_ids"]
        print(f"Exported env(s) only: {env_ids.tolist()}")

    usd_path = _find_usd_path(npz_path)
    if usd_path:
        print(f"Scene USD: {usd_path}")

    # Per-array NaN onset and value range.
    for key in ("body_q", "body_qd", "joint_q", "joint_qd"):
        if key not in data:
            continue
        arr = data[key]
        print(f"\n{key}: shape {arr.shape}, dtype {arr.dtype}")
        nan_steps = np.where(np.isnan(arr).reshape(arr.shape[0], -1).any(axis=1))[0]
        if len(nan_steps) > 0:
            print(f" First step with NaN: {int(nan_steps[0])} (last step {n-1} is the incident)")
        print(f" Min: {np.nanmin(arr):.6g}, Max: {np.nanmax(arr):.6g}")

    # Per-body and per-joint details at the last valid step (one before NaN).
    last_valid = n - 2 if n >= 2 else 0
    print(f"\n--- Per-body/joint state at step {last_valid} (last valid before NaN) ---")

    bqd = data["body_qd"] if "body_qd" in data else None
    if bqd is not None and last_valid < bqd.shape[0]:
        frame = bqd[last_valid]
        print(f"\nbody_qd (link velocities) [{frame.shape[0]} bodies]:")
        print(f" {'Body':>4} {'lin_x':>10} {'lin_y':>10} {'lin_z':>10} {'ang_x':>10} {'ang_y':>10} {'ang_z':>10}")
        for bi in range(frame.shape[0]):
            v = frame[bi]
            print(f" {bi:4d} {v[0]:10.4f} {v[1]:10.4f} {v[2]:10.4f} {v[3]:10.4f} {v[4]:10.4f} {v[5]:10.4f}")

    jqd = data["joint_qd"] if "joint_qd" in data else None
    if jqd is not None and last_valid < jqd.shape[0]:
        frame = jqd[last_valid]
        print(f"\njoint_qd (joint velocities) [{frame.shape[0]} dofs]:")
        for ji in range(frame.shape[0]):
            print(f" dof {ji:3d}: {frame[ji]:12.4f}")

    jq = data["joint_q"] if "joint_q" in data else None
    if jq is not None and last_valid < jq.shape[0]:
        frame = jq[last_valid]
        print(f"\njoint_q (joint positions) [{frame.shape[0]} coords]:")
        for ji in range(frame.shape[0]):
            print(f" coord {ji:3d}: {frame[ji]:12.6f}")

    # --- Diagnostics summary (only when diag_* arrays were exported) ---
    diag_keys = [k for k in data.files if k.startswith("diag_")] if hasattr(data, "files") else [k for k in data if k.startswith("diag_")]
    if diag_keys:
        print(f"\n{'='*70}")
        print(" DIAGNOSTICS")
        print(f"{'='*70}")

        _print_diag_timeline(data, n, last_valid)
        _print_root_cause_analysis(data, n, last_valid)
data + + if has_niter: + header += f" {'iters':>6}" + if has_qfrc: + header += f" {'|F_con|':>10}" + if has_torque: + header += f" {'|torque|':>10}" + if has_qacc: + header += f" {'|accel|':>10}" + if has_qm: + header += f" {'min_qM':>10}" + if has_cdist: + header += f" {'penetr':>10}" + print(header) + + for step in range(start, last_valid + 1): + row = f" {step:5d}" + if has_niter: + val = data["diag_solver_niter"][step] + v = int(val) if val.ndim == 0 else int(val.max()) + row += f" {v:6d}" + if has_qfrc: + val = data["diag_qfrc_constraint"][step] + row += f" {np.abs(val).max():10.2f}" + if has_torque: + val = data["diag_qfrc_actuator"][step] + row += f" {np.abs(val).max():10.2f}" + if has_qacc: + val = data["diag_qacc"][step] + row += f" {np.abs(val).max():10.2f}" + if has_qm: + val = data["diag_qM_diag_min"][step] + v = float(val) if val.ndim == 0 else float(val.min()) + row += f" {v:10.6f}" + if has_cdist: + val = data["diag_contact_dist_min"][step] + v = float(val) if val.ndim == 0 else float(val.min()) + row += f" {v:10.6f}" + print(row) + + +def _print_root_cause_analysis(data, n: int, last_valid: int) -> None: + """Auto-flag likely root cause based on diagnostic patterns.""" + print(f"\n ROOT CAUSE ANALYSIS:") + flags = [] + + if "diag_solver_niter" in data and last_valid >= 0: + niter = data["diag_solver_niter"] + max_iters = int(niter[last_valid].max()) if niter[last_valid].ndim > 0 else int(niter[last_valid]) + if max_iters >= 95: + flags.append(f" >> SOLVER NON-CONVERGENCE: solver used {max_iters} iterations at step {last_valid}") + + if "diag_qM_diag_min" in data and last_valid >= 0: + qm = data["diag_qM_diag_min"] + min_qm = float(qm[last_valid].min()) if qm[last_valid].ndim > 0 else float(qm[last_valid]) + if min_qm < 1e-6: + flags.append(f" >> NEAR-SINGULAR MASS MATRIX: min(diag(qM)) = {min_qm:.2e} at step {last_valid}") + + if "diag_contact_dist_min" in data and last_valid >= 0: + cdist = data["diag_contact_dist_min"] + min_dist = 
float(cdist[last_valid].min()) if cdist[last_valid].ndim > 0 else float(cdist[last_valid]) + if min_dist < -0.01: + flags.append(f" >> DEEP PENETRATION: contact dist = {min_dist:.4f} m at step {last_valid}") + + if "diag_qfrc_constraint" in data and last_valid >= 0: + frc = data["diag_qfrc_constraint"] + max_frc = float(np.abs(frc[last_valid]).max()) + if max_frc > 1e4: + flags.append(f" >> EXTREME CONTACT FORCE: |qfrc_constraint| = {max_frc:.1f} at step {last_valid}") + + if "diag_qfrc_actuator" in data and last_valid >= 0: + trq = data["diag_qfrc_actuator"] + max_trq = float(np.abs(trq[last_valid]).max()) + if max_trq > 1e4: + flags.append(f" >> EXTREME ACTUATOR TORQUE: |qfrc_actuator| = {max_trq:.1f} at step {last_valid}") + + if "diag_qacc" in data and last_valid >= 0: + acc = data["diag_qacc"] + max_acc = float(np.abs(acc[last_valid]).max()) + if max_acc > 1e6: + has_normal_forces = True + if "diag_qfrc_constraint" in data: + has_normal_forces = float(np.abs(data["diag_qfrc_constraint"][last_valid]).max()) < 1e4 + if has_normal_forces: + flags.append(f" >> ACCELERATION SPIKE (forces normal): |qacc| = {max_acc:.1f} => likely inv(qM) blowup") + + # Teleport detection from body_q + if "body_q" in data and last_valid >= 1: + bq = data["body_q"] + pos_curr = bq[last_valid, :, :3] + pos_prev = bq[last_valid - 1, :, :3] + jump = np.linalg.norm(pos_curr - pos_prev, axis=-1).max() + if jump > 1.0: + worst = int(np.argmax(np.linalg.norm(pos_curr - pos_prev, axis=-1))) + flags.append(f" >> POSITION TELEPORT: body {worst} jumped {jump:.2f} m between steps {last_valid-1}->{last_valid}") + + if flags: + for f in flags: + print(f) + else: + print(" No obvious root cause detected from diagnostics.") + + +def _plot(npz_path: str, output_dir: str | None) -> None: + """Generate matplotlib plots of the state trajectories.""" + import numpy as np + + data = np.load(npz_path, allow_pickle=True) + n = int(data.get("buffer_size", data["joint_q"].shape[0] if "joint_q" in data else 0)) 
+ + try: + import matplotlib + + if output_dir: + matplotlib.use("Agg") + import matplotlib.pyplot as plt + except ImportError: + print("matplotlib not available, skipping --plot", file=sys.stderr) + return + + fig, axes = plt.subplots(2, 1, figsize=(10, 6), sharex=True) + t = np.arange(n) + + if "body_q" in data: + bq = data["body_q"] + if bq.ndim == 3: + pos = bq[:, 0, :3] + elif bq.ndim == 2 and bq.shape[1] >= 3: + pos = bq[:, :3] + else: + pos = None + if pos is not None: + for i, label in enumerate("xyz"): + axes[0].plot(t, pos[:, i], label=label) + axes[0].set_ylabel("Body 0 position [m]") + axes[0].legend(loc="upper right") + axes[0].set_title("First body position (world)") + axes[0].grid(True, alpha=0.3) + + if "joint_qd" in data: + jqd = data["joint_qd"] + ndof = min(3, jqd.shape[1]) + for i in range(ndof): + axes[1].plot(t, jqd[:, i], label=f"qd[{i}]") + axes[1].set_ylabel("Joint velocity") + axes[1].set_xlabel("Step (last = NaN incident)") + axes[1].legend(loc="upper right") + axes[1].set_title("First 3 joint velocities") + axes[1].grid(True, alpha=0.3) + + plt.tight_layout() + if output_dir: + os.makedirs(output_dir, exist_ok=True) + out_path = os.path.join(output_dir, "nan_replay_plot.png") + plt.savefig(out_path, dpi=150) + print(f"\nPlot saved to {out_path}") + else: + plt.show() + + +def _find_usd_path(npz_path: str) -> str | None: + """Find the companion .usd file for a .npz export.""" + base = os.path.splitext(npz_path)[0] + for ext in (".usd", ".usda", ".usdc"): + candidate = base + ext + if os.path.isfile(candidate): + return candidate + return None + + +def _prepare_stage(usd_path: str): + """Open the exported USD and fix ``over`` ancestors so the stage composes. + + The NaN exporter uses ``Sdf.CopySpec`` which preserves the original + specifier. Ancestor prims (``/World``, ``/World/envs``) are stored as + ``over`` because they were overrides on the live stage. Converting them + to ``def`` lets USD compose and traverse the env subtree. 
def _prepare_stage(usd_path: str):
    """Open the exported USD and fix ``over`` ancestors so the stage composes.

    The NaN exporter uses ``Sdf.CopySpec`` which preserves the original
    specifier. Ancestor prims (``/World``, ``/World/envs``) are stored as
    ``over`` because they were overrides on the live stage. Converting them
    to ``def`` lets USD compose and traverse the env subtree.

    Returns:
        (stage, env_root_path) — the opened stage and the path to the
        environment prim (e.g. ``/World/envs/env_1303``).
    """
    import re

    from pxr import Sdf, Usd

    stage = Usd.Stage.Open(usd_path)
    root_layer = stage.GetRootLayer()

    # Promote over -> def on the fixed ancestor chain.
    for ancestor_path in ("/World", "/World/envs"):
        prim_spec = root_layer.GetPrimAtPath(ancestor_path)
        if prim_spec and prim_spec.specifier == Sdf.SpecifierOver:
            prim_spec.specifier = Sdf.SpecifierDef

    env_pattern = re.compile(r"/World/envs/(env_\d+)$")
    found = {"env_root": None}

    def _visit(path):
        # Keep only the first matching env prim path.
        if found["env_root"] is None and env_pattern.match(str(path)):
            found["env_root"] = str(path)

    root_layer.Traverse(Sdf.Path.absoluteRootPath, _visit)
    return stage, found["env_root"]
def _replay_newton(npz_path: str, speed: float, loop: bool) -> None:
    """Replay state snapshots in the Newton ViewerGL.

    Builds a Newton Model from the exported USD scene, creates a State, then
    steps through each recorded snapshot writing body_q/body_qd/joint_q/joint_qd
    into the state and rendering each frame.

    Fix vs. original: removed the redundant local ``import numpy as np``
    (the module already imports numpy at the top); the heavy Newton/warp/pxr
    imports stay local so the summary/plot paths work without them.

    Args:
        npz_path: Path to the exported ``nan_replay_*.npz``.
        speed: Playback speed multiplier (frame delay is ``dt / speed``).
        loop: Whether to restart playback after the last snapshot.
    """
    import warp as wp
    from newton import ModelBuilder
    from newton.viewer import ViewerGL
    from pxr import Usd, UsdGeom, UsdPhysics

    data = np.load(npz_path, allow_pickle=True)
    n_steps = int(data.get("buffer_size", 0))
    if n_steps == 0 and "joint_q" in data:
        n_steps = data["joint_q"].shape[0]

    usd_path = _find_usd_path(npz_path)
    if usd_path is None:
        print(
            "No companion .usd file found next to the .npz.\n"
            "The Newton viewer requires the exported scene USD to build a Model.\n"
            "Re-run training with the NaN replay buffer enabled to export both .npz and .usd.",
            file=sys.stderr,
        )
        sys.exit(1)

    print(f"Building Newton model from: {usd_path}")
    stage, env_root = _prepare_stage(usd_path)
    up_axis = UsdGeom.GetStageUpAxis(stage)

    builder = ModelBuilder(up_axis=up_axis)
    if env_root:
        builder.add_usd(stage, root_path=env_root, load_visual_shapes=False)
    else:
        builder.add_usd(stage, load_visual_shapes=False)
    # Load any global collision geometry (terrain, ground planes, etc.)
    # that lives under /World but outside /World/envs.
    # NOTE(review): when env_root is None the whole stage was already added
    # above, so this may add /World children twice — confirm intended.
    world_prim = stage.GetPrimAtPath("/World")
    if world_prim and world_prim.IsValid():
        for child in world_prim.GetChildren():
            child_path = child.GetPath().pathString
            if child_path.startswith("/World/envs"):
                continue
            has_collision = any(
                p.HasAPI(UsdPhysics.CollisionAPI) for p in Usd.PrimRange(child)
            )
            if has_collision:
                builder.add_usd(stage, root_path=child_path, load_visual_shapes=True)
                print(f"Loaded collision geometry from {child_path}")
    model = builder.finalize(device="cpu")
    state = model.state()

    if model.body_count == 0:
        print("Warning: model has 0 bodies. The USD may not contain resolvable physics prims.", file=sys.stderr)

    body_q_all = data.get("body_q")
    body_qd_all = data.get("body_qd")
    joint_q_all = data.get("joint_q")
    joint_qd_all = data.get("joint_qd")
    sim_time_at_export = float(data.get("sim_time", 0.0))

    dt = 1.0 / 60.0
    env_ids = data["exported_env_ids"].tolist() if "exported_env_ids" in data else []
    print(f"Model: {model.body_count} bodies, {model.joint_count} joints")

    viewer = ViewerGL(width=1920, height=1080, headless=False)
    viewer.set_model(model)
    viewer.set_world_offsets((0.0, 0.0, 0.0))
    viewer.up_axis = 2  # Z-up

    # Work around Newton ViewerGL imgui color_edit3 incompatibility (expects
    # ImVec4 but receives a plain tuple). Patch before the first frame so we
    # never leave imgui in a half-finished state.
    viewer._render_left_panel = lambda: None

    # Enable wireframe rendering to see mesh triangle structure
    viewer.renderer.draw_wireframe = True

    # Point camera at the robot using the first frame's body positions.
    if body_q_all is not None and body_q_all.shape[0] > 0:
        first_frame = body_q_all[0]
        valid_mask = np.isfinite(first_frame).all(axis=-1)
        if valid_mask.any():
            positions = first_frame[valid_mask, :3]
            center = positions.mean(axis=0)
            extent = float(np.linalg.norm(positions.max(axis=0) - positions.min(axis=0)))
            cam_dist = max(extent * 2.0, 1.5)
            viewer.camera.pos = wp.vec3(
                float(center[0]) + cam_dist * 0.5,
                float(center[1]) - cam_dist * 0.5,
                float(center[2]) + cam_dist * 0.4,
            )
            direction = np.array([center[0], center[1], center[2]]) - np.array(
                [float(viewer.camera.pos[0]), float(viewer.camera.pos[1]), float(viewer.camera.pos[2])]
            )
            viewer.camera.yaw = float(np.degrees(np.arctan2(direction[1], direction[0])))
            horiz = float(np.sqrt(direction[0] ** 2 + direction[1] ** 2))
            viewer.camera.pitch = float(np.degrees(np.arctan2(direction[2], horiz)))
            print(f"Camera target: ({center[0]:.1f}, {center[1]:.1f}, {center[2]:.1f})")

    print(f"Replaying {n_steps} snapshots at {speed}x speed (loop={loop})")
    if env_ids:
        print(f"Exported env_id(s): {env_ids}")
    print("Press ESC in the viewer window to exit.")

    frame_delay = dt / speed
    sim_time = sim_time_at_export - n_steps * dt

    def _assign_if_compatible(target, frame_data, dtype):
        """Assign frame data to a warp state array if shapes are compatible."""
        if target is None or frame_data is None:
            return
        if frame_data.shape[0] == target.shape[0]:
            target.assign(wp.array(frame_data, dtype=dtype, device="cpu"))

    # Build short name tables from model labels
    body_names = [lbl.rsplit("/", 1)[-1] for lbl in (model.body_label if hasattr(model, "body_label") else [])]
    joint_names = [lbl.rsplit("/", 1)[-1] for lbl in (model.joint_label if hasattr(model, "joint_label") else [])]
    # joint_q has one coord per joint (may have >1 for free joints)
    # joint_qd has one dof per joint
    # For display, map dof index -> joint name (approximate: 1 dof per joint for revolute)

    def _short(name: str, maxlen: int = 14) -> str:
        """Truncate/pad a label to a fixed display width."""
        return name[:maxlen].ljust(maxlen)

    # Preload diagnostic arrays from npz (any may be absent).
    diag_niter = data.get("diag_solver_niter")
    diag_qfrc = data.get("diag_qfrc_constraint")
    diag_torque = data.get("diag_qfrc_actuator")
    diag_qacc = data.get("diag_qacc")
    diag_qm = data.get("diag_qM_diag_min")
    diag_cdist = data.get("diag_contact_dist_min")

    def _print_frame_state(idx, bq, bqd, jq, jqd):
        """Print full per-body/joint state + diagnostics for the current frame."""
        print(f"\n{'='*100}")
        print(f" Step {idx}/{n_steps}")

        # Diagnostics header line
        parts = []
        if diag_niter is not None and idx < diag_niter.shape[0]:
            v = diag_niter[idx]
            iters = int(v) if v.ndim == 0 else int(v.max())
            warn = " !!" if iters >= 95 else ""
            parts.append(f"solver_iters={iters}{warn}")
        if diag_qfrc is not None and idx < diag_qfrc.shape[0]:
            parts.append(f"|F_con|={np.abs(diag_qfrc[idx]).max():.1f}")
        if diag_torque is not None and idx < diag_torque.shape[0]:
            parts.append(f"|torque|={np.abs(diag_torque[idx]).max():.1f}")
        if diag_qacc is not None and idx < diag_qacc.shape[0]:
            parts.append(f"|accel|={np.abs(diag_qacc[idx]).max():.1f}")
        if diag_qm is not None and idx < diag_qm.shape[0]:
            v = diag_qm[idx]
            qm_min = float(v) if v.ndim == 0 else float(v.min())
            warn = " !!" if qm_min < 1e-6 else ""
            parts.append(f"min_qM={qm_min:.6f}{warn}")
        if diag_cdist is not None and idx < diag_cdist.shape[0]:
            v = diag_cdist[idx]
            cd = float(v) if v.ndim == 0 else float(v.min())
            warn = " !!" if cd < -0.01 else ""
            parts.append(f"penetr={cd:.4f}{warn}")
        if parts:
            print(f" {' | '.join(parts)}")

        print(f"{'='*100}")

        if bqd is not None:
            print(f" {'Body':<14} {'lin_x':>9} {'lin_y':>9} {'lin_z':>9} {'ang_x':>9} {'ang_y':>9} {'ang_z':>9}")
            for bi in range(bqd.shape[0]):
                name = _short(body_names[bi]) if bi < len(body_names) else f"body_{bi:<8}"
                v = bqd[bi]
                print(f" {name} {v[0]:9.2f} {v[1]:9.2f} {v[2]:9.2f} {v[3]:9.2f} {v[4]:9.2f} {v[5]:9.2f}")

        if jqd is not None:
            print(f" {'Joint':<14} {'vel':>12} {'pos':>12}")
            for ji in range(jqd.shape[0]):
                name = _short(joint_names[ji]) if ji < len(joint_names) else f"dof_{ji:<9}"
                pos_val = f"{jq[ji]:12.4f}" if jq is not None and ji < jq.shape[0] else " N/A"
                print(f" {name} {jqd[ji]:12.4f} {pos_val}")

    running = True
    while running and viewer.is_running():
        for step_idx in range(n_steps):
            if not viewer.is_running():
                running = False
                break

            # Write the recorded snapshot into the live state (skipping any
            # array whose length doesn't match the built model).
            if body_q_all is not None and step_idx < body_q_all.shape[0]:
                _assign_if_compatible(state.body_q, body_q_all[step_idx], wp.transform)
            if body_qd_all is not None and step_idx < body_qd_all.shape[0]:
                _assign_if_compatible(state.body_qd, body_qd_all[step_idx], wp.spatial_vector)
            if joint_q_all is not None and step_idx < joint_q_all.shape[0]:
                _assign_if_compatible(state.joint_q, joint_q_all[step_idx], float)
            if joint_qd_all is not None and step_idx < joint_qd_all.shape[0]:
                _assign_if_compatible(state.joint_qd, joint_qd_all[step_idx], float)

            _print_frame_state(
                step_idx,
                body_q_all[step_idx] if body_q_all is not None and step_idx < body_q_all.shape[0] else None,
                body_qd_all[step_idx] if body_qd_all is not None and step_idx < body_qd_all.shape[0] else None,
                joint_q_all[step_idx] if joint_q_all is not None and step_idx < joint_q_all.shape[0] else None,
                joint_qd_all[step_idx] if joint_qd_all is not None and step_idx < joint_qd_all.shape[0] else None,
            )

            sim_time += dt
            viewer.begin_frame(sim_time)
            viewer.log_state(state)
            viewer.end_frame()

            time.sleep(frame_delay)

        if not loop:
            # Hold the final frame until the user closes the window.
            print("Replay complete. Close the viewer window to exit.")
            while viewer.is_running():
                viewer.begin_frame(sim_time)
                viewer.log_state(state)
                viewer.end_frame()
                time.sleep(frame_delay)
            break
def main():
    """Parse CLI arguments, then dispatch summary / plot / viewer replay."""
    parser = argparse.ArgumentParser(
        description="Load NaN replay npz and print summary, plot, or replay in Newton viewer."
    )
    parser.add_argument(
        "npz_path",
        type=str,
        help="Path to the nan_replay_*.npz file exported when NaN was detected.",
    )
    parser.add_argument(
        "--plot",
        action="store_true",
        help="Plot body_q positions (first body, xyz) and joint_qd (first 3 dof) over time.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=None,
        help="Directory to save plot images. If not set, plots are shown interactively.",
    )
    parser.add_argument(
        "--visualizer",
        type=str,
        default=None,
        choices=["newton"],
        help="Visualizer backend for replay. Currently supports: newton.",
    )
    parser.add_argument(
        "--speed",
        type=float,
        default=1.0,
        help="Playback speed multiplier (e.g. 0.25 for quarter speed). Default: 1.0.",
    )
    parser.add_argument(
        "--no-loop",
        action="store_true",
        help="Play the replay only once instead of looping.",
    )
    opts = parser.parse_args()

    # Summary always runs; plotting and replay are opt-in.
    _print_summary(opts.npz_path)

    if opts.plot:
        _plot(opts.npz_path, opts.output_dir)

    if opts.visualizer == "newton":
        _replay_newton(opts.npz_path, speed=opts.speed, loop=not opts.no_loop)


if __name__ == "__main__":
    main()
+ +Replays a NaN debug export (ring npz + companion USD) to reproduce the failure. + +The ring npz contains ALL envs' data across 200 frames: + - joint_q, joint_qd, body_q, body_qd (full flat arrays per frame) + - diag_qfrc_applied (per-world per-step forces) + - cfg_* keys (solver + collision pipeline config) + - exported_env_ids (which env(s) NaN'd) + +Replay strategy: for each frame in the ring, reset the simulation to the +recorded joint state, apply the recorded forces, step once, and check if +that step produces NaN. This tests each step independently with the exact +inputs from training. + +Usage: + python scripts/tools/repro_solver_nan.py nan_debug/nan_replay_.npz + python scripts/tools/repro_solver_nan.py nan_debug/nan_replay_.npz --nan_only --visualize +""" + +from __future__ import annotations + +import argparse +import re +import sys +import time as _time + +import numpy as np +import torch +import warp as wp + +from newton import CollisionPipeline, ModelBuilder, eval_fk +from newton._src.usd.schemas import SchemaResolverNewton, SchemaResolverPhysx +from newton.solvers import SolverMuJoCo + + +def _find_valid_range(body_q: np.ndarray, bad_world: int = -1, bodies_per_env: int = 17) -> tuple[int, int]: + """Return (first_valid, last_valid) frame indices. + + For full-world exports, checks only the bad_world's bodies. + """ + n = body_q.shape[0] + if bad_world >= 0 and body_q.ndim == 3: + b0 = bad_world * bodies_per_env + b1 = b0 + bodies_per_env + check = body_q[:, b0:b1, :] + else: + check = body_q + first = next((s for s in range(n) if np.isfinite(check[s]).all()), 0) + last = next((s for s in range(n - 1, -1, -1) if np.isfinite(check[s]).all()), 0) + return first, last + + +def _build_model(usd_path: str, world_positions: np.ndarray, device: str): + """Build multi-world Newton model from exported USD. + + Each world is placed at its recorded root body position so that + broad-phase collision covers the correct terrain patch. 
def _build_model(usd_path: str, world_positions: np.ndarray, device: str):
    """Build multi-world Newton model from exported USD.

    Each world is placed at its recorded root body position so that
    broad-phase collision covers the correct terrain patch.

    Args:
        usd_path: Path to the exported scene USD.
        world_positions: ``(num_worlds, >=2)`` array of per-world root
            positions; only x/y are used for placement.
        device: Warp device string passed to ``ModelBuilder.finalize``.

    Returns:
        The finalized Newton model.

    Fix vs. original: the per-child collision scan now uses ``any(...)``,
    consistent with the replay tool's detection of collision subtrees.
    """
    from pxr import Sdf, Usd, UsdGeom, UsdPhysics

    num_worlds = world_positions.shape[0]

    stage = Usd.Stage.Open(usd_path)
    layer = stage.GetRootLayer()
    # Exported ancestors are authored as ``over``; promote to ``def`` so the
    # env subtree composes and can be traversed.
    for sp in ("/World", "/World/envs"):
        spec = layer.GetPrimAtPath(sp)
        if spec and spec.specifier == Sdf.SpecifierOver:
            spec.specifier = Sdf.SpecifierDef

    env_root = None

    def _v(path):
        # Record the first /World/envs/env_<N> prim found in the layer.
        nonlocal env_root
        if env_root is None and re.match(r"/World/envs/(env_\d+)$", str(path)):
            env_root = str(path)

    layer.Traverse(Sdf.Path.absoluteRootPath, _v)

    up_axis = UsdGeom.GetStageUpAxis(stage)
    resolvers = [SchemaResolverNewton(), SchemaResolverPhysx()]

    # Prototype builder holding a single env; stamped once per world below.
    proto = ModelBuilder(up_axis=up_axis)
    if env_root:
        proto.add_usd(stage, root_path=env_root, schema_resolvers=resolvers,
                      load_visual_shapes=False)

    builder = ModelBuilder(up_axis=up_axis)
    # Terrain / global collision geometry lives under /World, outside /World/envs.
    world_prim = stage.GetPrimAtPath("/World")
    if world_prim and world_prim.IsValid():
        for child in world_prim.GetChildren():
            cp = child.GetPath().pathString
            if cp.startswith("/World/envs"):
                continue
            if any(p.HasAPI(UsdPhysics.CollisionAPI) for p in Usd.PrimRange(child)):
                builder.add_usd(stage, root_path=cp, load_visual_shapes=False)
                print(f" Loaded collision: {cp}")

    for w in range(num_worlds):
        rx, ry = float(world_positions[w, 0]), float(world_positions[w, 1])
        builder.begin_world()
        builder.add_builder(proto, xform=wp.transform((rx, ry, 0.0), wp.quat_identity()))
        builder.end_world()

    model = builder.finalize(device=device)
    nw = getattr(model, "world_count", num_worlds)
    print(f" Model: {model.body_count} bodies, {model.joint_count} joints, {nw} worlds")
    return model
No FK -- uses recorded body_q/body_qd.""" + jc_starts = model.joint_coord_world_start.numpy() + jd_starts = model.joint_dof_world_start.numpy() + nw = getattr(model, "world_count", 1) or 1 + jq_t = wp.to_torch(state.joint_q) + jqd_t = wp.to_torch(state.joint_qd) + total_jc = int(jc_starts[nw]) + total_jd = int(jd_starts[nw]) + + if len(jq) == total_jc: + jq_t.copy_(torch.tensor(jq, dtype=torch.float32, device=device)) + jqd_t.copy_(torch.tensor(jqd, dtype=torch.float32, device=device)) + else: + jq_dev = torch.tensor(jq, dtype=torch.float32, device=device) + jqd_dev = torch.tensor(jqd, dtype=torch.float32, device=device) + for w in range(nw): + jc0, jc1 = int(jc_starts[w]), int(jc_starts[w + 1]) + jd0, jd1 = int(jd_starts[w]), int(jd_starts[w + 1]) + if jc1 - jc0 == len(jq): + jq_t[jc0:jc1] = jq_dev + if jd1 - jd0 == len(jqd): + jqd_t[jd0:jd1] = jqd_dev + + if bq is not None and state.body_q is not None: + bq_flat = bq.reshape(-1) if bq.ndim > 1 else bq + wp.to_torch(state.body_q).view(-1).copy_( + torch.tensor(bq_flat, dtype=torch.float32, device=device)) + if bqd is not None and state.body_qd is not None: + bqd_flat = bqd.reshape(-1) if bqd.ndim > 1 else bqd + wp.to_torch(state.body_qd).view(-1).copy_( + torch.tensor(bqd_flat, dtype=torch.float32, device=device)) + + if bq is None: + eval_fk(model, state.joint_q, state.joint_qd, state, None) + + +def _apply_forces(model, control, qfrc: np.ndarray, device: str): + """Write forces to control.joint_f.""" + jd_starts = model.joint_dof_world_start.numpy() + nw = getattr(model, "world_count", 1) or 1 + joint_f_t = wp.to_torch(control.joint_f) + total_jd = int(jd_starts[nw]) + + if qfrc.ndim == 2: + nv = qfrc.shape[1] + qfrc_dev = torch.tensor(qfrc, dtype=torch.float32, device=device) + for w in range(min(nw, qfrc.shape[0])): + jd0 = int(jd_starts[w]) + joint_f_t[jd0 : jd0 + nv] = qfrc_dev[w] + elif len(qfrc) == total_jd: + joint_f_t.copy_(torch.tensor(qfrc, dtype=torch.float32, device=device)) + else: + qfrc_dev = 
torch.tensor(qfrc, dtype=torch.float32, device=device) + for w in range(nw): + jd0 = int(jd_starts[w]) + joint_f_t[jd0 : jd0 + len(qfrc)] = qfrc_dev + + +def _read_config(ring) -> dict: + """Read solver + collision config from cfg_* keys in the npz.""" + _INTEGRATOR_MAP = {0: "euler", 1: "implicit", 2: "implicit", 3: "implicitfast"} + _CONE_MAP = {0: "pyramidal", 1: "elliptic"} + cfg = {} + for k in ring.files: + if k.startswith("cfg_"): + v = ring[k] + cfg[k[4:]] = v.item() if v.ndim == 0 or (v.ndim == 1 and v.shape[0] == 1) else v[0] + if "integrator" in cfg and isinstance(cfg["integrator"], (int, float, np.integer)): + cfg["integrator"] = _INTEGRATOR_MAP.get(int(cfg["integrator"]), "implicitfast") + if "cone" in cfg and isinstance(cfg["cone"], (int, float, np.integer)): + cfg["cone"] = _CONE_MAP.get(int(cfg["cone"]), "pyramidal") + return cfg + + +def main(): + parser = argparse.ArgumentParser(description="Reproduce MuJoCo Warp solver NaN.") + parser.add_argument("npz_path", type=str, help="Ring buffer npz from NaN debug export") + parser.add_argument("--nan_only", action="store_true", + help="Load only the NaN'd env (1 world).") + parser.add_argument("--device", type=str, default="cuda:0") + parser.add_argument("--visualize", action="store_true") + args = parser.parse_args() + + # --- Load data --- + ring = np.load(args.npz_path, allow_pickle=True) + n_frames = int(ring.get("buffer_size", ring["body_q"].shape[0])) + env_ids = ring.get("exported_env_ids", np.array([0])) + bad_world = int(env_ids[0]) + + first_valid, last_valid = _find_valid_range(ring["body_q"], bad_world, bodies_per_env=17) + nan_frame = min(last_valid + 1, n_frames - 1) + + jq_all = ring["joint_q"] # (n_frames, total_jc) + jqd_all = ring["joint_qd"] # (n_frames, total_jd) + bq_all = ring["body_q"] # (n_frames, total_bodies, 7) + bqd_all = ring.get("body_qd") # (n_frames, total_bodies, 6) or None + + force_history = ring.get("diag_qfrc_applied") # (n_frames, num_worlds, nv) or None + + # Detect 
world layout + coords_per_env = 19 + dofs_per_env = 18 + export_worlds = max(1, jq_all.shape[-1] // coords_per_env) + is_full = export_worlds > 1 + bodies_per_env = bq_all.shape[1] // export_worlds if is_full else bq_all.shape[1] + + # Per-world root positions for model building (from first valid frame) + if is_full: + all_root_pos = bq_all[first_valid, ::bodies_per_env, :3] + else: + all_root_pos = bq_all[first_valid, 0:1, :3] + + # Camera target + if is_full: + robot_pos = bq_all[last_valid, bad_world * bodies_per_env, :3] + else: + robot_pos = bq_all[last_valid, 0, :3] + + # World count and data selection + if args.nan_only: + num_worlds = 1 + world_positions = all_root_pos[bad_world : bad_world + 1] if is_full else all_root_pos + mode = f"NaN'd env {bad_world} only" + else: + num_worlds = export_worlds + world_positions = all_root_pos + mode = f"all {num_worlds} envs" + + # Read config + cfg = _read_config(ring) + print(f" Failing env: {bad_world}") + print(f" Ring: {n_frames} frames, valid [{first_valid}, {last_valid}], NaN at {nan_frame}") + print(f" Mode: {mode}") + print(f" Config: {cfg}") + + dt = float(cfg.pop("dt", 1.0 / 200.0)) + num_substeps = int(cfg.pop("num_substeps", 1)) + max_triangle_pairs = cfg.pop("max_triangle_pairs", None) + if max_triangle_pairs is not None: + max_triangle_pairs = int(max_triangle_pairs) + + # Remaining cfg keys are solver kwargs + solver_kwargs = cfg + + # --- Build model --- + usd_path = args.npz_path.replace(".npz", ".usd") + print(f"\nBuilding {num_worlds} worlds from {usd_path}...") + model = _build_model(usd_path, world_positions, args.device) + solver = SolverMuJoCo(model, **solver_kwargs) + + pipeline_kwargs = {} + if max_triangle_pairs is not None: + pipeline_kwargs["max_triangle_pairs"] = max_triangle_pairs + pipeline = CollisionPipeline(model, **pipeline_kwargs) + contacts = pipeline.contacts() + state = model.state() + control = model.control() + + # --- Viewer --- + viewer = None + if args.visualize: + from 
newton.viewer import ViewerGL + viewer = ViewerGL(width=1920, height=1080, headless=False) + viewer.set_model(model) + viewer.set_world_offsets((0.0, 0.0, 0.0)) + viewer.up_axis = 2 + viewer._render_left_panel = lambda: None + viewer.renderer.draw_wireframe = True + viewer.camera.pos = wp.vec3(robot_pos[0] + 3.0, robot_pos[1] - 3.0, 2.0) + viewer.camera.yaw, viewer.camera.pitch = 90.0, -15.0 + + # --- Load pre-NaN mjw_data if available --- + pre_nan_mjw_path = args.npz_path.replace(".npz", "_pre_nan_mjw.npz") + pre_nan_mjw = None + try: + pre_nan_mjw = np.load(pre_nan_mjw_path, allow_pickle=True) + print(f" Loaded pre-NaN mjw_data: {sorted(pre_nan_mjw.files)}") + except FileNotFoundError: + print(f" No pre-NaN mjw_data found ({pre_nan_mjw_path})") + + # --- Replay: reset state each step, apply recorded forces, step once --- + print(f"\nReplaying frames {first_valid}..{nan_frame} ({nan_frame - first_valid + 1} steps)...") + sim_time = 0.0 + max_iter = solver_kwargs["iterations"] + + for frame in range(first_valid, nan_frame + 1): + if viewer is not None and not viewer.is_running(): + break + + # Get state and forces for this frame + if args.nan_only and is_full: + w = bad_world + jq = jq_all[frame, w * coords_per_env : (w + 1) * coords_per_env] + jqd = jqd_all[frame, w * dofs_per_env : (w + 1) * dofs_per_env] + bq = bq_all[frame, w * bodies_per_env : (w + 1) * bodies_per_env] + bqd = bqd_all[frame, w * bodies_per_env : (w + 1) * bodies_per_env] if bqd_all is not None else None + else: + jq = jq_all[frame] + jqd = jqd_all[frame] + bq = bq_all[frame] + bqd = bqd_all[frame] if bqd_all is not None else None + + if not np.isfinite(jq).all() or not np.isfinite(bq).all(): + print(f" Frame {frame}: state has NaN/Inf, skipping") + continue + + # Reset to exact recorded state (no FK recomputation) + _set_state(model, state, jq, jqd, bq, bqd, args.device) + + # Apply recorded forces + if force_history is not None: + forces = force_history[frame] + if args.nan_only and 
forces.ndim == 2 and is_full: + forces = forces[bad_world] + _apply_forces(model, control, forces, args.device) + + # On the last valid frame, inject pre-NaN mjw_data to reproduce warm-start + if frame == nan_frame - 1 and pre_nan_mjw is not None: + # Inject pre-NaN mjw_data and call solver internals directly. + # This bypasses collide() and _convert_contacts which would + # overwrite the recorded contact state. + mjd = solver.mjw_data + injected = 0 + for k in pre_nan_mjw.files: + if k.startswith("efc_") or k.startswith("contact_"): + prefix, attr = k.split("_", 1) + sub = getattr(mjd, prefix, None) + if sub is not None: + target = getattr(sub, attr, None) + if target is not None and isinstance(target, wp.array): + src = pre_nan_mjw[k] + if target.shape == src.shape: + target.assign(wp.array(src, dtype=target.dtype, device=args.device)) + injected += 1 + else: + target = getattr(mjd, k, None) + if target is not None and isinstance(target, wp.array): + src = pre_nan_mjw[k] + if target.shape == src.shape: + target.assign(wp.array(src, dtype=target.dtype, device=args.device)) + injected += 1 + print(f" Injected {injected} pre-NaN arrays, calling _mujoco_warp_step directly") + solver._mujoco_warp_step() + solver._update_newton_state(model, state, mjd, state_prev=state) + else: + # Normal step: collide → solve → clear + pipeline.collide(state, contacts) + for _sub in range(num_substeps): + solver.step(state, state, control, contacts, dt) + state.clear_forces() + sim_time += dt * num_substeps + + # Check result + niter = wp.to_torch(solver.mjw_data.solver_niter) + body_q = wp.to_torch(state.body_q) + has_nan = bool(body_q.isnan().any().item()) + max_niter = int(niter.max().item()) + + if viewer is not None: + viewer.begin_frame(sim_time) + viewer.log_state(state) + viewer.end_frame() + _time.sleep(dt * 0.5) + + if has_nan: + nan_worlds = torch.where(body_q.isnan().any(dim=-1))[0].tolist() + nan_world_ids = [b // bodies_per_env for b in nan_worlds[:5]] + print(f"\n*** 
NaN REPRODUCED at frame {frame} ***") + print(f" niter={max_niter}, NaN in worlds: {nan_world_ids}") + if viewer is not None: + while viewer.is_running(): + viewer.begin_frame(sim_time) + viewer.log_state(state) + viewer.end_frame() + _time.sleep(0.016) + sys.exit(0) + + if frame % 10 == 0 or max_niter >= max_iter: + print(f" Frame {frame}: niter={max_niter}, ok") + + print(f"\nCompleted replay. Looping from start...") + while True: + for frame in range(first_valid, nan_frame): + if viewer is not None and not viewer.is_running(): + sys.exit(0) + + if args.nan_only and is_full: + w = bad_world + bq = bq_all[frame, w * bodies_per_env : (w + 1) * bodies_per_env] + else: + bq = bq_all[frame] + + if not np.isfinite(bq).all(): + continue + + bq_flat = bq.reshape(-1) + wp.to_torch(state.body_q).view(-1).copy_( + torch.tensor(bq_flat, dtype=torch.float32, device=args.device)) + sim_time += dt * num_substeps + + if viewer is not None: + viewer.begin_frame(sim_time) + viewer.log_state(state) + viewer.end_frame() + _time.sleep(dt * 0.5) + + +if __name__ == "__main__": + main() diff --git a/source/isaaclab/isaaclab/envs/manager_based_rl_env.py b/source/isaaclab/isaaclab/envs/manager_based_rl_env.py index 0082912ec59c..803916739edf 100644 --- a/source/isaaclab/isaaclab/envs/manager_based_rl_env.py +++ b/source/isaaclab/isaaclab/envs/manager_based_rl_env.py @@ -94,6 +94,13 @@ def __init__(self, cfg: ManagerBasedRLEnvCfg, render_mode: str | None = None, ** # produced video matches the simulation self.metadata["render_fps"] = 1 / self.step_dt self.has_rtx_sensors = self.sim.get_setting("/isaaclab/render/rtx_sensors") + + # If the physics manager supports NaN debug tracking, attach episode + # length so NaN exports report how far into the episode each env was. 
+ pm = self.sim.physics_manager + if hasattr(pm, "set_debug_episode_length_buf"): + pm.set_debug_episode_length_buf(self.episode_length_buf) + print("[INFO]: Completed setting up the environment...") """ diff --git a/source/isaaclab/isaaclab/envs/mdp/__init__.pyi b/source/isaaclab/isaaclab/envs/mdp/__init__.pyi index 399035f2c09c..6c029271bf8e 100644 --- a/source/isaaclab/isaaclab/envs/mdp/__init__.pyi +++ b/source/isaaclab/isaaclab/envs/mdp/__init__.pyi @@ -55,10 +55,12 @@ __all__ = [ "randomize_physics_scene_gravity", "randomize_rigid_body_collider_offsets", "randomize_rigid_body_com", + "randomize_rigid_body_inertia", "randomize_rigid_body_mass", "randomize_rigid_body_material", "randomize_rigid_body_scale", "randomize_visual_color", + "randomize_visual_color_instanced", "randomize_visual_texture_material", "reset_joints_by_offset", "reset_joints_by_scale", @@ -137,6 +139,8 @@ __all__ = [ "joint_pos_out_of_manual_limit", "joint_vel_out_of_limit", "joint_vel_out_of_manual_limit", + "body_lin_vel_out_of_limit", + "body_ang_vel_out_of_limit", "root_height_below_minimum", "time_out", ] @@ -195,10 +199,12 @@ from .events import ( randomize_physics_scene_gravity, randomize_rigid_body_collider_offsets, randomize_rigid_body_com, + randomize_rigid_body_inertia, randomize_rigid_body_mass, randomize_rigid_body_material, randomize_rigid_body_scale, randomize_visual_color, + randomize_visual_color_instanced, randomize_visual_texture_material, reset_joints_by_offset, reset_joints_by_scale, @@ -285,6 +291,8 @@ from .terminations import ( joint_pos_out_of_manual_limit, joint_vel_out_of_limit, joint_vel_out_of_manual_limit, + body_lin_vel_out_of_limit, + body_ang_vel_out_of_limit, root_height_below_minimum, time_out, ) diff --git a/source/isaaclab/isaaclab/envs/mdp/events.py b/source/isaaclab/isaaclab/envs/mdp/events.py index 18c9b5eff63a..15bbb5a126b7 100644 --- a/source/isaaclab/isaaclab/envs/mdp/events.py +++ b/source/isaaclab/isaaclab/envs/mdp/events.py @@ -26,10 +26,16 @@ 
 import isaaclab.utils.math as math_utils
 from isaaclab.actuators import ImplicitActuator
 from isaaclab.managers import EventTermCfg, ManagerTermBase, SceneEntityCfg
-from isaaclab.utils.version import compare_versions, get_isaac_sim_version
+from isaaclab.utils.version import compare_versions
 
 if TYPE_CHECKING:
+    from isaaclab_newton.assets import Articulation as NewtonArticulation
+    from isaaclab_newton.assets import RigidObject as NewtonRigidObject
+    from isaaclab_newton.assets import RigidObjectCollection as NewtonRigidObjectCollection
+    from isaaclab_physx.assets import Articulation as PhysXArticulation
     from isaaclab_physx.assets import DeformableObject
+    from isaaclab_physx.assets import RigidObject as PhysXRigidObject
+    from isaaclab_physx.assets import RigidObjectCollection as PhysXRigidObjectCollection
 
     from isaaclab.assets import Articulation, RigidObject
     from isaaclab.envs import ManagerBasedEnv
@@ -158,6 +164,15 @@ class randomize_rigid_body_material(ManagerTermBase):
     values. The number of materials is specified by ``num_buckets``. The materials are generated
     by sampling uniform random values from the given ranges.
 
+    Automatically detects the active physics backend (PhysX or Newton) and applies the appropriate
+    material randomization strategy:
+
+    - **PhysX**: Uses the 3-tuple material format (static_friction, dynamic_friction, restitution)
+      and applies via the unified ``set_material_properties_index`` method.
+    - **Newton**: Uses separate friction (mu) and restitution values, applying them via
+      ``set_friction_mask`` and ``set_restitution_mask`` methods. Newton uses a single
+      friction coefficient (mu), so only the static friction value is used.
+
     The material properties are then assigned to the geometries of the asset.
The assignment is done by creating a random integer tensor of shape (num_instances, max_num_shapes) where ``num_instances`` is the number of assets spawned and ``max_num_shapes`` is the maximum number of shapes in the asset (over @@ -203,25 +218,9 @@ def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): f" with type: '{type(self.asset)}'." ) - # obtain number of shapes per body (needed for indexing the material properties correctly) - # note: this is a workaround since the Articulation does not provide a direct way to obtain the number of shapes - # per body. We use the physics simulation view to obtain the number of shapes per body. - if isinstance(self.asset, BaseArticulation) and self.asset_cfg.body_ids != slice(None): - self.num_shapes_per_body = [] - for link_path in self.asset.root_view.link_paths[0]: - link_physx_view = self.asset._physics_sim_view.create_rigid_body_view(link_path) # type: ignore - self.num_shapes_per_body.append(link_physx_view.max_shapes) - # ensure the parsing is correct - num_shapes = sum(self.num_shapes_per_body) - expected_shapes = self.asset.root_view.max_shapes - if num_shapes != expected_shapes: - raise ValueError( - "Randomization term 'randomize_rigid_body_material' failed to parse the number of shapes per body." - f" Expected total shapes: {expected_shapes}, but got: {num_shapes}." 
- ) - else: - # in this case, we don't need to do special indexing - self.num_shapes_per_body = None + # detect physics backend and initialize backend-specific state + manager_name = env.sim.physics_manager.__name__.lower() + self._backend = "newton" if "newton" in manager_name else "physx" # obtain parameters for sampling friction and restitution values static_friction_range = cfg.params.get("static_friction_range", (1.0, 1.0)) @@ -232,8 +231,7 @@ def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): # sample material properties from the given ranges # note: we only sample the materials once during initialization # afterwards these are randomly assigned to the geometries of the asset - range_list = [static_friction_range, dynamic_friction_range, restitution_range] - ranges = torch.tensor(range_list, device="cpu") + ranges = torch.tensor([static_friction_range, dynamic_friction_range, restitution_range], device="cpu") self.material_buckets = math_utils.sample_uniform(ranges[:, 0], ranges[:, 1], (num_buckets, 3), device="cpu") # ensure dynamic friction is always less than static friction @@ -241,6 +239,62 @@ def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): if make_consistent: self.material_buckets[:, 1] = torch.min(self.material_buckets[:, 0], self.material_buckets[:, 1]) + # initialize backend-specific state (shape indices, cached materials) + use_body_ids = isinstance(self.asset, BaseArticulation) and self.asset_cfg.body_ids != slice(None) + if self._backend == "newton": + self._init_newton(use_body_ids) + else: + self._init_physx(use_body_ids) + + def _init_physx(self, use_body_ids: bool) -> None: + """Initialize PhysX-specific state: shape indices and cached materials.""" + asset: PhysXArticulation | PhysXRigidObject | PhysXRigidObjectCollection = self.asset # type: ignore[assignment] + + # compute shape indices + if use_body_ids: + # note: workaround since Articulation doesn't expose shapes per body directly + num_shapes_per_body = [] + for 
link_path in asset.root_view.link_paths[0]: # type: ignore[union-attr] + link_physx_view = asset.root_view.create_rigid_body_view(link_path) # type: ignore + num_shapes_per_body.append(link_physx_view.max_shapes) + shape_indices_list = [] + for body_id in self.asset_cfg.body_ids: + start_idx = sum(num_shapes_per_body[:body_id]) + end_idx = start_idx + num_shapes_per_body[body_id] + shape_indices_list.extend(range(start_idx, end_idx)) + self.shape_indices = torch.tensor(shape_indices_list, dtype=torch.long) + else: + self.shape_indices = torch.arange(asset.root_view.max_shapes, dtype=torch.long) + + # cache default materials + self.default_materials = wp.to_torch(asset.root_view.get_material_properties()).clone() + + def _init_newton(self, use_body_ids: bool) -> None: + """Initialize Newton-specific state: shape indices and cached materials.""" + asset: NewtonArticulation | NewtonRigidObject | NewtonRigidObjectCollection = self.asset # type: ignore[assignment] + + # compute shape indices + if use_body_ids: + num_shapes_per_body = asset.num_shapes_per_body # type: ignore[union-attr] + shape_indices_list = [] + for body_id in self.asset_cfg.body_ids: + start_idx = sum(num_shapes_per_body[:body_id]) + end_idx = start_idx + num_shapes_per_body[body_id] + shape_indices_list.extend(range(start_idx, end_idx)) + self.shape_indices = torch.tensor(shape_indices_list, dtype=torch.long) + else: + self.shape_indices = torch.arange(asset.num_shapes, dtype=torch.long) + + # cache default materials + # TODO(newton-material-api): We access internal bindings `_sim_bind_shape_material_*` directly + # because Newton's friction model (single mu) differs from PhysX (static/dynamic friction). + # This is not ideal - we should either: + # 1. Add a unified getter API that abstracts backend differences, or + # 2. Use Newton's selection API (e.g., root_view.get_attribute) for querying + # For now, internal code accesses the bindings directly while we iterate on the design. 
+ self.default_friction = wp.to_torch(asset.data._sim_bind_shape_material_mu).clone() + self.default_restitution = wp.to_torch(asset.data._sim_bind_shape_material_restitution).clone() + def __call__( self, env: ManagerBasedEnv, @@ -252,38 +306,46 @@ def __call__( asset_cfg: SceneEntityCfg, make_consistent: bool = False, ): + # determine device based on backend + device = env.device if self._backend == "newton" else "cpu" # physx uses cpu to process material randomization + # resolve environment ids if env_ids is None: - env_ids = torch.arange(env.scene.num_envs, device="cpu", dtype=torch.int32) + env_ids = torch.arange(env.scene.num_envs, device=device, dtype=torch.int32) else: - env_ids = env_ids.cpu() + env_ids = env_ids.to(device) # randomly assign material IDs to the geometries - total_num_shapes = self.asset.root_view.max_shapes - bucket_ids = torch.randint(0, num_buckets, (len(env_ids), total_num_shapes), device="cpu") - material_samples = self.material_buckets[bucket_ids] - - # retrieve material buffer from the physics simulation - materials = wp.to_torch(self.asset.root_view.get_material_properties()) + total_num_shapes = len(self.shape_indices) + bucket_ids = torch.randint(0, num_buckets, (len(env_ids), total_num_shapes), device=device) + shape_idx = self.shape_indices.to(device) + material_samples = self.material_buckets.to(device)[bucket_ids] - # update material buffer with new samples - if self.num_shapes_per_body is not None: - # sample material properties from the given ranges - for body_id in self.asset_cfg.body_ids: - # obtain indices of shapes for the body - start_idx = sum(self.num_shapes_per_body[:body_id]) - end_idx = start_idx + self.num_shapes_per_body[body_id] - # assign the new materials - # material samples are of shape: num_env_ids x total_num_shapes x 3 - materials[env_ids, start_idx:end_idx] = material_samples[:, start_idx:end_idx] + # dispatch to backend-specific implementation + if self._backend == "newton": + 
self._call_newton(env_ids, shape_idx, material_samples) else: - # assign all the materials - materials[env_ids] = material_samples[:] + self._call_physx(env_ids, shape_idx, material_samples) - # apply to simulation - self.asset.root_view.set_material_properties( - wp.from_torch(materials, dtype=wp.float32), wp.from_torch(env_ids, dtype=wp.int32) - ) + def _call_physx(self, env_ids: torch.Tensor, shape_idx: torch.Tensor, material_samples: torch.Tensor) -> None: + """Apply material randomization via PhysX's 3-tuple material format.""" + # update cached material buffer with new samples (vectorized) + self.default_materials[env_ids[:, None], shape_idx] = material_samples + + # apply to simulation via PhysX's unified material API + asset: PhysXRigidObject | PhysXArticulation | PhysXRigidObjectCollection = self.asset # type: ignore[assignment] + asset.set_material_properties_index(materials=self.default_materials, env_ids=env_ids) + + def _call_newton(self, env_ids: torch.Tensor, shape_idx: torch.Tensor, material_samples: torch.Tensor) -> None: + """Apply material randomization via Newton's native friction/restitution API.""" + # update cached default buffers with new samples (vectorized) + self.default_friction[env_ids[:, None], shape_idx] = material_samples[..., 0] + self.default_restitution[env_ids[:, None], shape_idx] = material_samples[..., 2] + + # apply via Newton's setter methods (handles kernel write + notification) + asset: NewtonRigidObject | NewtonArticulation | NewtonRigidObjectCollection = self.asset # type: ignore[assignment] + asset.set_friction_mask(friction=self.default_friction) + asset.set_restitution_mask(restitution=self.default_restitution) class randomize_rigid_body_mass(ManagerTermBase): @@ -405,6 +467,144 @@ def __call__( ) +class randomize_rigid_body_inertia(ManagerTermBase): + """Randomize the inertia tensor of rigid bodies by adding, scaling, or setting values. + + This function modifies body inertia tensors independently of mass. 
The inertia tensor + is a 3x3 symmetric matrix stored as 9 elements: ``[Ixx, Ixy, Ixz, Iyx, Iyy, Iyz, Izx, Izy, Izz]``. + + Two modes are supported via the :attr:`diagonal_only` parameter: + + - **diagonal_only=True** (default): Only modifies diagonal elements (Ixx, Iyy, Izz at + indices 0, 4, 8). This is useful for adding numerical stability (armature/regularization) + without changing rotational coupling between axes. The diagonal elements represent + resistance to rotation about each principal axis. + + - **diagonal_only=False**: Modifies all 9 elements of the inertia tensor. This can + simulate manufacturing variations or asymmetric mass distributions. Off-diagonal + elements represent coupling between rotations about different axes. + + .. note:: + Unlike :class:`randomize_rigid_body_mass` which recomputes inertia based on mass + ratios, this function modifies inertia directly without affecting mass. + """ + + def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): + """Initialize the term. + + Args: + cfg: The configuration of the event term. + env: The environment instance. + + Raises: + ValueError: If the operation is not supported. + ValueError: If the lower bound is negative or zero when not allowed for scale operation. + ValueError: If the upper bound is less than the lower bound. + """ + from isaaclab.assets import BaseArticulation, BaseRigidObject + + super().__init__(cfg, env) + + # extract the used quantities (to enable type-hinting) + self.asset_cfg: SceneEntityCfg = cfg.params["asset_cfg"] + self.asset: RigidObject | Articulation = env.scene[self.asset_cfg.name] + + if not isinstance(self.asset, (BaseRigidObject, BaseArticulation)): + raise ValueError( + f"Randomization term 'randomize_rigid_body_inertia' not supported for asset: '{self.asset_cfg.name}'" + f" with type: '{type(self.asset)}'." 
+ ) + + # check for valid operation + if cfg.params["operation"] == "scale": + if "inertia_distribution_params" in cfg.params: + _validate_scale_range( + cfg.params["inertia_distribution_params"], "inertia_distribution_params", allow_zero=False + ) + elif cfg.params["operation"] not in ("abs", "add"): + raise ValueError( + f"Randomization term 'randomize_rigid_body_inertia' does not support operation:" + f" '{cfg.params['operation']}'." + ) + + self.default_inertia = None + # cache inertia indices: diagonal (0, 4, 8) for regularization, or all elements + diagonal_only = cfg.params.get("diagonal_only", True) + self._inertia_idx = torch.tensor([0, 4, 8], device=self.asset.device) if diagonal_only else slice(None) + + def __call__( + self, + env: ManagerBasedEnv, + env_ids: torch.Tensor | None, + asset_cfg: SceneEntityCfg, + inertia_distribution_params: tuple[float, float], + operation: Literal["add", "scale", "abs"] = "add", + distribution: Literal["uniform", "log_uniform", "gaussian"] = "uniform", + diagonal_only: bool = True, + ): + """Randomize body inertia tensors. + + Args: + env: The environment instance. + env_ids: The environment indices to randomize. If None, all environments are randomized. + asset_cfg: The asset configuration specifying the asset and body names. + inertia_distribution_params: Distribution parameters as a tuple of two floats + ``(low, high)`` for sampling inertia modification values. + operation: The operation to apply. Options: ``"add"``, ``"scale"``, ``"abs"``. + Defaults to ``"add"`` which is typical for regularization/armature. + distribution: The distribution to sample from. Options: ``"uniform"``, + ``"log_uniform"``, ``"gaussian"``. Defaults to ``"uniform"``. + diagonal_only: If True, only modify diagonal elements (Ixx, Iyy, Izz) for + numerical stability. If False, modify all 9 elements. Defaults to True. 
+ """ + # store default inertia on first call for repeatable randomization + if self.default_inertia is None: + self.default_inertia = wp.to_torch(self.asset.data.body_inertia).clone() + + # resolve environment ids + if env_ids is None: + env_ids = torch.arange(env.scene.num_envs, device=self.asset.device, dtype=torch.int32) + else: + env_ids = env_ids.to(self.asset.device) + + # resolve body indices + if self.asset_cfg.body_ids == slice(None): + body_ids = torch.arange(self.asset.num_bodies, dtype=torch.int32, device=self.asset.device) + else: + body_ids = torch.tensor(self.asset_cfg.body_ids, dtype=torch.int32, device=self.asset.device) + + # get default inertias for affected envs/bodies (advanced indexing creates a copy) + # shape: (len(env_ids), len(body_ids), 9) + inertias = self.default_inertia[env_ids[:, None], body_ids] + + # resolve the distribution function + if distribution == "uniform": + dist_fn = math_utils.sample_uniform + elif distribution == "log_uniform": + dist_fn = math_utils.sample_log_uniform + elif distribution == "gaussian": + dist_fn = math_utils.sample_gaussian + else: + raise NotImplementedError( + f"Unknown distribution: '{distribution}' for inertia randomization." + " Please use 'uniform', 'log_uniform', or 'gaussian'." 
+ ) + + # sample random values once per (env, body) - shape: (len(env_ids), len(body_ids)) + random_values = dist_fn(*inertia_distribution_params, (len(env_ids), len(body_ids)), device=self.asset.device) + + # apply the operation with the SAME random value per body + if operation == "add": + inertias[:, :, self._inertia_idx] += random_values[..., None] + elif operation == "scale": + inertias[:, :, self._inertia_idx] *= random_values[..., None] + elif operation == "abs": + inertias[:, :, self._inertia_idx] = random_values[..., None] + + # set the inertia tensors into the physics simulation + self.asset.set_inertias_index(inertias=inertias, body_ids=body_ids, env_ids=env_ids) + + def randomize_rigid_body_com( env: ManagerBasedEnv, env_ids: torch.Tensor | None, @@ -438,71 +638,217 @@ def randomize_rigid_body_com( ranges[:, 0], ranges[:, 1], (len(env_ids), 3), device=asset.device ).unsqueeze(1) - # get the current com of the bodies (num_assets, num_bodies) + # get the current com of the bodies (num_assets, num_bodies, 7) coms = wp.to_torch(asset.data.body_com_pose_b).clone() - # Randomize the com in range + # Randomize the com position coms[env_ids[:, None], body_ids, :3] += rand_samples - # Set the new coms - asset.set_coms_index(coms=coms, env_ids=env_ids) + # Newton's set_coms_index expects position-only (vec3f), while PhysX expects + # the full pose (pos + quat). Detect backend to pass the correct shape. 
+ manager_name = env.sim.physics_manager.__name__.lower() + if "newton" in manager_name: + asset.set_coms_index(coms=coms[..., :3], env_ids=env_ids) + else: + asset.set_coms_index(coms=coms, env_ids=env_ids) -def randomize_rigid_body_collider_offsets( - env: ManagerBasedEnv, - env_ids: torch.Tensor | None, - asset_cfg: SceneEntityCfg, - rest_offset_distribution_params: tuple[float, float] | None = None, - contact_offset_distribution_params: tuple[float, float] | None = None, - distribution: Literal["uniform", "log_uniform", "gaussian"] = "uniform", -): - """Randomize the collider parameters of rigid bodies in an asset by adding, scaling, or setting random values. +class randomize_rigid_body_collider_offsets(ManagerTermBase): + """Randomize the collider parameters of rigid bodies by setting random values. This function allows randomizing the collider parameters of the asset, such as rest and contact offsets. - These correspond to the physics engine collider properties that affect the collision checking. + These correspond to the physics engine collider properties that affect collision checking. - The function samples random values from the given distribution parameters and applies the operation to - the collider properties. It then sets the values into the physics simulation. If the distribution parameters - are not provided for a particular property, the function does not modify the property. + Automatically detects the active physics backend (PhysX or Newton) and applies the appropriate + collider offset randomization strategy: + + - **PhysX**: Uses rest offset and contact offset directly via the PhysX tensor API + (``root_view.set_rest_offsets`` / ``root_view.set_contact_offsets``). + - **Newton**: Maps PhysX concepts to Newton's geometry properties. PhysX ``rest_offset`` + maps to Newton ``shape_margin``, and PhysX ``contact_offset`` is converted to Newton + ``shape_gap`` via ``gap = contact_offset - margin``. 
+ See the `Newton collision schema`_ for details on this mapping. - Currently, the distribution parameters are applied as absolute values. + The function samples random values from the given distribution parameters and applies them + as absolute values to the collider properties. If the distribution parameters are not + provided for a particular property, the function does not modify it. + + .. _Newton collision schema: https://newton-physics.github.io/newton/latest/concepts/collisions.html .. tip:: - This function uses CPU tensors to assign the collision properties. It is recommended to use this function - only during the initialization of the environment. + This function uses CPU tensors (PhysX) or GPU tensors (Newton) to assign the collision + properties. It is recommended to use this function only during the initialization of + the environment. """ - # extract the used quantities (to enable type-hinting) - asset: RigidObject | Articulation = env.scene[asset_cfg.name] - # resolve environment ids - if env_ids is None: - env_ids = torch.arange(env.scene.num_envs, device="cpu") + def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): + """Initialize the term. 
- # sample collider properties from the given ranges and set into the physics simulation - # -- rest offsets - if rest_offset_distribution_params is not None: - rest_offset = wp.to_torch(asset.root_view.get_rest_offsets()).clone() - rest_offset = _randomize_prop_by_op( - rest_offset, - rest_offset_distribution_params, - None, - slice(None), - operation="abs", - distribution=distribution, - ) - asset.root_view.set_rest_offsets(rest_offset, env_ids.cpu()) - # -- contact offsets - if contact_offset_distribution_params is not None: - contact_offset = wp.to_torch(asset.root_view.get_contact_offsets()).clone() - contact_offset = _randomize_prop_by_op( - contact_offset, - contact_offset_distribution_params, - None, - slice(None), - operation="abs", - distribution=distribution, - ) - asset.root_view.set_contact_offsets(contact_offset, env_ids.cpu()) + Args: + cfg: The configuration of the event term. + env: The environment instance. + + Raises: + ValueError: If the asset is not a RigidObject or an Articulation. + """ + from isaaclab.assets import BaseArticulation, BaseRigidObject # noqa: PLC0415 + + super().__init__(cfg, env) + + self.asset_cfg: SceneEntityCfg = cfg.params["asset_cfg"] + self.asset: RigidObject | Articulation = env.scene[self.asset_cfg.name] + + if not isinstance(self.asset, (BaseRigidObject, BaseArticulation)): + raise ValueError( + f"Randomization term 'randomize_rigid_body_collider_offsets' not supported for asset:" + f" '{self.asset_cfg.name}' with type: '{type(self.asset)}'." 
+ ) + + manager_name = env.sim.physics_manager.__name__.lower() + self._backend = "newton" if "newton" in manager_name else "physx" + + if self._backend == "newton": + self._init_newton() + else: + self._init_physx() + + def _init_physx(self) -> None: + """Initialize PhysX-specific state: cache default offsets.""" + asset: PhysXArticulation | PhysXRigidObject | PhysXRigidObjectCollection = self.asset # type: ignore[assignment] + self.default_rest_offsets = wp.to_torch(asset.root_view.get_rest_offsets()).clone() + self.default_contact_offsets = wp.to_torch(asset.root_view.get_contact_offsets()).clone() + + def _init_newton(self) -> None: + """Initialize Newton-specific state: bind to shape_margin and shape_gap.""" + import isaaclab_newton.physics.newton_manager as newton_manager_module # noqa: PLC0415 + from newton.solvers import SolverNotifyFlags # noqa: PLC0415 + + self._newton_manager = newton_manager_module.NewtonManager + self._notify_shape_properties = SolverNotifyFlags.SHAPE_PROPERTIES + + asset: NewtonArticulation | NewtonRigidObject | NewtonRigidObjectCollection = self.asset # type: ignore[assignment] + model = self._newton_manager.get_model() + # TODO(newton-collider-api): We access root_view.get_attribute directly to create + # bindings for shape_margin and shape_gap because Newton doesn't yet expose dedicated + # APIs for these properties on its asset classes. This mirrors the approach used for + # material properties in randomize_rigid_body_material. When Newton adds proper + # getter/setter APIs for collision geometry properties, update this code. 
+ self._sim_bind_shape_margin = asset._root_view.get_attribute("shape_margin", model)[:, 0] + self._sim_bind_shape_gap = asset._root_view.get_attribute("shape_gap", model)[:, 0] + + self.default_margin = wp.to_torch(self._sim_bind_shape_margin).clone() + self.default_gap = wp.to_torch(self._sim_bind_shape_gap).clone() + + def __call__( + self, + env: ManagerBasedEnv, + env_ids: torch.Tensor | None, + asset_cfg: SceneEntityCfg, + rest_offset_distribution_params: tuple[float, float] | None = None, + contact_offset_distribution_params: tuple[float, float] | None = None, + distribution: Literal["uniform", "log_uniform", "gaussian"] = "uniform", + ): + if self._backend == "newton": + self._call_newton( + env, env_ids, rest_offset_distribution_params, contact_offset_distribution_params, distribution + ) + else: + self._call_physx( + env, env_ids, rest_offset_distribution_params, contact_offset_distribution_params, distribution + ) + + def _call_physx( + self, + env: ManagerBasedEnv, + env_ids: torch.Tensor | None, + rest_offset_params: tuple[float, float] | None, + contact_offset_params: tuple[float, float] | None, + distribution: Literal["uniform", "log_uniform", "gaussian"], + ) -> None: + """Apply collider offset randomization via PhysX's tensor API.""" + asset: PhysXRigidObject | PhysXArticulation | PhysXRigidObjectCollection = self.asset # type: ignore[assignment] + if env_ids is None: + env_ids = torch.arange(env.scene.num_envs, device="cpu") + else: + env_ids = env_ids.cpu() + + if rest_offset_params is not None: + rest_offset = self.default_rest_offsets.clone() + rest_offset = _randomize_prop_by_op( + rest_offset, + rest_offset_params, + None, + slice(None), + operation="abs", + distribution=distribution, + ) + asset.root_view.set_rest_offsets(rest_offset, env_ids) + + if contact_offset_params is not None: + contact_offset = self.default_contact_offsets.clone() + contact_offset = _randomize_prop_by_op( + contact_offset, + contact_offset_params, + None, + 
slice(None), + operation="abs", + distribution=distribution, + ) + asset.root_view.set_contact_offsets(contact_offset, env_ids) + + def _call_newton( + self, + env: ManagerBasedEnv, + env_ids: torch.Tensor | None, + rest_offset_params: tuple[float, float] | None, + contact_offset_params: tuple[float, float] | None, + distribution: Literal["uniform", "log_uniform", "gaussian"], + ) -> None: + """Apply collider offset randomization via Newton's shape geometry properties. + + Maps PhysX concepts to Newton (see ``SchemaResolverPhysx`` in newton): + + - ``rest_offset`` → ``shape_margin`` (Newton margin) + - ``contact_offset`` → ``shape_gap`` (Newton gap = contact_offset - rest_offset) + """ + device = env.device + if env_ids is None: + env_ids = torch.arange(env.scene.num_envs, device=device, dtype=torch.int32) + else: + env_ids = env_ids.to(device) + + margin_view = wp.to_torch(self._sim_bind_shape_margin) + + if rest_offset_params is not None: + margin = self.default_margin.clone() + margin = _randomize_prop_by_op( + margin, + rest_offset_params, + None, + slice(None), + operation="abs", + distribution=distribution, + ) + self.default_margin[env_ids] = margin[env_ids] + margin_view[env_ids] = margin[env_ids] + + if contact_offset_params is not None: + current_margin = self.default_margin + contact_offset = torch.zeros_like(self.default_gap) + contact_offset = _randomize_prop_by_op( + contact_offset, + contact_offset_params, + None, + slice(None), + operation="abs", + distribution=distribution, + ) + gap = torch.clamp(contact_offset - current_margin, min=0.0) + self.default_gap[env_ids] = gap[env_ids] + gap_view = wp.to_torch(self._sim_bind_shape_gap) + gap_view[env_ids] = gap[env_ids] + + self._newton_manager.add_model_change(self._notify_shape_properties) class randomize_physics_scene_gravity(ManagerTermBase): @@ -802,14 +1148,26 @@ def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): # extract the used quantities (to enable type-hinting) self.asset_cfg: 
SceneEntityCfg = cfg.params["asset_cfg"] - self.asset: RigidObject | Articulation = env.scene[self.asset_cfg.name] + self.asset: Articulation = env.scene[self.asset_cfg.name] + + # detect physics backend + manager_name = env.sim.physics_manager.__name__.lower() + self._backend = "newton" if "newton" in manager_name else "physx" + # cache default values (common to both backends) self.default_joint_friction_coeff = wp.to_torch(self.asset.data.joint_friction_coeff).clone() - self.default_dynamic_joint_friction_coeff = wp.to_torch(self.asset.data.joint_dynamic_friction_coeff).clone() - self.default_viscous_joint_friction_coeff = wp.to_torch(self.asset.data.joint_viscous_friction_coeff).clone() self.default_joint_armature = wp.to_torch(self.asset.data.joint_armature).clone() self.default_joint_pos_limits = wp.to_torch(self.asset.data.joint_pos_limits).clone() + # cache dynamic/viscous friction (PhysX only - Newton only has static friction) + if self._backend == "physx": + self.default_dynamic_joint_friction_coeff = wp.to_torch( + self.asset.data.joint_dynamic_friction_coeff + ).clone() + self.default_viscous_joint_friction_coeff = wp.to_torch( + self.asset.data.joint_viscous_friction_coeff + ).clone() + # check for valid operation if cfg.params["operation"] == "scale": if "friction_distribution_params" in cfg.params: @@ -867,8 +1225,14 @@ def __call__( # Always set static friction (indexed once) static_friction_coeff = friction_coeff[env_ids_for_slice, joint_ids] - # if isaacsim version is lower than 5.0.0 we can set only the static friction coefficient - if get_isaac_sim_version().major >= 5: + if self._backend == "newton": + # Newton only supports static friction coefficient + self.asset.write_joint_friction_coefficient_to_sim_index( + joint_friction_coeff=static_friction_coeff, + joint_ids=joint_ids, + env_ids=env_ids, + ) + else: # Randomize raw tensors dynamic_friction_coeff = _randomize_prop_by_op( self.default_dynamic_joint_friction_coeff.clone(), @@ -897,19 
+1261,15 @@ def __call__( # Index once at the end dynamic_friction_coeff = dynamic_friction_coeff[env_ids_for_slice, joint_ids] viscous_friction_coeff = viscous_friction_coeff[env_ids_for_slice, joint_ids] - else: - # For versions < 5.0.0, we do not set these values - dynamic_friction_coeff = None - viscous_friction_coeff = None - - # Single write call for all versions - self.asset.write_joint_friction_coefficient_to_sim_index( - joint_friction_coeff=static_friction_coeff, - joint_dynamic_friction_coeff=dynamic_friction_coeff, - joint_viscous_friction_coeff=viscous_friction_coeff, - joint_ids=joint_ids, - env_ids=env_ids, - ) + + # Single write call for all versions + self.asset.write_joint_friction_coefficient_to_sim_index( + joint_friction_coeff=static_friction_coeff, + joint_dynamic_friction_coeff=dynamic_friction_coeff, + joint_viscous_friction_coeff=viscous_friction_coeff, + joint_ids=joint_ids, + env_ids=env_ids, + ) # joint armature if armature_distribution_params is not None: @@ -1857,6 +2217,164 @@ def __call__( rep.functional.modify.attribute(self.material_prims, "diffuse_color_constant", random_colors) +class randomize_visual_color_instanced(ManagerTermBase): + """Randomize visual color of instanced prims using spawn-time created materials. + + This event term works with assets spawned using ``randomizable_visual_materials=True`` + in their spawner configuration. The materials are created at spawn time inside the + asset hierarchy, so when the cloner copies the asset, each environment gets its own + copy of the materials. + + Unlike :class:`randomize_visual_color`, this approach: + + 1. Preserves USD instancing performance (materials are copied with the asset) + 2. Enables true per-environment color randomization + 3. Works with both ``replicate_physics=True`` and ``False`` + + .. note:: + The asset must be spawned with ``randomizable_visual_materials=True`` in its + spawner config (e.g., :class:`~isaaclab.sim.spawners.from_files.UsdFileCfg`). 
+ """ + + def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): + """Initialize the color randomization term by finding spawn-time created materials. + + The spawner creates UsdPreviewSurface materials in a RandomizableMaterials container + and binds them to link prims. These materials are NOT instance proxies, so we can + modify them at runtime. + + Args: + cfg: The configuration of the event term. + env: The environment instance. + """ + from isaacsim.core.utils.extensions import enable_extension # noqa: PLC0415 + from pxr import UsdShade # noqa: PLC0415 + + enable_extension("omni.replicator.core") + import omni.replicator.core as rep # noqa: PLC0415 + + super().__init__(cfg, env) + + asset_cfg: SceneEntityCfg = cfg.params.get("asset_cfg") + materials_path: str = cfg.params.get("materials_path", "RandomizableMaterials") + + asset = env.scene[asset_cfg.name] + stage = env.sim.stage + + # Get env paths + import re # noqa: PLC0415 + + env0_path = re.sub(r"\{ENV_REGEX_NS\}|env_\.\*", "env_0", asset.cfg.prim_path) + + # Find shaders in the RandomizableMaterials container + self._shader_prims = [] + self._shaders_per_env = 0 + self._color_attr_name = "diffuseColor" # UsdPreviewSurface uses diffuseColor + + # Find all material shaders in env_0's RandomizableMaterials container + env0_materials_path = f"{env0_path}/{materials_path}" + env0_container = stage.GetPrimAtPath(env0_materials_path) + + if not env0_container or not env0_container.IsValid(): + logger.warning( + f"[randomize_visual_color_instanced] No materials container at {env0_materials_path}. " + "Make sure the asset is spawned with randomizable_visual_materials=True." 
+ ) + self._rep = rep + self.color_rng = None + return + + # Find all mat_* prims and their shaders + env0_shaders = [] # relative shader paths + for child in env0_container.GetChildren(): + if child.IsA(UsdShade.Material): + shader_prim = child.GetChild("Shader") + if shader_prim and shader_prim.IsValid(): + # Store relative path from asset root + rel_path = str(shader_prim.GetPath()).replace(env0_path, "") + env0_shaders.append(rel_path) + + if not env0_shaders: + logger.warning("[randomize_visual_color_instanced] No shaders found in materials container") + self._rep = rep + self.color_rng = None + return + + self._shaders_per_env = len(env0_shaders) + + # Find shaders in all envs + for env_id in range(env.num_envs): + env_path = env0_path.replace("/env_0/", f"/env_{env_id}/") + for rel_shader_path in env0_shaders: + shader_path = f"{env_path}{rel_shader_path}" + shader_prim = stage.GetPrimAtPath(shader_path) + if shader_prim and shader_prim.IsValid(): + self._shader_prims.append(shader_prim) + + if not self._shader_prims: + logger.warning("[randomize_visual_color_instanced] No shaders found across envs") + self._rep = rep + self.color_rng = None + return + + logger.info( + f"[randomize_visual_color_instanced] Found {len(self._shader_prims)} shaders ({self._shaders_per_env} per env)" + ) + + # Store Replicator reference and RNG + self._rep = rep + self.color_rng = rep.rng.ReplicatorRNG() + + def __call__( + self, + env: ManagerBasedEnv, + env_ids: torch.Tensor, + asset_cfg: SceneEntityCfg, + colors: dict[str, tuple[float, float]], + materials_path: str = "RandomizableMaterials", + ): + """Randomize colors of the spawn-time created materials. + + Only randomizes colors for the environments specified in ``env_ids``, not all environments. + + Args: + env: The environment instance. + env_ids: The environment indices to randomize (only these envs get new colors). + asset_cfg: The asset configuration (used to identify the asset). 
+ colors: Dictionary with "r", "g", "b" keys, each mapping to (min, max) tuple. + materials_path: Path to the materials container (must match spawner config). + """ + if not self._shader_prims or self._shaders_per_env == 0: + return + + rep = self._rep + + # Parse colors into ranges + color_low = [colors[key][0] for key in ["r", "g", "b"]] + color_high = [colors[key][1] for key in ["r", "g", "b"]] + + # Only update shaders for the resetting environments (env_ids) + # Shaders are stored in order: [env_0 shaders..., env_1 shaders..., ...] + env_ids_list = env_ids.cpu().tolist() if hasattr(env_ids, "cpu") else list(env_ids) + + # Collect shader prims for only the resetting environments + shaders_to_update = [] + for env_id in env_ids_list: + start_idx = env_id * self._shaders_per_env + end_idx = start_idx + self._shaders_per_env + shaders_to_update.extend(self._shader_prims[start_idx:end_idx]) + + if not shaders_to_update: + return + + # Generate random colors only for the shaders being updated + num_shaders = len(shaders_to_update) + random_colors = self.color_rng.generator.uniform(color_low, color_high, size=(num_shaders, 3)) + + # Use Replicator's batched API for efficient color updates + rep.functional.modify.attribute(shaders_to_update, self._color_attr_name, random_colors) + + """ Internal helper functions. """ diff --git a/source/isaaclab/isaaclab/envs/mdp/terminations.py b/source/isaaclab/isaaclab/envs/mdp/terminations.py index 1936f5dd50b6..6cdab3839d3b 100644 --- a/source/isaaclab/isaaclab/envs/mdp/terminations.py +++ b/source/isaaclab/isaaclab/envs/mdp/terminations.py @@ -150,6 +150,41 @@ def joint_effort_out_of_limit( return torch.any(out_of_limits, dim=1) +""" +Body velocity terminations. +""" + + +def body_lin_vel_out_of_limit( + env: ManagerBasedRLEnv, + max_velocity: float, + asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), +) -> torch.Tensor: + """Terminate when any body's linear velocity magnitude exceeds the limit [m/s]. 
+ + Also terminates on NaN velocities, which indicate a solver singularity. + """ + asset: Articulation = env.scene[asset_cfg.name] + body_vel = wp.to_torch(asset.data.body_lin_vel_w)[:, asset_cfg.body_ids] + speed = torch.linalg.norm(body_vel, dim=-1) + return torch.any((speed > max_velocity) | torch.isnan(speed), dim=1) + + +def body_ang_vel_out_of_limit( + env: ManagerBasedRLEnv, + max_velocity: float, + asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), +) -> torch.Tensor: + """Terminate when any body's angular velocity magnitude exceeds the limit [rad/s]. + + Also terminates on NaN velocities, which indicate a solver singularity. + """ + asset: Articulation = env.scene[asset_cfg.name] + body_vel = wp.to_torch(asset.data.body_ang_vel_w)[:, asset_cfg.body_ids] + speed = torch.linalg.norm(body_vel, dim=-1) + return torch.any((speed > max_velocity) | torch.isnan(speed), dim=1) + + """ Contact sensor. """ diff --git a/source/isaaclab/isaaclab/sensors/ray_caster/ray_cast_utils.py b/source/isaaclab/isaaclab/sensors/ray_caster/ray_cast_utils.py index ac503b28bf52..191529349830 100644 --- a/source/isaaclab/isaaclab/sensors/ray_caster/ray_cast_utils.py +++ b/source/isaaclab/isaaclab/sensors/ray_caster/ray_cast_utils.py @@ -8,20 +8,20 @@ from __future__ import annotations import torch -import warp as wp -import omni.physics.tensors.impl.api as physx - -from isaaclab.sim.views import XformPrimView +from isaaclab.sim.views import BaseXformPrimView def obtain_world_pose_from_view( - physx_view: XformPrimView | physx.ArticulationView | physx.RigidBodyView, + physx_view: BaseXformPrimView, env_ids: torch.Tensor, clone: bool = False, ) -> tuple[torch.Tensor, torch.Tensor]: """Get the world poses of the prim referenced by the prim view. + Accepts any :class:`~isaaclab.sim.views.BaseXformPrimView` subclass + (USD/Fabric ``XformPrimView``, Newton GPU-backed ``XformPrimView``, etc.). + Args: physx_view: The prim view to get the world poses from. 
env_ids: The environment ids of the prims to get the world poses for. @@ -29,17 +29,12 @@ def obtain_world_pose_from_view( Returns: A tuple containing the world positions and orientations of the prims. - Orientation is in (x, y, z, w) format. Raises: - NotImplementedError: If the prim view is not of the supported type. + NotImplementedError: If the prim view is not a BaseXformPrimView subclass. """ - if isinstance(physx_view, XformPrimView): + if isinstance(physx_view, BaseXformPrimView): pos_w, quat_w = physx_view.get_world_poses(env_ids) - elif isinstance(physx_view, physx.ArticulationView): - pos_w, quat_w = wp.to_torch(physx_view.get_root_transforms())[env_ids].split([3, 4], dim=-1) - elif isinstance(physx_view, physx.RigidBodyView): - pos_w, quat_w = wp.to_torch(physx_view.get_transforms())[env_ids].split([3, 4], dim=-1) else: raise NotImplementedError(f"Cannot get world poses for prim view of type '{type(physx_view)}'.") diff --git a/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster.py b/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster.py index 731d57f1638f..f9f205d33e4d 100644 --- a/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster.py +++ b/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster.py @@ -18,7 +18,7 @@ import isaaclab.sim as sim_utils import isaaclab.utils.math as math_utils from isaaclab.markers import VisualizationMarkers -from isaaclab.sim.views import XformPrimView +from isaaclab.sim.views import XformPrimView, XformPrimViewFactory from isaaclab.terrains.trimesh.utils import make_plane from isaaclab.utils.math import quat_apply, quat_apply_yaw from isaaclab.utils.warp import convert_to_warp_mesh, raycast_mesh @@ -362,16 +362,9 @@ def _obtain_trackable_prim_view( prim_view = None while prim_view is None: - # TODO: Need to handle the case where API is present but it is disabled - if current_prim.HasAPI(UsdPhysics.ArticulationRootAPI): - prim_view = self._physics_sim_view.create_articulation_view(current_path_expr.replace(".*", 
"*")) - logger.info(f"Created articulation view for mesh prim at path: {target_prim_path}") - break - - # TODO: Need to handle the case where API is present but it is disabled - if current_prim.HasAPI(UsdPhysics.RigidBodyAPI): - prim_view = self._physics_sim_view.create_rigid_body_view(current_path_expr.replace(".*", "*")) - logger.info(f"Created rigid body view for mesh prim at path: {target_prim_path}") + if current_prim.HasAPI(UsdPhysics.ArticulationRootAPI) or current_prim.HasAPI(UsdPhysics.RigidBodyAPI): + prim_view = XformPrimViewFactory(current_path_expr, device=self._device, stage=self.stage) + logger.info(f"Created {type(prim_view).__name__} for physics prim at path: {current_path_expr}") break new_root_prim = current_prim.GetParent() @@ -387,7 +380,6 @@ def _obtain_trackable_prim_view( ) break - # switch the current prim to the parent prim current_prim = new_root_prim # obtain the relative transforms between target prim and the view prims diff --git a/source/isaaclab/isaaclab/sim/__init__.pyi b/source/isaaclab/isaaclab/sim/__init__.pyi index 32a5ea87e197..30028da60041 100644 --- a/source/isaaclab/isaaclab/sim/__init__.pyi +++ b/source/isaaclab/isaaclab/sim/__init__.pyi @@ -164,7 +164,9 @@ __all__ = [ "resolve_prim_pose", "resolve_prim_scale", "convert_world_pose_to_local", + "BaseXformPrimView", "XformPrimView", + "XformPrimViewFactory", ] from .simulation_cfg import RenderCfg, SimulationCfg @@ -333,4 +335,4 @@ from .utils import ( resolve_prim_scale, convert_world_pose_to_local, ) -from .views import XformPrimView +from .views import BaseXformPrimView, XformPrimView, XformPrimViewFactory diff --git a/source/isaaclab/isaaclab/sim/schemas/schemas.py b/source/isaaclab/isaaclab/sim/schemas/schemas.py index dec9de5bbada..bd7d9908908b 100644 --- a/source/isaaclab/isaaclab/sim/schemas/schemas.py +++ b/source/isaaclab/isaaclab/sim/schemas/schemas.py @@ -411,7 +411,7 @@ def modify_collision_properties( # get USD prim collider_prim = stage.GetPrimAtPath(prim_path) 
# check if prim has collision applied on it - if not UsdPhysics.CollisionAPI(collider_prim): + if not collider_prim.HasAPI(UsdPhysics.CollisionAPI): return False # retrieve the USD collision api usd_collision_api = UsdPhysics.CollisionAPI(collider_prim) diff --git a/source/isaaclab/isaaclab/sim/simulation_context.py b/source/isaaclab/isaaclab/sim/simulation_context.py index 91cee7d0be46..cee8ae4ee5e1 100644 --- a/source/isaaclab/isaaclab/sim/simulation_context.py +++ b/source/isaaclab/isaaclab/sim/simulation_context.py @@ -862,6 +862,7 @@ def build_simulation_context( if visualizers: sim.set_setting("/isaaclab/visualizer/types", " ".join(visualizers)) + sim.set_setting("/isaaclab/visualizer/explicit", True) if add_ground_plane: cfg = GroundPlaneCfg() diff --git a/source/isaaclab/isaaclab/sim/spawners/from_files/from_files.py b/source/isaaclab/isaaclab/sim/spawners/from_files/from_files.py index ddb7f9ff2fb3..bef6fd1d7b35 100644 --- a/source/isaaclab/isaaclab/sim/spawners/from_files/from_files.py +++ b/source/isaaclab/isaaclab/sim/spawners/from_files/from_files.py @@ -8,6 +8,7 @@ import fcntl import logging import os +import shutil import tempfile from typing import TYPE_CHECKING @@ -28,6 +29,7 @@ set_prim_visibility, ) from isaaclab.utils.assets import check_file_path, retrieve_file_path +from isaaclab.utils.string import to_camel_case from isaaclab.utils.version import has_kit if TYPE_CHECKING: @@ -37,6 +39,263 @@ logger = logging.getLogger(__name__) +def _create_modified_usd_with_overrides(usd_path: str, cfg: from_files_cfg.UsdFileCfg) -> str | None: + """Create a modified copy of USD with physics properties baked in for instanced prims. + + File I/O is required because USD prototypes are read-only after composition. + This function modifies source layers to set physics schema attributes and + optionally creates randomizable visual materials for per-body color randomization. 
+ """ + # Collect physics properties to apply + prop_schema_map = { + "collision_props": ("CollisionAPI", "physxCollision:"), + "rigid_props": ("RigidBodyAPI", "physxRigidBody:"), + "articulation_props": ("ArticulationRootAPI", "physxArticulation:"), + "joint_drive_props": ("DriveAPI", "physxJoint:"), + "mass_props": ("MassAPI", "physics:"), + "deformable_props": ("DeformableAPI", "physxDeformable:"), + } + props = [] + for name, (api, prefix) in prop_schema_map.items(): + prop_cfg = getattr(cfg, name, None) + if prop_cfg: + props.append((prop_cfg, api, prefix)) + + # Check if we need to create randomizable visual materials + create_visual_materials = getattr(cfg, "randomizable_visual_materials", False) + + if not props and not create_visual_materials: + return None + + # Copy USD and all referenced layers to temp directory + stage = Usd.Stage.Open(usd_path) + if not stage: + return None + layers = [l for l in stage.GetUsedLayers() if not l.anonymous] + paths = [l.identifier for l in layers] + del stage + + common_root = os.path.commonpath(paths) if len(paths) > 1 else os.path.dirname(paths[0]) + temp_dir = tempfile.mkdtemp(prefix="isaaclab_usd_") + path_map = {p: os.path.join(temp_dir, os.path.relpath(p, common_root)) for p in paths} + for src, dst in path_map.items(): + os.makedirs(os.path.dirname(dst), exist_ok=True) + shutil.copy2(src, dst) + + temp_path = path_map[usd_path] + temp_stage = Usd.Stage.Open(temp_path) + if not temp_stage: + return None + + # Modify physics properties in each layer + modified = False + for layer in (l for l in temp_stage.GetUsedLayers() if not l.anonymous): + stack = list(layer.rootPrims.values()) + while stack: + prim_spec = stack.pop() + stack.extend(prim_spec.nameChildren.values()) + + # Get applied API schemas + schemas = prim_spec.GetInfo("apiSchemas") + apis = set() + if schemas: + for attr in ("explicitItems", "prependedItems", "appendedItems"): + apis.update(str(s) for s in getattr(schemas, attr, [])) + + # Apply matching 
properties + for prop_cfg, api_schema, prefix in props: + if not any(api_schema in a for a in apis): + continue + modified = True + for field, value in prop_cfg.to_dict().items(): + if value is None or field.startswith("_") or hasattr(value, "to_dict"): + continue + vtype = { + bool: Sdf.ValueTypeNames.Bool, + int: Sdf.ValueTypeNames.Int, + float: Sdf.ValueTypeNames.Float, + }.get(type(value)) + if vtype: + attr_name = f"{prefix}{to_camel_case(field, 'cC')}" + path = prim_spec.path.AppendProperty(attr_name) + attr = layer.GetAttributeAtPath(path) or Sdf.AttributeSpec(prim_spec, attr_name, vtype) + attr.default = value + layer.Save() + + # Create randomizable visual materials in the source layer + if create_visual_materials: + materials_modified = _add_visual_materials_to_layer(temp_stage, cfg) + modified = modified or materials_modified + + del temp_stage + return temp_path if modified else None + + +def _add_visual_materials_to_layer(stage: Usd.Stage, cfg: from_files_cfg.FileCfg) -> bool: + """Create UsdPreviewSurface materials for per-body color randomization. + + This function enables per-body color randomization while keeping visual geometry + INSTANCED for memory efficiency. Supports two modes: + + - ``"full"``: Each visual body part gets its own unique material (fully random colors) + - ``"style"``: Body parts sharing the same original material share a randomizable material + (preserves color groupings, like changing the robot's color scheme) + + The approach: + 1. Find link prims that have 'visuals' children + 2. Create UsdPreviewSurface materials in a RandomizableMaterials container + 3. Bind materials to 'visuals' prims with 'strongerThanDescendants' binding strength + 4. The binding strength overrides instance proxy bindings, enabling per-env colors + without breaking geometry instancing + + Args: + stage: The USD stage (opened from temp copy). + cfg: The configuration instance. + + Returns: + True if materials were added, False otherwise. 
+ """ + from pxr import Usd, UsdShade # noqa: PLC0415 + + root_layer = stage.GetRootLayer() + default_prim = stage.GetDefaultPrim() + if not default_prim: + logger.warning("[randomizable_visual_materials] No default prim found") + return False + + default_prim_path = default_prim.GetPath() + stage.Load() + + # Get mode: "full" (each body gets unique material) or "style" (preserve material groups) + mode = getattr(cfg, "randomizable_visual_materials_mode", "full") + + # Find link prims that have a 'visuals' child (these are NOT instance proxies) + link_prims_with_visuals = [] + for prim in stage.TraverseAll(): + path_str = str(prim.GetPath()) + if "/visuals" in path_str or "/collision" in path_str: + continue + visuals_child = prim.GetChild("visuals") + if visuals_child and visuals_child.IsValid(): + link_prims_with_visuals.append(prim) + + if not link_prims_with_visuals: + logger.info("[randomizable_visual_materials] No link prims with visuals found") + return False + + # Create materials container + materials_path = getattr(cfg, "randomizable_visual_materials_path", "RandomizableMaterials") + container_path = f"{default_prim_path}/{materials_path}" + + container_spec = Sdf.CreatePrimInLayer(root_layer, container_path) + container_spec.specifier = Sdf.SpecifierDef + container_spec.typeName = "Scope" + + # For "style" mode, detect original material bindings and group by material name + original_material_map = {} # visuals_path -> original_material_name + if mode == "style": + for link_prim in link_prims_with_visuals: + visuals_prim = link_prim.GetChild("visuals") + if not visuals_prim: + continue + # Find the first mesh inside visuals and get its material binding + for child in stage.Traverse(Usd.TraverseInstanceProxies()): + if not child.IsA(UsdGeom.Mesh): + continue + child_path = str(child.GetPath()) + visuals_path = str(visuals_prim.GetPath()) + if not child_path.startswith(visuals_path): + continue + if "collision" in child_path.lower(): + continue + # Get the 
bound material + binding_api = UsdShade.MaterialBindingAPI(child) + bound_mat, _ = binding_api.ComputeBoundMaterial() + if bound_mat: + mat_name = bound_mat.GetPrim().GetName() + original_material_map[visuals_path] = mat_name + break + + # Create materials - one per group in "style" mode, one per link in "full" mode + created_materials = {} # material_name -> mat_path (for style mode) + mat_index = 0 + + def _create_material(mat_name: str) -> Sdf.Path: + """Create a UsdPreviewSurface material and return its path.""" + nonlocal mat_index + mat_path = Sdf.Path(f"{container_path}/mat_{mat_index}") + shader_path = mat_path.AppendChild("Shader") + mat_index += 1 + + # Create Material prim + mat_spec = Sdf.CreatePrimInLayer(root_layer, mat_path) + mat_spec.specifier = Sdf.SpecifierDef + mat_spec.typeName = "Material" + + # Create UsdPreviewSurface Shader + shader_spec = Sdf.CreatePrimInLayer(root_layer, shader_path) + shader_spec.specifier = Sdf.SpecifierDef + shader_spec.typeName = "Shader" + + id_attr = Sdf.AttributeSpec(shader_spec, "info:id", Sdf.ValueTypeNames.Token) + id_attr.default = "UsdPreviewSurface" + + diffuse_attr = Sdf.AttributeSpec(shader_spec, "inputs:diffuseColor", Sdf.ValueTypeNames.Color3f) + diffuse_attr.default = Gf.Vec3f(0.8, 0.8, 0.8) + + Sdf.AttributeSpec(shader_spec, "outputs:surface", Sdf.ValueTypeNames.Token) + mat_surface_attr = Sdf.AttributeSpec(mat_spec, "outputs:surface", Sdf.ValueTypeNames.Token) + mat_surface_attr.connectionPathList.explicitItems = [shader_path.AppendProperty("outputs:surface")] + + return mat_path + + # Bind materials to visuals prims + for link_prim in link_prims_with_visuals: + link_path = link_prim.GetPath() + visuals_path = link_path.AppendChild("visuals") + visuals_path_str = str(visuals_path) + + # Determine which material to use + if mode == "style" and visuals_path_str in original_material_map: + original_mat_name = original_material_map[visuals_path_str] + if original_mat_name not in created_materials: + 
created_materials[original_mat_name] = _create_material(original_mat_name) + mat_path = created_materials[original_mat_name] + else: + # Full mode: create unique material for each + mat_path = _create_material(f"body_{mat_index}") + + # Create override for the VISUALS prim to add material binding + # KEY: Use 'strongerThanDescendants' to override instance proxy bindings + visuals_spec = root_layer.GetPrimAtPath(visuals_path) + if not visuals_spec: + visuals_spec = Sdf.CreatePrimInLayer(root_layer, visuals_path) + visuals_spec.specifier = Sdf.SpecifierOver + + # Add MaterialBindingAPI + visuals_spec.SetInfo("apiSchemas", Sdf.TokenListOp.Create(prependedItems=["MaterialBindingAPI"])) + + # Create material:binding with 'strongerThanDescendants' binding strength + binding_rel = Sdf.RelationshipSpec(visuals_spec, "material:binding", custom=False) + binding_rel.targetPathList.explicitItems = [mat_path] + binding_rel.SetInfo("bindMaterialAs", "strongerThanDescendants") + + root_layer.Save() + num_materials = mat_index + if mode == "style": + logger.info( + f"[randomizable_visual_materials] Style mode: created {num_materials} materials " + f"for {len(link_prims_with_visuals)} visuals (grouped by original material)" + ) + else: + logger.info( + f"[randomizable_visual_materials] Full mode: created {num_materials} materials " + f"for {len(link_prims_with_visuals)} visuals (one per body)" + ) + + return True + + @clone def spawn_from_usd( prim_path: str, @@ -311,18 +570,29 @@ def _spawn_from_usd_file( if file_status == 0: raise FileNotFoundError(f"USD file not found at path: '{usd_path}'.") + # Handle physics properties via layer copying to preserve instancing + # Note: We cannot avoid file I/O here because: + # 1. USD prototypes are read-only after stage composition + # 2. Modifying prototype prims at runtime doesn't propagate to instance proxies + # 3. 
The only way to modify instanced prim properties is to modify the source layer + spawn_usd_path = usd_path + resolved_path = usd_path if file_status != 2 else retrieve_file_path(usd_path, force_download=False) + modified_path = _create_modified_usd_with_overrides(resolved_path, cfg) + if modified_path: + spawn_usd_path = modified_path + if _world_size > 1: lock_path = os.path.join(tempfile.gettempdir(), "isaaclab_usd_spawn.lock") lock_fd = open(lock_path, "w") # noqa: SIM115 fcntl.flock(lock_fd, fcntl.LOCK_EX) try: - if file_status == 2: - usd_path = retrieve_file_path(usd_path, force_download=False) + if file_status == 2 and spawn_usd_path == usd_path: + spawn_usd_path = retrieve_file_path(usd_path, force_download=False) stage = get_current_stage() if not stage.GetPrimAtPath(prim_path).IsValid(): create_prim( prim_path, - usd_path=usd_path, + usd_path=spawn_usd_path, translation=translation, orientation=orientation, scale=cfg.scale, @@ -339,33 +609,14 @@ def _spawn_from_usd_file( if hasattr(cfg, "variants") and cfg.variants is not None: select_usd_variants(prim_path, cfg.variants) - # modify rigid body properties - if cfg.rigid_props is not None: - schemas.modify_rigid_body_properties(prim_path, cfg.rigid_props) - # modify collision properties - if cfg.collision_props is not None: - schemas.modify_collision_properties(prim_path, cfg.collision_props) - # modify mass properties - if cfg.mass_props is not None: - schemas.modify_mass_properties(prim_path, cfg.mass_props) - - # modify articulation root properties - if cfg.articulation_props is not None: - schemas.modify_articulation_root_properties(prim_path, cfg.articulation_props) - # modify tendon properties + # Modify physics properties + # Note: Most properties are handled via layer copying above to preserve instancing. + # Tendons use multi-instance API schemas (e.g. PhysxTendonAxisRootAPI:tendon0) which + # require special handling via runtime modification. 
if cfg.fixed_tendons_props is not None: schemas.modify_fixed_tendon_properties(prim_path, cfg.fixed_tendons_props) if cfg.spatial_tendons_props is not None: schemas.modify_spatial_tendon_properties(prim_path, cfg.spatial_tendons_props) - # define drive API on the joints - # note: these are only for setting low-level simulation properties. all others should be set or are - # and overridden by the articulation/actuator properties. - if cfg.joint_drive_props is not None: - schemas.modify_joint_drive_properties(prim_path, cfg.joint_drive_props) - - # modify deformable body properties - if cfg.deformable_props is not None: - schemas.modify_deformable_body_properties(prim_path, cfg.deformable_props) # apply visual material if cfg.visual_material is not None: @@ -381,6 +632,10 @@ def _spawn_from_usd_file( # apply material bind_visual_material(prim_path, material_path, stage=stage) + # Note: randomizable_visual_materials are created at the layer level in + # _create_modified_usd_with_overrides to enable per-body color randomization. + # They get copied with the asset when cloner uses Sdf.CopySpec. + # return the prim return stage.GetPrimAtPath(prim_path) diff --git a/source/isaaclab/isaaclab/sim/spawners/from_files/from_files_cfg.py b/source/isaaclab/isaaclab/sim/spawners/from_files/from_files_cfg.py index 0de0b85911ea..f392775872c6 100644 --- a/source/isaaclab/isaaclab/sim/spawners/from_files/from_files_cfg.py +++ b/source/isaaclab/isaaclab/sim/spawners/from_files/from_files_cfg.py @@ -67,6 +67,34 @@ class FileCfg(RigidObjectSpawnerCfg, DeformableObjectSpawnerCfg): If None, then no visual material will be added. """ + randomizable_visual_materials: bool = False + """Whether to create randomizable UsdPreviewSurface materials for visual meshes. Defaults to False. + + When True, the spawner will: + + 1. Find all visual meshes in the asset (paths containing "visuals", excluding "collision") + 2. Create UsdPreviewSurface materials in a RandomizableMaterials container + 3. 
Bind each visuals prim to its corresponding material with 'strongerThanDescendants' + + This enables per-environment visual material randomization while keeping geometry instanced. + The materials can be randomized using event terms like + :class:`~isaaclab.envs.mdp.events.randomize_visual_color_instanced`. + """ + + randomizable_visual_materials_mode: str = "full" + """Mode for creating randomizable materials. Defaults to "full". + + - ``"full"``: Each visual body part gets its own unique material (fully random colors). + - ``"style"``: Body parts that originally shared the same material will share the same + randomizable material (preserves color groupings, like changing the robot's color scheme). + """ + + randomizable_visual_materials_path: str = "RandomizableMaterials" + """Path within the asset hierarchy to create randomizable materials. Defaults to "RandomizableMaterials". + + This is relative to the spawned prim's path. + """ + @configclass class UsdFileCfg(FileCfg): diff --git a/source/isaaclab/isaaclab/sim/views/__init__.pyi b/source/isaaclab/isaaclab/sim/views/__init__.pyi index a666958e4387..aeec320b6350 100644 --- a/source/isaaclab/isaaclab/sim/views/__init__.pyi +++ b/source/isaaclab/isaaclab/sim/views/__init__.pyi @@ -4,7 +4,11 @@ # SPDX-License-Identifier: BSD-3-Clause __all__ = [ + "BaseXformPrimView", "XformPrimView", + "XformPrimViewFactory", ] +from .base_xform_prim_view import BaseXformPrimView from .xform_prim_view import XformPrimView +from .xform_prim_view_factory import XformPrimViewFactory diff --git a/source/isaaclab/isaaclab/sim/views/base_xform_prim_view.py b/source/isaaclab/isaaclab/sim/views/base_xform_prim_view.py new file mode 100644 index 000000000000..29d8469433d3 --- /dev/null +++ b/source/isaaclab/isaaclab/sim/views/base_xform_prim_view.py @@ -0,0 +1,111 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +"""Abstract base class for batched prim transform views.""" + +from __future__ import annotations + +import abc +from collections.abc import Sequence + +import torch + + +class BaseXformPrimView(abc.ABC): + """Abstract interface for reading and writing world-space transforms of multiple prims. + + Backend-specific implementations (USD/Fabric, Newton GPU state, etc.) subclass + this to provide efficient batched pose queries. The factory + :class:`~isaaclab.sim.views.XformPrimViewFactory` selects the correct + implementation at runtime based on the active physics backend. + """ + + @property + @abc.abstractmethod + def count(self) -> int: + """Number of prims in this view.""" + ... + + @abc.abstractmethod + def get_world_poses( + self, indices: Sequence[int] | None = None + ) -> tuple[torch.Tensor, torch.Tensor]: + """Get world-space positions and orientations for prims in the view. + + Args: + indices: Subset of prims to query. ``None`` means all prims. + + Returns: + A tuple ``(positions (M, 3), orientations (M, 4))``. + """ + ... + + @abc.abstractmethod + def set_world_poses( + self, + positions: torch.Tensor | None = None, + orientations: torch.Tensor | None = None, + indices: Sequence[int] | None = None, + ) -> None: + """Set world-space positions and/or orientations for prims in the view. + + Args: + positions: World-space positions ``(M, 3)``. ``None`` leaves positions unchanged. + orientations: World-space quaternions ``(M, 4)``. ``None`` leaves orientations unchanged. + indices: Subset of prims to update. ``None`` means all prims. + """ + ... + + @abc.abstractmethod + def get_local_poses( + self, indices: Sequence[int] | None = None + ) -> tuple[torch.Tensor, torch.Tensor]: + """Get local-space positions and orientations for prims in the view. + + Args: + indices: Subset of prims to query. ``None`` means all prims. + + Returns: + A tuple ``(translations (M, 3), orientations (M, 4))``. + """ + ... 
+ + @abc.abstractmethod + def set_local_poses( + self, + translations: torch.Tensor | None = None, + orientations: torch.Tensor | None = None, + indices: Sequence[int] | None = None, + ) -> None: + """Set local-space translations and/or orientations for prims in the view. + + Args: + translations: Local-space translations ``(M, 3)``. ``None`` leaves translations unchanged. + orientations: Local-space quaternions ``(M, 4)``. ``None`` leaves orientations unchanged. + indices: Subset of prims to update. ``None`` means all prims. + """ + ... + + @abc.abstractmethod + def get_scales(self, indices: Sequence[int] | None = None) -> torch.Tensor: + """Get scales for prims in the view. + + Args: + indices: Subset of prims to query. ``None`` means all prims. + + Returns: + A tensor of shape ``(M, 3)``. + """ + ... + + @abc.abstractmethod + def set_scales(self, scales: torch.Tensor, indices: Sequence[int] | None = None) -> None: + """Set scales for prims in the view. + + Args: + scales: Scales ``(M, 3)``. + indices: Subset of prims to update. ``None`` means all prims. + """ + ... diff --git a/source/isaaclab/isaaclab/sim/views/xform_prim_view.py b/source/isaaclab/isaaclab/sim/views/xform_prim_view.py index 211994a7226b..72601f9abc35 100644 --- a/source/isaaclab/isaaclab/sim/views/xform_prim_view.py +++ b/source/isaaclab/isaaclab/sim/views/xform_prim_view.py @@ -10,61 +10,29 @@ import numpy as np import torch -import warp as wp from pxr import Gf, Sdf, Usd, UsdGeom, Vt import isaaclab.sim as sim_utils -from isaaclab.app.settings_manager import SettingsManager -from isaaclab.utils.warp import fabric as fabric_utils + +from .base_xform_prim_view import BaseXformPrimView logger = logging.getLogger(__name__) -class XformPrimView: - """Optimized batched interface for reading and writing transforms of multiple USD prims. +class XformPrimView(BaseXformPrimView): + """Batched interface for reading and writing transforms of multiple USD prims. 
- This class provides efficient batch operations for getting and setting poses (position and orientation) - of multiple prims at once using torch tensors. It is designed for scenarios where you need to manipulate - many prims simultaneously, such as in multi-agent simulations or large-scale procedural generation. + This class provides batch operations for getting and setting poses (position and orientation) + of multiple prims at once using torch tensors via USD's ``XformCache``. The class supports both world-space and local-space pose operations: - **World poses**: Positions and orientations in the global world frame - **Local poses**: Positions and orientations relative to each prim's parent - When Fabric is enabled, the class leverages NVIDIA's Fabric API for GPU-accelerated batch operations: - - - Uses `omni:fabric:worldMatrix` and `omni:fabric:localMatrix` attributes for all Boundable prims - - Performs batch matrix decomposition/composition using Warp kernels on GPU - - Achieves performance comparable to Isaac Sim's XFormPrim implementation - - Works for both physics-enabled and non-physics prims (cameras, meshes, etc.). - Note: renderers typically consume USD-authored camera transforms. - - .. warning:: - **Fabric requires CUDA**: Fabric is only supported with on CUDA devices. - Warp's CPU backend for fabric-array writes has known issues, so attempting to use - Fabric with CPU device (``device="cpu"``) will raise a ValueError at initialization. - - .. note:: - **Fabric Support:** - - When Fabric is enabled, this view ensures prims have the required Fabric hierarchy - attributes (``omni:fabric:localMatrix`` and ``omni:fabric:worldMatrix``). On first Fabric - read, USD-authored transforms initialize Fabric state. Fabric writes can optionally - be mirrored back to USD via :attr:`sync_usd_on_fabric_write`. - - For more information, see the `Fabric Hierarchy documentation`_. - - .. 
_Fabric Hierarchy documentation: https://docs.omniverse.nvidia.com/kit/docs/usdrt/latest/docs/fabric_hierarchy.html - - .. note:: - **Performance Considerations:** - - * Tensor operations are performed on the specified device (CPU/CUDA) - * USD write operations use ``Sdf.ChangeBlock`` for batched updates - * Fabric operations use GPU-accelerated Warp kernels for maximum performance - * For maximum performance, minimize get/set operations within tight loops + For GPU-accelerated Fabric operations, use the PhysX backend variant + obtained via :class:`~isaaclab.sim.views.XformPrimViewFactory`. .. note:: **Transform Requirements:** @@ -85,20 +53,11 @@ def __init__( prim_path: str, device: str = "cpu", validate_xform_ops: bool = True, - sync_usd_on_fabric_write: bool = False, stage: Usd.Stage | None = None, + **kwargs, ): """Initialize the view with matching prims. - This method searches the USD stage for all prims matching the provided path pattern, - validates that they are Xformable with standard transform operations, and stores - references for efficient batch operations. - - We generally recommend to validate the xform operations, as it ensures that the prims are in a consistent state - and have the standard transform operations (translate, orient, scale in that order). - However, if you are sure that the prims are in a consistent state, you can set this to False to improve - performance. This can save around 45-50% of the time taken to initialize the view. - Args: prim_path: USD prim path pattern to match prims. Supports wildcards (``*``) and regex patterns (e.g., ``"/World/Env_.*/Robot"``). See @@ -107,25 +66,22 @@ def __init__( ``"cuda:0"``. Defaults to ``"cpu"``. validate_xform_ops: Whether to validate that the prims have standard xform operations. Defaults to True. - sync_usd_on_fabric_write: Whether to mirror Fabric transform writes back to USD. 
- When True, transform updates are synchronized to USD so that USD data readers (e.g., rendering - cameras) can observe these changes. Defaults to False for better performance. - stage: USD stage to search for prims. Defaults to None, in which case the current active stage - from the simulation context is used. + stage: USD stage to search for prims. Defaults to None, in which case the current + active stage from the simulation context is used. + **kwargs: Additional keyword arguments (ignored). Allows forward-compatible + construction when callers pass backend-specific options like + ``sync_usd_on_fabric_write``. Raises: ValueError: If any matched prim is not Xformable or doesn't have standardized transform operations (translate, orient, scale in that order). """ - # Store configuration self._prim_path = prim_path self._device = device - # Find and validate matching prims stage = sim_utils.get_current_stage() if stage is None else stage self._prims: list[Usd.Prim] = sim_utils.find_matching_prims(prim_path, stage=stage) - # Validate all prims have standard xform operations if validate_xform_ops: for prim in self._prims: sim_utils.standardize_xform_ops(prim) @@ -136,53 +92,8 @@ def __init__( " Use sim_utils.standardize_xform_ops() to prepare the prim." ) - # Determine if Fabric is supported on the device - settings = SettingsManager.instance() - self._use_fabric = bool(settings.get("/physics/fabricEnabled", False)) - - # Check for unsupported Fabric + CPU combination - if self._use_fabric and self._device == "cpu": - logger.warning( - "Fabric mode with Warp fabric-array operations is not supported on CPU devices. " - "While Fabric itself can run on both CPU and GPU, our batch Warp kernels for " - "fabric-array operations require CUDA and are not reliable on the CPU backend. " - "To ensure stability, Fabric is being disabled and execution will fall back " - "to standard USD operations on the CPU. This may impact performance." 
- ) - self._use_fabric = False - - # Check for unsupported Fabric + non-primary CUDA device combination. - # USDRT SelectPrims and Warp fabric arrays only support cuda:0 internally. - # When running on cuda:1 or higher, SelectPrims raises a C++ error regardless of - # the device argument, because USDRT uses the active CUDA context (which is cuda:1). - if self._use_fabric and self._device not in ("cuda", "cuda:0"): - logger.warning( - f"Fabric mode is not supported on device '{self._device}'. " - "USDRT SelectPrims and Warp fabric arrays only support cuda:0. " - "Falling back to standard USD operations. This may impact performance." - ) - self._use_fabric = False - - # Create indices buffer - # Since we iterate over the indices, we need to use range instead of torch tensor self._ALL_INDICES = list(range(len(self._prims))) - # Some prims (e.g., Cameras) require USD-authored transforms for rendering. - # When enabled, mirror Fabric pose writes to USD for those prims. - self._sync_usd_on_fabric_write = sync_usd_on_fabric_write - - # Fabric batch infrastructure (initialized lazily on first use) - self._fabric_initialized = False - self._fabric_usd_sync_done = False - self._fabric_selection = None - self._fabric_to_view: wp.array | None = None - self._view_to_fabric: wp.array | None = None - self._default_view_indices: wp.array | None = None - self._fabric_hierarchy = None - # Create a valid USD attribute name: namespace:name - # Use "isaaclab" namespace to identify our custom attributes - self._view_index_attr = f"isaaclab:view_index:{abs(hash(self))}" - """ Properties. """ @@ -208,15 +119,7 @@ def prim_paths(self) -> list[str]: This property converts each prim to its path string representation. The conversion is performed lazily on first access and cached for subsequent accesses. - - Note: - For most use cases, prefer using :attr:`prims` directly as it provides direct access - to the USD prim objects without the conversion overhead. 
This property is mainly useful - for logging, debugging, or when string paths are explicitly required. """ - # we cache it the first time it is accessed. - # we don't compute it in constructor because it is expensive and we don't need it most of the time. - # users should usually deal with prims directly as they typically need to access the prims directly. if not hasattr(self, "_prim_paths"): self._prim_paths = [prim.GetPath().pathString for prim in self._prims] return self._prim_paths @@ -233,30 +136,14 @@ def set_world_poses( ): """Set world-space poses for prims in the view. - This method sets the position and/or orientation of each prim in world space. - - - When Fabric is enabled, the function writes directly to Fabric's ``omni:fabric:worldMatrix`` - attribute using GPU-accelerated batch operations. - - When Fabric is disabled, the function converts to local space and writes to USD's ``xformOp:translate`` - and ``xformOp:orient`` attributes. - Args: - positions: World-space positions as a tensor of shape (M, 3) where M is the number of prims - to set (either all prims if indices is None, or the number of indices provided). + positions: World-space positions as a tensor of shape (M, 3). Defaults to None, in which case positions are not modified. orientations: World-space orientations as quaternions (w, x, y, z) with shape (M, 4). Defaults to None, in which case orientations are not modified. - indices: Indices of prims to set poses for. Defaults to None, in which case poses are set - for all prims in the view. - - Raises: - ValueError: If positions shape is not (M, 3) or orientations shape is not (M, 4). - ValueError: If the number of poses doesn't match the number of indices provided. + indices: Indices of prims to set poses for. Defaults to None (all prims). 
""" - if self._use_fabric: - self._set_world_poses_fabric(positions, orientations, indices) - else: - self._set_world_poses_usd(positions, orientations, indices) + self._set_world_poses_usd(positions, orientations, indices) def set_local_poses( self, @@ -266,90 +153,42 @@ def set_local_poses( ): """Set local-space poses for prims in the view. - This method sets the position and/or orientation of each prim in local space (relative to - their parent prims). - - The function writes directly to USD's ``xformOp:translate`` and ``xformOp:orient`` attributes. - - Note: - Even in Fabric mode, local pose operations use USD. This behavior is based on Isaac Sim's design - where Fabric is only used for world pose operations. - - Rationale: - - Local pose writes need correct parent-child hierarchy relationships - - USD maintains these relationships correctly and efficiently - - Fabric is optimized for world pose operations, not local hierarchies - Args: - translations: Local-space translations as a tensor of shape (M, 3) where M is the number of prims - to set (either all prims if indices is None, or the number of indices provided). + translations: Local-space translations as a tensor of shape (M, 3). Defaults to None, in which case translations are not modified. orientations: Local-space orientations as quaternions (w, x, y, z) with shape (M, 4). Defaults to None, in which case orientations are not modified. - indices: Indices of prims to set poses for. Defaults to None, in which case poses are set - for all prims in the view. - - Raises: - ValueError: If translations shape is not (M, 3) or orientations shape is not (M, 4). - ValueError: If the number of poses doesn't match the number of indices provided. + indices: Indices of prims to set poses for. Defaults to None (all prims). 
""" - if self._use_fabric: - self._set_local_poses_fabric(translations, orientations, indices) - else: - self._set_local_poses_usd(translations, orientations, indices) + self._set_local_poses_usd(translations, orientations, indices) def set_scales(self, scales: torch.Tensor, indices: Sequence[int] | None = None): """Set scales for prims in the view. - This method sets the scale of each prim in the view. - - - When Fabric is enabled, the function updates scales in Fabric matrices using GPU-accelerated batch operations. - - When Fabric is disabled, the function writes to USD's ``xformOp:scale`` attributes. - Args: - scales: Scales as a tensor of shape (M, 3) where M is the number of prims - to set (either all prims if indices is None, or the number of indices provided). - indices: Indices of prims to set scales for. Defaults to None, in which case scales are set - for all prims in the view. - - Raises: - ValueError: If scales shape is not (M, 3). + scales: Scales as a tensor of shape (M, 3). + indices: Indices of prims to set scales for. Defaults to None (all prims). """ - if self._use_fabric: - self._set_scales_fabric(scales, indices) - else: - self._set_scales_usd(scales, indices) + self._set_scales_usd(scales, indices) def set_visibility(self, visibility: torch.Tensor, indices: Sequence[int] | None = None): """Set visibility for prims in the view. - This method sets the visibility of each prim in the view. - Args: - visibility: Visibility as a boolean tensor of shape (M,) where M is the - number of prims to set (either all prims if indices is None, or the number of indices provided). - indices: Indices of prims to set visibility for. Defaults to None, in which case visibility is set - for all prims in the view. - - Raises: - ValueError: If visibility shape is not (M,). + visibility: Visibility as a boolean tensor of shape (M,). + indices: Indices of prims to set visibility for. Defaults to None (all prims). 
""" - # Resolve indices if indices is None or indices == slice(None): indices_list = self._ALL_INDICES else: indices_list = indices.tolist() if isinstance(indices, torch.Tensor) else list(indices) - # Validate inputs if visibility.shape != (len(indices_list),): raise ValueError(f"Expected visibility shape ({len(indices_list)},), got {visibility.shape}.") - # Set visibility for each prim with Sdf.ChangeBlock(): for idx, prim_idx in enumerate(indices_list): - # Convert prim to imageable imageable = UsdGeom.Imageable(self._prims[prim_idx]) - # Set visibility if visibility[idx]: imageable.MakeVisible() else: @@ -362,115 +201,60 @@ def set_visibility(self, visibility: torch.Tensor, indices: Sequence[int] | None def get_world_poses(self, indices: Sequence[int] | None = None) -> tuple[torch.Tensor, torch.Tensor]: """Get world-space poses for prims in the view. - This method retrieves the position and orientation of each prim in world space by computing - the full transform hierarchy from the prim to the world root. - - - When Fabric is enabled, the function uses Fabric batch operations with Warp kernels. - - When Fabric is disabled, the function uses USD XformCache. - - Note: - Scale and skew are ignored. The returned poses contain only translation and rotation. - Args: - indices: Indices of prims to get poses for. Defaults to None, in which case poses are retrieved - for all prims in the view. + indices: Indices of prims to get poses for. Defaults to None (all prims). Returns: A tuple of (positions, orientations) where: - - positions: Torch tensor of shape (M, 3) containing world-space positions (x, y, z), - where M is the number of prims queried. - - orientations: Torch tensor of shape (M, 4) containing world-space quaternions (w, x, y, z) + - positions: Torch tensor of shape (M, 3) containing world-space positions (x, y, z). + - orientations: Torch tensor of shape (M, 4) containing world-space quaternions (w, x, y, z). 
""" - if self._use_fabric: - return self._get_world_poses_fabric(indices) - else: - return self._get_world_poses_usd(indices) + return self._get_world_poses_usd(indices) def get_local_poses(self, indices: Sequence[int] | None = None) -> tuple[torch.Tensor, torch.Tensor]: """Get local-space poses for prims in the view. - This method retrieves the position and orientation of each prim in local space (relative to - their parent prims). It reads directly from USD's ``xformOp:translate`` and ``xformOp:orient`` attributes. - - Note: - Even in Fabric mode, local pose operations use USD. This behavior is based on Isaac Sim's design - where Fabric is only used for world pose operations. - - Rationale: - - Local pose reads need correct parent-child hierarchy relationships - - USD maintains these relationships correctly and efficiently - - Fabric is optimized for world pose operations, not local hierarchies - - Note: - Scale is ignored. The returned poses contain only translation and rotation. - Args: - indices: Indices of prims to get poses for. Defaults to None, in which case poses are retrieved - for all prims in the view. + indices: Indices of prims to get poses for. Defaults to None (all prims). Returns: A tuple of (translations, orientations) where: - - translations: Torch tensor of shape (M, 3) containing local-space translations (x, y, z), - where M is the number of prims queried. - - orientations: Torch tensor of shape (M, 4) containing local-space quaternions (w, x, y, z) + - translations: Torch tensor of shape (M, 3) containing local-space translations (x, y, z). + - orientations: Torch tensor of shape (M, 4) containing local-space quaternions (w, x, y, z). """ - if self._use_fabric: - return self._get_local_poses_fabric(indices) - else: - return self._get_local_poses_usd(indices) + return self._get_local_poses_usd(indices) def get_scales(self, indices: Sequence[int] | None = None) -> torch.Tensor: """Get scales for prims in the view. 
- This method retrieves the scale of each prim in the view. - - - When Fabric is enabled, the function extracts scales from Fabric matrices using batch operations with - Warp kernels. - - When Fabric is disabled, the function reads from USD's ``xformOp:scale`` attributes. - Args: - indices: Indices of prims to get scales for. Defaults to None, in which case scales are retrieved - for all prims in the view. + indices: Indices of prims to get scales for. Defaults to None (all prims). Returns: - A tensor of shape (M, 3) containing the scales of each prim, where M is the number of prims queried. + A tensor of shape (M, 3) containing the scales of each prim. """ - if self._use_fabric: - return self._get_scales_fabric(indices) - else: - return self._get_scales_usd(indices) + return self._get_scales_usd(indices) def get_visibility(self, indices: Sequence[int] | None = None) -> torch.Tensor: """Get visibility for prims in the view. - This method retrieves the visibility of each prim in the view. - Args: - indices: Indices of prims to get visibility for. Defaults to None, in which case visibility is retrieved - for all prims in the view. + indices: Indices of prims to get visibility for. Defaults to None (all prims). Returns: - A tensor of shape (M,) containing the visibility of each prim, where M is the number of prims queried. - The tensor is of type bool. + A tensor of shape (M,) containing the visibility of each prim (bool). 
""" - # Resolve indices if indices is None or indices == slice(None): indices_list = self._ALL_INDICES else: - # Convert to list if it is a tensor array indices_list = indices.tolist() if isinstance(indices, torch.Tensor) else list(indices) - # Create buffers visibility = torch.zeros(len(indices_list), dtype=torch.bool, device=self._device) - for idx, prim_idx in enumerate(indices_list): - # Get prim imageable = UsdGeom.Imageable(self._prims[prim_idx]) - # Get visibility visibility[idx] = imageable.ComputeVisibility() != UsdGeom.Tokens.invisible - return visibility """ @@ -484,14 +268,11 @@ def _set_world_poses_usd( indices: Sequence[int] | None = None, ): """Set world poses to USD.""" - # Resolve indices if indices is None or indices == slice(None): indices_list = self._ALL_INDICES else: - # Convert to list if it is a tensor array indices_list = indices.tolist() if isinstance(indices, torch.Tensor) else list(indices) - # Validate inputs if positions is not None: if positions.shape != (len(indices_list), 3): raise ValueError( @@ -507,60 +288,41 @@ def _set_world_poses_usd( f"Expected orientations shape ({len(indices_list)}, 4), got {orientations.shape}. " "Number of orientations must match the number of prims in the view." ) - # Vt expects quaternions in xyzw order orientations_array = Vt.QuatdArray.FromNumpy(orientations.cpu().numpy()) else: orientations_array = None - # Create xform cache instance xform_cache = UsdGeom.XformCache(Usd.TimeCode.Default()) - # Set poses for each prim - # We use Sdf.ChangeBlock to minimize notification overhead. 
with Sdf.ChangeBlock(): for idx, prim_idx in enumerate(indices_list): - # Get prim prim = self._prims[prim_idx] - # Get parent prim for local space conversion parent_prim = prim.GetParent() - # Determine what to set world_pos = positions_array[idx] if positions_array is not None else None world_quat = orientations_array[idx] if orientations_array is not None else None - # Convert world pose to local if we have a valid parent - # Note: We don't use :func:`isaaclab.sim.utils.transforms.convert_world_pose_to_local` - # here since it isn't optimized for batch operations. if parent_prim.IsValid() and parent_prim.GetPath() != Sdf.Path.absoluteRootPath: - # Get current world pose if we're only setting one component if positions_array is None or orientations_array is None: - # get prim xform prim_tf = xform_cache.GetLocalToWorldTransform(prim) - # sanitize quaternion - # this is needed, otherwise the quaternion might be non-normalized prim_tf.Orthonormalize() - # populate desired world transform if world_pos is not None: prim_tf.SetTranslateOnly(world_pos) if world_quat is not None: prim_tf.SetRotateOnly(world_quat) else: - # Both position and orientation are provided, create new transform prim_tf = Gf.Matrix4d() prim_tf.SetTranslateOnly(world_pos) prim_tf.SetRotateOnly(world_quat) - # Convert to local space parent_world_tf = xform_cache.GetLocalToWorldTransform(parent_prim) local_tf = prim_tf * parent_world_tf.GetInverse() local_pos = local_tf.ExtractTranslation() local_quat = local_tf.ExtractRotationQuat() else: - # No parent or parent is root, world == local local_pos = world_pos local_quat = world_quat - # Get or create the standard transform operations if local_pos is not None: prim.GetAttribute("xformOp:translate").Set(local_pos) if local_quat is not None: @@ -573,13 +335,11 @@ def _set_local_poses_usd( indices: Sequence[int] | None = None, ): """Set local poses to USD.""" - # Resolve indices if indices is None or indices == slice(None): indices_list = 
self._ALL_INDICES else: indices_list = indices.tolist() if isinstance(indices, torch.Tensor) else list(indices) - # Validate inputs if translations is not None: if translations.shape != (len(indices_list), 3): raise ValueError(f"Expected translations shape ({len(indices_list)}, 3), got {translations.shape}.") @@ -593,7 +353,6 @@ def _set_local_poses_usd( else: orientations_array = None - # Set local poses with Sdf.ChangeBlock(): for idx, prim_idx in enumerate(indices_list): prim = self._prims[prim_idx] @@ -604,18 +363,15 @@ def _set_local_poses_usd( def _set_scales_usd(self, scales: torch.Tensor, indices: Sequence[int] | None = None): """Set scales to USD.""" - # Resolve indices if indices is None or indices == slice(None): indices_list = self._ALL_INDICES else: indices_list = indices.tolist() if isinstance(indices, torch.Tensor) else list(indices) - # Validate inputs if scales.shape != (len(indices_list), 3): raise ValueError(f"Expected scales shape ({len(indices_list)}, 3), got {scales.shape}.") scales_array = Vt.Vec3dArray.FromNumpy(scales.cpu().numpy()) - # Set scales for each prim with Sdf.ChangeBlock(): for idx, prim_idx in enumerate(indices_list): prim = self._prims[prim_idx] @@ -623,51 +379,35 @@ def _set_scales_usd(self, scales: torch.Tensor, indices: Sequence[int] | None = def _get_world_poses_usd(self, indices: Sequence[int] | None = None) -> tuple[torch.Tensor, torch.Tensor]: """Get world poses from USD.""" - # Resolve indices if indices is None or indices == slice(None): indices_list = self._ALL_INDICES else: - # Convert to list if it is a tensor array indices_list = indices.tolist() if isinstance(indices, torch.Tensor) else list(indices) - # Create buffers positions = Vt.Vec3dArray(len(indices_list)) orientations = Vt.QuatdArray(len(indices_list)) - # Create xform cache instance xform_cache = UsdGeom.XformCache(Usd.TimeCode.Default()) - # Note: We don't use :func:`isaaclab.sim.utils.transforms.resolve_prim_pose` - # here since it isn't optimized for 
batch operations. for idx, prim_idx in enumerate(indices_list): - # Get prim prim = self._prims[prim_idx] - # get prim xform prim_tf = xform_cache.GetLocalToWorldTransform(prim) - # sanitize quaternion - # this is needed, otherwise the quaternion might be non-normalized prim_tf.Orthonormalize() - # extract position and orientation positions[idx] = prim_tf.ExtractTranslation() orientations[idx] = prim_tf.ExtractRotationQuat() - # move to torch tensors positions = torch.tensor(np.array(positions), dtype=torch.float32, device=self._device) orientations = torch.tensor(np.array(orientations), dtype=torch.float32, device=self._device) return positions, orientations # type: ignore def _get_local_poses_usd(self, indices: Sequence[int] | None = None) -> tuple[torch.Tensor, torch.Tensor]: """Get local poses from USD.""" - # Resolve indices if indices is None or indices == slice(None): indices_list = self._ALL_INDICES else: indices_list = indices.tolist() if isinstance(indices, torch.Tensor) else list(indices) - # Create buffers translations = Vt.Vec3dArray(len(indices_list)) orientations = Vt.QuatdArray(len(indices_list)) - - # Create a fresh XformCache to avoid stale cached values xform_cache = UsdGeom.XformCache(Usd.TimeCode.Default()) for idx, prim_idx in enumerate(indices_list): @@ -683,458 +423,14 @@ def _get_local_poses_usd(self, indices: Sequence[int] | None = None) -> tuple[to def _get_scales_usd(self, indices: Sequence[int] | None = None) -> torch.Tensor: """Get scales from USD.""" - # Resolve indices if indices is None or indices == slice(None): indices_list = self._ALL_INDICES else: indices_list = indices.tolist() if isinstance(indices, torch.Tensor) else list(indices) - # Create buffers scales = Vt.Vec3dArray(len(indices_list)) - for idx, prim_idx in enumerate(indices_list): prim = self._prims[prim_idx] scales[idx] = prim.GetAttribute("xformOp:scale").Get() - # Convert to tensor return torch.tensor(np.array(scales), dtype=torch.float32, device=self._device) - - 
""" - Internal Functions - Fabric. - """ - - def _set_world_poses_fabric( - self, - positions: torch.Tensor | None = None, - orientations: torch.Tensor | None = None, - indices: Sequence[int] | None = None, - ): - """Set world poses using Fabric GPU batch operations. - - Writes directly to Fabric's ``omni:fabric:worldMatrix`` attribute using Warp kernels. - Changes are propagated through Fabric's hierarchy system but remain GPU-resident. - - For workflows mixing Fabric world pose writes with USD local pose queries, note - that local poses read from USD's xformOp:* attributes, which may not immediately - reflect Fabric changes. For best performance and consistency, use Fabric methods - exclusively (get_world_poses/set_world_poses with Fabric enabled). - """ - # Lazy initialization - if not self._fabric_initialized: - self._initialize_fabric() - - # Resolve indices (treat slice(None) as None for consistency with USD path) - indices_wp = self._resolve_indices_wp(indices) - - count = indices_wp.shape[0] - - # Convert torch to warp (if provided), use dummy arrays for None to avoid Warp kernel issues - if positions is not None: - positions_wp = wp.from_torch(positions) - else: - positions_wp = wp.zeros((0, 3), dtype=wp.float32).to(self._device) - - if orientations is not None: - orientations_wp = wp.from_torch(orientations) - else: - orientations_wp = wp.zeros((0, 4), dtype=wp.float32).to(self._device) - - # Dummy array for scales (not modifying) - scales_wp = wp.zeros((0, 3), dtype=wp.float32).to(self._device) - - # Use cached fabricarray for world matrices - world_matrices = self._fabric_world_matrices - - # Batch compose matrices with a single kernel launch - # Note: world_matrices is a fabricarray on fabric_device, so we must launch on fabric_device - wp.launch( - kernel=fabric_utils.compose_fabric_transformation_matrix_from_warp_arrays, - dim=count, - inputs=[ - world_matrices, - positions_wp, - orientations_wp, - scales_wp, # dummy array instead of None - False, # 
broadcast_positions - False, # broadcast_orientations - False, # broadcast_scales - indices_wp, - self._view_to_fabric, - ], - device=self._fabric_device, - ) - - # Synchronize to ensure kernel completes - wp.synchronize() - - # Update world transforms within Fabric hierarchy - self._fabric_hierarchy.update_world_xforms() - # Fabric now has authoritative data; skip future USD syncs - self._fabric_usd_sync_done = True - # Mirror to USD for renderer-facing prims when enabled. - if self._sync_usd_on_fabric_write: - self._set_world_poses_usd(positions, orientations, indices) - - # Fabric writes are GPU-resident; local pose operations still use USD. - - def _set_local_poses_fabric( - self, - translations: torch.Tensor | None = None, - orientations: torch.Tensor | None = None, - indices: Sequence[int] | None = None, - ): - """Set local poses using USD (matches Isaac Sim's design). - - Note: Even in Fabric mode, local pose operations use USD. - This is Isaac Sim's design: the ``usd=False`` parameter only affects world poses. 
- - Rationale: - - Local pose writes need correct parent-child hierarchy relationships - - USD maintains these relationships correctly and efficiently - - Fabric is optimized for world pose operations, not local hierarchies - """ - self._set_local_poses_usd(translations, orientations, indices) - - def _set_scales_fabric(self, scales: torch.Tensor, indices: Sequence[int] | None = None): - """Set scales using Fabric GPU batch operations.""" - # Lazy initialization - if not self._fabric_initialized: - self._initialize_fabric() - - # Resolve indices (treat slice(None) as None for consistency with USD path) - indices_wp = self._resolve_indices_wp(indices) - - count = indices_wp.shape[0] - - # Convert torch to warp - scales_wp = wp.from_torch(scales) - - # Dummy arrays for positions and orientations (not modifying) - positions_wp = wp.zeros((0, 3), dtype=wp.float32).to(self._device) - orientations_wp = wp.zeros((0, 4), dtype=wp.float32).to(self._device) - - # Use cached fabricarray for world matrices - world_matrices = self._fabric_world_matrices - - # Batch compose matrices on GPU with a single kernel launch - # Note: world_matrices is a fabricarray on fabric_device, so we must launch on fabric_device - wp.launch( - kernel=fabric_utils.compose_fabric_transformation_matrix_from_warp_arrays, - dim=count, - inputs=[ - world_matrices, - positions_wp, # dummy array instead of None - orientations_wp, # dummy array instead of None - scales_wp, - False, # broadcast_positions - False, # broadcast_orientations - False, # broadcast_scales - indices_wp, - self._view_to_fabric, - ], - device=self._fabric_device, - ) - - # Synchronize to ensure kernel completes before syncing - wp.synchronize() - - # Update world transforms to propagate changes - self._fabric_hierarchy.update_world_xforms() - # Fabric now has authoritative data; skip future USD syncs - self._fabric_usd_sync_done = True - # Mirror to USD for renderer-facing prims when enabled. 
- if self._sync_usd_on_fabric_write: - self._set_scales_usd(scales, indices) - - def _get_world_poses_fabric(self, indices: Sequence[int] | None = None) -> tuple[torch.Tensor, torch.Tensor]: - """Get world poses from Fabric using GPU batch operations.""" - # Lazy initialization of Fabric infrastructure - if not self._fabric_initialized: - self._initialize_fabric() - # Sync once from USD to ensure reads see the latest authored transforms - if not self._fabric_usd_sync_done: - self._sync_fabric_from_usd_once() - - # Resolve indices (treat slice(None) as None for consistency with USD path) - indices_wp = self._resolve_indices_wp(indices) - - count = indices_wp.shape[0] - - # Use pre-allocated buffers for full reads, allocate only for partial reads - use_cached_buffers = indices is None or indices == slice(None) - if use_cached_buffers: - # Full read: Use cached buffers (zero allocation overhead!) - positions_wp = self._fabric_positions_buffer - orientations_wp = self._fabric_orientations_buffer - scales_wp = self._fabric_dummy_buffer - else: - # Partial read: Need to allocate buffers of appropriate size - positions_wp = wp.zeros((count, 3), dtype=wp.float32).to(self._device) - orientations_wp = wp.zeros((count, 4), dtype=wp.float32).to(self._device) - scales_wp = self._fabric_dummy_buffer # Always use dummy for scales - - # Use cached fabricarray for world matrices - # This eliminates the 0.06-0.30ms variability from creating fabricarray each call - world_matrices = self._fabric_world_matrices - - # Launch GPU kernel to decompose matrices in parallel - # Note: world_matrices is a fabricarray on fabric_device, so we must launch on fabric_device - wp.launch( - kernel=fabric_utils.decompose_fabric_transformation_matrix_to_warp_arrays, - dim=count, - inputs=[ - world_matrices, - positions_wp, - orientations_wp, - scales_wp, # dummy array instead of None - indices_wp, - self._view_to_fabric, - ], - device=self._fabric_device, - ) - - # Return tensors: zero-copy for cached 
buffers, conversion for partial reads - if use_cached_buffers: - # Zero-copy! The Warp kernel wrote directly into the PyTorch tensors - # We just need to synchronize to ensure the kernel is done - wp.synchronize() - return self._fabric_positions_torch, self._fabric_orientations_torch - else: - # Partial read: Need to convert from Warp to torch - positions = wp.to_torch(positions_wp) - orientations = wp.to_torch(orientations_wp) - return positions, orientations - - def _get_local_poses_fabric(self, indices: Sequence[int] | None = None) -> tuple[torch.Tensor, torch.Tensor]: - """Get local poses using USD (matches Isaac Sim's design). - - Note: - Even in Fabric mode, local pose operations use USD's XformCache. - This is Isaac Sim's design: the ``usd=False`` parameter only affects world poses. - - Rationale: - - Local pose computation requires parent transforms which may not be in the view - - USD's XformCache provides efficient hierarchy-aware local transform queries - - Fabric is optimized for world pose operations, not local hierarchies - """ - return self._get_local_poses_usd(indices) - - def _get_scales_fabric(self, indices: Sequence[int] | None = None) -> torch.Tensor: - """Get scales from Fabric using GPU batch operations.""" - # Lazy initialization - if not self._fabric_initialized: - self._initialize_fabric() - # Sync once from USD to ensure reads see the latest authored transforms - if not self._fabric_usd_sync_done: - self._sync_fabric_from_usd_once() - - # Resolve indices (treat slice(None) as None for consistency with USD path) - indices_wp = self._resolve_indices_wp(indices) - - count = indices_wp.shape[0] - - # Use pre-allocated buffers for full reads, allocate only for partial reads - use_cached_buffers = indices is None or indices == slice(None) - if use_cached_buffers: - # Full read: Use cached buffers (zero allocation overhead!) 
- scales_wp = self._fabric_scales_buffer - else: - # Partial read: Need to allocate buffer of appropriate size - scales_wp = wp.zeros((count, 3), dtype=wp.float32).to(self._device) - - # Always use dummy buffers for positions and orientations (not needed for scales) - positions_wp = self._fabric_dummy_buffer - orientations_wp = self._fabric_dummy_buffer - - # Use cached fabricarray for world matrices - world_matrices = self._fabric_world_matrices - - # Launch GPU kernel to decompose matrices in parallel - # Note: world_matrices is a fabricarray on fabric_device, so we must launch on fabric_device - wp.launch( - kernel=fabric_utils.decompose_fabric_transformation_matrix_to_warp_arrays, - dim=count, - inputs=[ - world_matrices, - positions_wp, # dummy array instead of None - orientations_wp, # dummy array instead of None - scales_wp, - indices_wp, - self._view_to_fabric, - ], - device=self._fabric_device, - ) - - # Return tensor: zero-copy for cached buffers, conversion for partial reads - if use_cached_buffers: - # Zero-copy! The Warp kernel wrote directly into the PyTorch tensor - wp.synchronize() - return self._fabric_scales_torch - else: - # Partial read: Need to convert from Warp to torch - return wp.to_torch(scales_wp) - - """ - Internal Functions - Initialization. - """ - - def _initialize_fabric(self) -> None: - """Initialize Fabric batch infrastructure for GPU-accelerated pose queries. - - This method ensures all prims have the required Fabric hierarchy attributes - (``omni:fabric:localMatrix`` and ``omni:fabric:worldMatrix``) and creates the necessary - infrastructure for batch GPU operations using Warp. - - Based on the Fabric Hierarchy documentation, when Fabric Scene Delegate is enabled, - all boundable prims should have these attributes. This method ensures they exist - and are properly synchronized with USD. 
- """ - import usdrt - from usdrt import Rt - - # Get USDRT (Fabric) stage - stage_id = sim_utils.get_current_stage_id() - fabric_stage = usdrt.Usd.Stage.Attach(stage_id) - - # Step 1: Ensure all prims have Fabric hierarchy attributes - # According to the documentation, these attributes are created automatically - # when Fabric Scene Delegate is enabled, but we ensure they exist - for i in range(self.count): - rt_prim = fabric_stage.GetPrimAtPath(self.prim_paths[i]) - rt_xformable = Rt.Xformable(rt_prim) - - # Create Fabric hierarchy world matrix attribute if it doesn't exist - has_attr = ( - rt_xformable.HasFabricHierarchyWorldMatrixAttr() - if hasattr(rt_xformable, "HasFabricHierarchyWorldMatrixAttr") - else False - ) - if not has_attr: - rt_xformable.CreateFabricHierarchyWorldMatrixAttr() - - # Best-effort USD->Fabric sync; authoritative initialization happens on first read. - rt_xformable.SetWorldXformFromUsd() - - # Create view index attribute for batch operations - rt_prim.CreateAttribute(self._view_index_attr, usdrt.Sdf.ValueTypeNames.UInt, custom=True) - rt_prim.GetAttribute(self._view_index_attr).Set(i) - - # After syncing all prims, update the Fabric hierarchy to ensure world matrices are computed - self._fabric_hierarchy = usdrt.hierarchy.IFabricHierarchy().get_fabric_hierarchy( - fabric_stage.GetFabricId(), fabric_stage.GetStageIdAsStageId() - ) - self._fabric_hierarchy.update_world_xforms() - - # Step 2: Create index arrays for batch operations - self._default_view_indices = wp.zeros((self.count,), dtype=wp.uint32).to(self._device) - wp.launch( - kernel=fabric_utils.arange_k, - dim=self.count, - inputs=[self._default_view_indices], - device=self._device, - ) - wp.synchronize() # Ensure indices are ready - - # Step 3: Create Fabric selection with attribute filtering - # SelectPrims expects device format like "cuda:0" not "cuda" - # - # KNOWN ISSUE: SelectPrims may return prims in a different order than self._prims - # (which comes from USD's 
find_matching_prims). We create a bidirectional mapping - # (_view_to_fabric and _fabric_to_view) to handle this ordering difference. - # This works correctly for full-view operations but partial indexing still has issues. - # - # NOTE: SelectPrims only supports "cuda:0" regardless of which GPU the simulation - # is running on. In multi-GPU setups, we must use "cuda:0" for SelectPrims even if - # the simulation device is "cuda:1" or higher. - fabric_device = self._device - if self._device == "cuda": - logger.warning("Fabric device is not specified, defaulting to 'cuda:0'.") - fabric_device = "cuda:0" - elif self._device.startswith("cuda:"): - # SelectPrims only supports cuda:0, so we always use cuda:0 for SelectPrims - # even if the simulation is running on a different GPU - if self._device != "cuda:0": - logger.debug( - f"SelectPrims only supports cuda:0. Using cuda:0 for SelectPrims " - f"even though simulation device is {self._device}." - ) - fabric_device = "cuda:0" - - self._fabric_selection = fabric_stage.SelectPrims( - require_attrs=[ - (usdrt.Sdf.ValueTypeNames.UInt, self._view_index_attr, usdrt.Usd.Access.Read), - (usdrt.Sdf.ValueTypeNames.Matrix4d, "omni:fabric:worldMatrix", usdrt.Usd.Access.ReadWrite), - ], - device=fabric_device, - ) - - # Step 4: Create bidirectional mapping between view and fabric indices - # Note: fabric_to_view is tied to fabric_device (cuda:0) because it's created from SelectPrims. - # view_to_fabric must also be on fabric_device since it's always used with fabricarrays in kernels. 
- self._view_to_fabric = wp.zeros((self.count,), dtype=wp.uint32).to(fabric_device) - self._fabric_to_view = wp.fabricarray(self._fabric_selection, self._view_index_attr) - - wp.launch( - kernel=fabric_utils.set_view_to_fabric_array, - dim=self._fabric_to_view.shape[0], - inputs=[self._fabric_to_view, self._view_to_fabric], - device=fabric_device, - ) - # Synchronize to ensure mapping is ready before any operations - wp.synchronize() - - # Pre-allocate reusable output buffers for read operations - self._fabric_positions_torch = torch.zeros((self.count, 3), dtype=torch.float32, device=self._device) - self._fabric_orientations_torch = torch.zeros((self.count, 4), dtype=torch.float32, device=self._device) - self._fabric_scales_torch = torch.zeros((self.count, 3), dtype=torch.float32, device=self._device) - - # Create Warp views of the PyTorch tensors - self._fabric_positions_buffer = wp.from_torch(self._fabric_positions_torch, dtype=wp.float32) - self._fabric_orientations_buffer = wp.from_torch(self._fabric_orientations_torch, dtype=wp.float32) - self._fabric_scales_buffer = wp.from_torch(self._fabric_scales_torch, dtype=wp.float32) - - # Dummy array for unused outputs (always empty) - self._fabric_dummy_buffer = wp.zeros((0, 3), dtype=wp.float32).to(self._device) - - # Cache fabricarray for world matrices to avoid recreation overhead - # Refs: https://docs.omniverse.nvidia.com/kit/docs/usdrt/latest/docs/usdrt_prim_selection.html - # https://docs.omniverse.nvidia.com/kit/docs/usdrt/latest/docs/scenegraph_use.html - self._fabric_world_matrices = wp.fabricarray(self._fabric_selection, "omni:fabric:worldMatrix") - - # Cache Fabric stage to avoid expensive get_current_stage() calls - self._fabric_stage = fabric_stage - - # Store fabric_device for use in kernel launches that involve fabricarrays - self._fabric_device = fabric_device - - self._fabric_initialized = True - # Force a one-time USD->Fabric sync on first read to pick up any USD edits - # made after the view was 
constructed. - self._fabric_usd_sync_done = False - - def _sync_fabric_from_usd_once(self) -> None: - """Sync Fabric world matrices from USD once, on the first read.""" - # Ensure Fabric is initialized - if not self._fabric_initialized: - self._initialize_fabric() - - # Read authoritative transforms from USD and write once into Fabric. - positions_usd, orientations_usd = self._get_world_poses_usd() - scales_usd = self._get_scales_usd() - - prev_sync = self._sync_usd_on_fabric_write - self._sync_usd_on_fabric_write = False - self._set_world_poses_fabric(positions_usd, orientations_usd) - self._set_scales_fabric(scales_usd) - self._sync_usd_on_fabric_write = prev_sync - - self._fabric_usd_sync_done = True - - def _resolve_indices_wp(self, indices: Sequence[int] | None) -> wp.array: - """Resolve view indices as a Warp array.""" - if indices is None or indices == slice(None): - if self._default_view_indices is None: - raise RuntimeError("Fabric indices are not initialized.") - return self._default_view_indices - indices_list = indices.tolist() if isinstance(indices, torch.Tensor) else list(indices) - return wp.array(indices_list, dtype=wp.uint32).to(self._device) diff --git a/source/isaaclab/isaaclab/sim/views/xform_prim_view_factory.py b/source/isaaclab/isaaclab/sim/views/xform_prim_view_factory.py new file mode 100644 index 000000000000..57b673d57952 --- /dev/null +++ b/source/isaaclab/isaaclab/sim/views/xform_prim_view_factory.py @@ -0,0 +1,44 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +"""Factory for creating backend-specific XformPrimView instances.""" + +from __future__ import annotations + +from isaaclab.utils.backend_utils import FactoryBase + +from .base_xform_prim_view import BaseXformPrimView + + +class XformPrimViewFactory(FactoryBase, BaseXformPrimView): + """Factory that selects a :class:`BaseXformPrimView` implementation based on the active physics backend. + + - **PhysX / no backend**: returns the USD/Fabric-based + :class:`~isaaclab.sim.views.XformPrimView`. + - **Newton**: returns the GPU-state-backed + :class:`~isaaclab_newton.sim.views.XformPrimView`. + + Use this in place of constructing an ``XformPrimView`` directly when the + caller is physics-aware (e.g. ray-cast sensors) and wants the fastest + available implementation for the active backend. + """ + + _backend_class_names = {"physx": "XformPrimView", "newton": "XformPrimView"} + + @classmethod + def _get_backend(cls, *args, **kwargs) -> str: + from isaaclab.sim.simulation_context import SimulationContext # noqa: PLC0415 + + ctx = SimulationContext.instance() + if ctx is None: + return "physx" + manager_name = ctx.physics_manager.__name__.lower() + if "newton" in manager_name: + return "newton" + return "physx" + + def __new__(cls, *args, **kwargs) -> BaseXformPrimView: + """Create a new XformPrimView for the active physics backend.""" + return super().__new__(cls, *args, **kwargs) diff --git a/source/isaaclab/isaaclab/terrains/terrain_generator.py b/source/isaaclab/isaaclab/terrains/terrain_generator.py index 58c0c85be9d1..d3bdf1fa86cd 100644 --- a/source/isaaclab/isaaclab/terrains/terrain_generator.py +++ b/source/isaaclab/isaaclab/terrains/terrain_generator.py @@ -270,25 +270,48 @@ def _generate_curriculum_terrains(self): """ def _add_terrain_border(self): - """Add a surrounding border over all the sub-terrains into the terrain meshes.""" - # border parameters - border_size = ( - self.cfg.num_rows * self.cfg.size[0] + 2 * 
self.cfg.border_width, - self.cfg.num_cols * self.cfg.size[1] + 2 * self.cfg.border_width, - ) - inner_size = (self.cfg.num_rows * self.cfg.size[0], self.cfg.num_cols * self.cfg.size[1]) - border_center = ( - self.cfg.num_rows * self.cfg.size[0] / 2, - self.cfg.num_cols * self.cfg.size[1] / 2, - -self.cfg.border_height / 2, - ) - # border mesh - border_meshes = make_border(border_size, inner_size, height=abs(self.cfg.border_height), position=border_center) - border = trimesh.util.concatenate(border_meshes) - # update the faces to have minimal triangles - selector = ~(np.asarray(border.triangles)[:, :, 2] < -0.1).any(1) - border.update_faces(selector) - # add the border to the list of meshes + """Add a surrounding border over all the sub-terrains into the terrain meshes. + + The border is built as a subdivided grid mesh (matching ``horizontal_scale``) + rather than large box primitives. Large triangles on the border can cause + collision detection failures in some physics backends (e.g. Newton/MuJoCo). 
+ """ + bw = self.cfg.border_width + if bw <= 0: + return + + inner_w = self.cfg.num_rows * self.cfg.size[0] + inner_l = self.cfg.num_cols * self.cfg.size[1] + hs = self.cfg.horizontal_scale + + def _make_grid_strip(x0: float, y0: float, width: float, length: float) -> trimesh.Trimesh: + """Create a flat subdivided mesh strip at z=0 with grid spacing ``hs``.""" + nx = max(int(np.ceil(width / hs)), 1) + 1 + ny = max(int(np.ceil(length / hs)), 1) + 1 + xs = np.linspace(x0, x0 + width, nx) + ys = np.linspace(y0, y0 + length, ny) + xx, yy = np.meshgrid(xs, ys, indexing="ij") + vertices = np.zeros((nx * ny, 3)) + vertices[:, 0] = xx.flatten() + vertices[:, 1] = yy.flatten() + faces = [] + for i in range(nx - 1): + for j in range(ny - 1): + v0 = i * ny + j + v1 = v0 + 1 + v2 = (i + 1) * ny + j + v3 = v2 + 1 + faces.append([v0, v2, v1]) + faces.append([v1, v2, v3]) + return trimesh.Trimesh(vertices=vertices, faces=np.array(faces, dtype=np.uint32)) + + strips = [ + _make_grid_strip(-bw, -bw, inner_w + 2 * bw, bw), # bottom + _make_grid_strip(-bw, inner_l, inner_w + 2 * bw, bw), # top + _make_grid_strip(-bw, 0.0, bw, inner_l), # left + _make_grid_strip(inner_w, 0.0, bw, inner_l), # right + ] + border = trimesh.util.concatenate(strips) self.terrain_meshes.append(border) def _add_sub_terrain( diff --git a/source/isaaclab/isaaclab/terrains/terrain_importer.py b/source/isaaclab/isaaclab/terrains/terrain_importer.py index c78f9ae3344e..41bc5316913a 100644 --- a/source/isaaclab/isaaclab/terrains/terrain_importer.py +++ b/source/isaaclab/isaaclab/terrains/terrain_importer.py @@ -222,6 +222,9 @@ def import_ground_plane(self, name: str, size: tuple[float, float] = (2.0e6, 2.0 # get the mesh ground_plane_cfg = sim_utils.GroundPlaneCfg(physics_material=self.cfg.physics_material, size=size, color=color) ground_plane_cfg.func(prim_path, ground_plane_cfg) + # apply collision properties if specified + if self.cfg.collision_props is not None: + 
sim_utils.define_collision_properties(prim_path, self.cfg.collision_props) def import_mesh(self, name: str, mesh: trimesh.Trimesh): """Import a mesh into the simulator. @@ -251,6 +254,10 @@ def import_mesh(self, name: str, mesh: trimesh.Trimesh): create_prim_from_mesh( prim_path, mesh, visual_material=self.cfg.visual_material, physics_material=self.cfg.physics_material ) + # apply collision properties if specified + # note: the actual mesh prim is at {prim_path}/mesh (see create_prim_from_mesh) + if self.cfg.collision_props is not None: + sim_utils.define_collision_properties(f"{prim_path}/mesh", self.cfg.collision_props) def import_usd(self, name: str, usd_path: str): """Import a mesh from a USD file. diff --git a/source/isaaclab/isaaclab/terrains/terrain_importer_cfg.py b/source/isaaclab/isaaclab/terrains/terrain_importer_cfg.py index 279c4c981ebb..4524befb00ce 100644 --- a/source/isaaclab/isaaclab/terrains/terrain_importer_cfg.py +++ b/source/isaaclab/isaaclab/terrains/terrain_importer_cfg.py @@ -96,6 +96,12 @@ class TerrainImporterCfg: This parameter is used only when the ``terrain_type`` is "generator" or "plane". """ + collision_props: sim_utils.CollisionPropertiesCfg | None = None + """The collision properties of the terrain. Defaults to None (use USD defaults). + + This can be used to configure collision parameters such as ``contact_offset`` and ``rest_offset``. + """ + max_init_terrain_level: int | None = None """The maximum initial terrain level for defining environment origins. Defaults to None. 
diff --git a/source/isaaclab/test/sim/test_spawn_from_files.py b/source/isaaclab/test/sim/test_spawn_from_files.py index 009949de4c9c..19539ce6a732 100644 --- a/source/isaaclab/test/sim/test_spawn_from_files.py +++ b/source/isaaclab/test/sim/test_spawn_from_files.py @@ -196,3 +196,123 @@ def test_spawn_usd_with_compliant_contact_material_no_prim_path(sim): material_prim = sim.stage.GetPrimAtPath(material_prim_path) assert material_prim is not None assert not material_prim.IsValid() + + +@pytest.mark.isaacsim_ci +def test_spawn_usd_collision_props_applied_to_instanced_prims(sim): + """Test that collision_props are applied to ALL collision meshes, including instanced ones. + + This is a regression test for an issue where @apply_nested skipped instanced prims, + causing collision properties to not be applied to robot collision meshes that were + USD instances. + """ + from pxr import Usd, UsdPhysics + + # Spawn instanceable robot with collision_props + # The panda_instanceable.usd has instanced collision meshes + rest_offset_value = 0.05 + cfg = sim_utils.UsdFileCfg( + usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/FrankaEmika/panda_instanceable.usd", + collision_props=sim_utils.CollisionPropertiesCfg(rest_offset=rest_offset_value), + ) + prim = cfg.func("/World/Franka", cfg) + + # Check validity + assert prim.IsValid() + + # Find all collision meshes under the robot and verify rest_offset is applied + collision_mesh_count = 0 + props_applied_count = 0 + + # Use Usd.PrimRange with TraverseInstanceProxies to traverse into instanced prims + for descendant in Usd.PrimRange(prim, Usd.TraverseInstanceProxies()): + # Check if this prim has collision API + if descendant.HasAPI(UsdPhysics.CollisionAPI): + collision_mesh_count += 1 + # Check if rest_offset was applied (use approximate comparison for float32 precision) + rest_offset_attr = descendant.GetAttribute("physxCollision:restOffset") + if rest_offset_attr: + actual_value = rest_offset_attr.Get() + if actual_value is not None 
and abs(actual_value - rest_offset_value) < 1e-6: + props_applied_count += 1 + + # There should be collision meshes in the robot + assert collision_mesh_count > 0, "Robot should have collision meshes" + + # ALL collision meshes should have the rest_offset applied + assert props_applied_count == collision_mesh_count, ( + f"collision_props not applied to all collision meshes: " + f"{props_applied_count}/{collision_mesh_count} have rest_offset={rest_offset_value}. " + "This may indicate instanced prims are being skipped." + ) + + +@pytest.mark.isaacsim_ci +@pytest.mark.parametrize( + "material_cfg,material_name", + [ + (sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0)), "PreviewSurface"), + (sim_utils.GlassMdlCfg(glass_color=(0.0, 1.0, 0.0)), "GlassMdl"), + ], +) +def test_spawn_usd_visual_material_binding_on_instanced_prims(sim, material_cfg, material_name): + """Test that visual_material binding propagates to ALL meshes, including instanced ones. + + This tests whether USD material binding inheritance works through instance proxies + for different material types (PreviewSurface and MDL materials). 
+ """ + from pxr import Usd, UsdGeom, UsdShade + + # Spawn instanceable robot with visual_material + cfg = sim_utils.UsdFileCfg( + usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/FrankaEmika/panda_instanceable.usd", + visual_material=material_cfg, + visual_material_path=f"Looks/Test{material_name}", + ) + prim = cfg.func("/World/Franka", cfg) + + # Check validity + assert prim.IsValid() + + # Get the material path + material_path = f"/World/Franka/Looks/Test{material_name}" + stage = prim.GetStage() + material_prim = stage.GetPrimAtPath(material_path) + assert material_prim.IsValid(), f"Material prim should exist at {material_path}" + + # Find all mesh prims under the robot and check material binding + mesh_count = 0 + correct_binding_count = 0 + wrong_binding_meshes = [] + + for descendant in Usd.PrimRange(prim, Usd.TraverseInstanceProxies()): + if descendant.IsA(UsdGeom.Mesh): + mesh_count += 1 + # Check material binding - verify it's OUR material, not pre-existing + binding_api = UsdShade.MaterialBindingAPI(descendant) + bound_material, _ = binding_api.ComputeBoundMaterial() + if bound_material: + bound_path = bound_material.GetPrim().GetPath() + if str(bound_path) == material_path: + correct_binding_count += 1 + else: + wrong_binding_meshes.append((str(descendant.GetPath()), str(bound_path))) + else: + wrong_binding_meshes.append((str(descendant.GetPath()), "None")) + + # There should be meshes in the robot + assert mesh_count > 0, "Robot should have mesh prims" + + # Log results for debugging + print(f"{material_name} binding test: {correct_binding_count}/{mesh_count} meshes bound to our material") + if wrong_binding_meshes: + print(f"Wrong/missing bindings (first 5): {wrong_binding_meshes[:5]}") + + # ALL meshes should be bound to OUR material (not pre-existing ones) + # This verifies that bind_visual_material with stronger_than_descendants=True + # actually overrides existing bindings on all descendants including instance proxies + assert correct_binding_count == 
mesh_count, ( + f"{material_name}: Only {correct_binding_count}/{mesh_count} meshes bound to our material. " + f"Expected all meshes to use {material_path}. " + f"Wrong bindings: {wrong_binding_meshes[:5]}" + ) diff --git a/source/isaaclab/test/sim/test_views_xform_prim.py b/source/isaaclab/test/sim/test_views_xform_prim.py index 3de7a0b357a2..6532972859c5 100644 --- a/source/isaaclab/test/sim/test_views_xform_prim.py +++ b/source/isaaclab/test/sim/test_views_xform_prim.py @@ -254,7 +254,7 @@ def test_xform_prim_view_initialization_empty_pattern(device): @pytest.mark.parametrize("device", ["cpu", "cuda"]) -@pytest.mark.parametrize("backend", ["usd", "fabric"]) +@pytest.mark.parametrize("backend", ["usd"]) def test_get_world_poses(device, backend): """Test getting world poses from XformPrimView.""" _skip_if_backend_unavailable(backend, device) @@ -294,7 +294,7 @@ def test_get_world_poses(device, backend): @pytest.mark.parametrize("device", ["cpu", "cuda"]) -@pytest.mark.parametrize("backend", ["usd", "fabric"]) +@pytest.mark.parametrize("backend", ["usd"]) def test_get_local_poses(device, backend): """Test getting local poses from XformPrimView.""" _skip_if_backend_unavailable(backend, device) @@ -341,7 +341,7 @@ def test_get_local_poses(device, backend): @pytest.mark.parametrize("device", ["cpu", "cuda"]) -@pytest.mark.parametrize("backend", ["usd", "fabric"]) +@pytest.mark.parametrize("backend", ["usd"]) def test_get_scales(device, backend): """Test getting scales from XformPrimView.""" _skip_if_backend_unavailable(backend, device) @@ -399,7 +399,7 @@ def test_get_visibility(device): @pytest.mark.parametrize("device", ["cpu", "cuda"]) -@pytest.mark.parametrize("backend", ["usd", "fabric"]) +@pytest.mark.parametrize("backend", ["usd"]) def test_set_world_poses(device, backend): """Test setting world poses in XformPrimView.""" _skip_if_backend_unavailable(backend, device) @@ -445,7 +445,7 @@ def test_set_world_poses(device, backend): 
@pytest.mark.parametrize("device", ["cpu", "cuda"]) -@pytest.mark.parametrize("backend", ["usd", "fabric"]) +@pytest.mark.parametrize("backend", ["usd"]) def test_set_world_poses_only_positions(device, backend): """Test setting only positions, leaving orientations unchanged.""" _skip_if_backend_unavailable(backend, device) @@ -484,7 +484,7 @@ def test_set_world_poses_only_positions(device, backend): @pytest.mark.parametrize("device", ["cpu", "cuda"]) -@pytest.mark.parametrize("backend", ["usd", "fabric"]) +@pytest.mark.parametrize("backend", ["usd"]) def test_set_world_poses_only_orientations(device, backend): """Test setting only orientations, leaving positions unchanged.""" _skip_if_backend_unavailable(backend, device) @@ -523,7 +523,7 @@ def test_set_world_poses_only_orientations(device, backend): @pytest.mark.parametrize("device", ["cpu", "cuda"]) -@pytest.mark.parametrize("backend", ["usd", "fabric"]) +@pytest.mark.parametrize("backend", ["usd"]) def test_set_world_poses_with_hierarchy(device, backend): """Test setting world poses correctly handles parent transformations.""" _skip_if_backend_unavailable(backend, device) @@ -564,7 +564,7 @@ def test_set_world_poses_with_hierarchy(device, backend): @pytest.mark.parametrize("device", ["cpu", "cuda"]) -@pytest.mark.parametrize("backend", ["usd", "fabric"]) +@pytest.mark.parametrize("backend", ["usd"]) def test_set_local_poses(device, backend): """Test setting local poses in XformPrimView.""" _skip_if_backend_unavailable(backend, device) @@ -609,7 +609,7 @@ def test_set_local_poses(device, backend): @pytest.mark.parametrize("device", ["cpu", "cuda"]) -@pytest.mark.parametrize("backend", ["usd", "fabric"]) +@pytest.mark.parametrize("backend", ["usd"]) def test_set_local_poses_only_translations(device, backend): """Test setting only local translations.""" _skip_if_backend_unavailable(backend, device) @@ -654,7 +654,7 @@ def test_set_local_poses_only_translations(device, backend): @pytest.mark.parametrize("device", 
["cpu", "cuda"]) -@pytest.mark.parametrize("backend", ["usd", "fabric"]) +@pytest.mark.parametrize("backend", ["usd"]) def test_set_scales(device, backend): """Test setting scales in XformPrimView.""" _skip_if_backend_unavailable(backend, device) @@ -899,7 +899,7 @@ def test_index_types_set_methods(device, index_type, method): @pytest.mark.parametrize("device", ["cpu", "cuda"]) -@pytest.mark.parametrize("backend", ["usd", "fabric"]) +@pytest.mark.parametrize("backend", ["usd"]) def test_indices_single_element(device, backend): """Test with a single index.""" _skip_if_backend_unavailable(backend, device) @@ -933,7 +933,7 @@ def test_indices_single_element(device, backend): @pytest.mark.parametrize("device", ["cpu", "cuda"]) -@pytest.mark.parametrize("backend", ["usd", "fabric"]) +@pytest.mark.parametrize("backend", ["usd"]) def test_indices_out_of_order(device, backend): """Test with indices provided in non-sequential order.""" _skip_if_backend_unavailable(backend, device) @@ -968,7 +968,7 @@ def test_indices_out_of_order(device, backend): @pytest.mark.parametrize("device", ["cpu", "cuda"]) -@pytest.mark.parametrize("backend", ["usd", "fabric"]) +@pytest.mark.parametrize("backend", ["usd"]) def test_indices_with_only_positions_or_orientations(device, backend): """Test indices work correctly when setting only positions or only orientations.""" _skip_if_backend_unavailable(backend, device) @@ -1435,81 +1435,3 @@ def test_compare_set_local_poses_with_isaacsim(): torch.testing.assert_close(isaaclab_quat, -isaacsim_quat, atol=1e-4, rtol=0) -""" -Tests - Fabric Operations. 
-""" - - -@pytest.mark.parametrize("device", ["cpu", "cuda"]) -def test_fabric_initialization(device): - """Test XformPrimView initialization with Fabric enabled.""" - _skip_if_backend_unavailable("fabric", device) - - stage = sim_utils.get_current_stage() - - # Create camera prims (Boundable prims that support Fabric) - num_prims = 5 - for i in range(num_prims): - sim_utils.create_prim(f"/World/Cam_{i}", "Camera", translation=(i * 1.0, 0.0, 1.0), stage=stage) - - # Create view with Fabric enabled - view = _create_view("/World/Cam_.*", device=device, backend="fabric") - - # Verify properties - assert view.count == num_prims - assert view.device == device - assert len(view.prims) == num_prims - - -@pytest.mark.parametrize("device", ["cpu", "cuda"]) -def test_fabric_usd_consistency(device): - """Test that Fabric round-trip (write→read) is consistent, matching Isaac Sim's design. - - Note: This does NOT test Fabric vs USD reads on initialization, as Fabric is designed - for write-first workflows. Instead, it tests that: - 1. Fabric write→read round-trip works correctly - 2. 
This matches Isaac Sim's Fabric behavior - """ - _skip_if_backend_unavailable("fabric", device) - - stage = sim_utils.get_current_stage() - - # Create prims - num_prims = 5 - for i in range(num_prims): - sim_utils.create_prim( - f"/World/Cam_{i}", - "Camera", - translation=(i * 1.0, 2.0, 3.0), - orientation=(0.0, 0.0, 0.7071068, 0.7071068), - stage=stage, - ) - - # Create Fabric view - view_fabric = _create_view("/World/Cam_.*", device=device, backend="fabric") - - # Test Fabric write→read round-trip (Isaac Sim's intended workflow) - # Initialize Fabric state by WRITING first - init_positions = torch.zeros((num_prims, 3), dtype=torch.float32, device=device) - init_positions[:, 0] = torch.arange(num_prims, dtype=torch.float32, device=device) - init_positions[:, 1] = 2.0 - init_positions[:, 2] = 3.0 - init_orientations = torch.tensor([[0.0, 0.0, 0.7071068, 0.7071068]] * num_prims, dtype=torch.float32, device=device) - - view_fabric.set_world_poses(init_positions, init_orientations) - - # Read back from Fabric (should match what we wrote) - pos_fabric, quat_fabric = view_fabric.get_world_poses() - torch.testing.assert_close(pos_fabric, init_positions, atol=1e-4, rtol=0) - torch.testing.assert_close(quat_fabric, init_orientations, atol=1e-4, rtol=0) - - # Test another round-trip with different values - new_positions = torch.rand((num_prims, 3), dtype=torch.float32, device=device) * 10.0 - new_orientations = torch.tensor([[0.0, 0.0, 0.0, 1.0]] * num_prims, dtype=torch.float32, device=device) - - view_fabric.set_world_poses(new_positions, new_orientations) - - # Read back from Fabric (should match) - pos_fabric_after, quat_fabric_after = view_fabric.get_world_poses() - torch.testing.assert_close(pos_fabric_after, new_positions, atol=1e-4, rtol=0) - torch.testing.assert_close(quat_fabric_after, new_orientations, atol=1e-4, rtol=0) diff --git a/source/isaaclab_assets/isaaclab_assets/robots/__init__.pyi b/source/isaaclab_assets/isaaclab_assets/robots/__init__.pyi index 
9875e9244fa3..74ea899a9bb0 100644 --- a/source/isaaclab_assets/isaaclab_assets/robots/__init__.pyi +++ b/source/isaaclab_assets/isaaclab_assets/robots/__init__.pyi @@ -31,6 +31,7 @@ __all__ = [ "KINOVA_JACO2_N6S300_CFG", "KINOVA_GEN3_N7_CFG", "KUKA_ALLEGRO_CFG", + "KUKA_ALLEGRO_RANDOMIZABLE_CFG", "PICK_AND_PLACE_CFG", "CRAZYFLIE_CFG", "RIDGEBACK_FRANKA_PANDA_CFG", @@ -78,7 +79,7 @@ from .galbot import GALBOT_ONE_CHARLIE_CFG from .humanoid import HUMANOID_CFG from .humanoid_28 import HUMANOID_28_CFG from .kinova import KINOVA_JACO2_N7S300_CFG, KINOVA_JACO2_N6S300_CFG, KINOVA_GEN3_N7_CFG -from .kuka_allegro import KUKA_ALLEGRO_CFG +from .kuka_allegro import KUKA_ALLEGRO_CFG, KUKA_ALLEGRO_RANDOMIZABLE_CFG from .pick_and_place import PICK_AND_PLACE_CFG from .quadcopter import CRAZYFLIE_CFG from .ridgeback_franka import RIDGEBACK_FRANKA_PANDA_CFG diff --git a/source/isaaclab_assets/isaaclab_assets/robots/anymal.py b/source/isaaclab_assets/isaaclab_assets/robots/anymal.py index 18beaccc5d92..615e372ebd98 100644 --- a/source/isaaclab_assets/isaaclab_assets/robots/anymal.py +++ b/source/isaaclab_assets/isaaclab_assets/robots/anymal.py @@ -142,7 +142,6 @@ articulation_props=sim_utils.ArticulationRootPropertiesCfg( enabled_self_collisions=True, solver_position_iteration_count=4, solver_velocity_iteration_count=0 ), - # collision_props=sim_utils.CollisionPropertiesCfg(contact_offset=0.02, rest_offset=0.0), ), init_state=ArticulationCfg.InitialStateCfg( pos=(0.0, 0.0, 0.6), diff --git a/source/isaaclab_assets/isaaclab_assets/robots/kuka_allegro.py b/source/isaaclab_assets/isaaclab_assets/robots/kuka_allegro.py index f521f45bff57..12e0d4afd32a 100644 --- a/source/isaaclab_assets/isaaclab_assets/robots/kuka_allegro.py +++ b/source/isaaclab_assets/isaaclab_assets/robots/kuka_allegro.py @@ -8,6 +8,7 @@ The following configurations are available: * :obj:`KUKA_ALLEGRO_CFG`: Kuka Allegro with implicit actuator model. 
+* :obj:`KUKA_ALLEGRO_RANDOMIZABLE_CFG`: Kuka Allegro with randomizable visual materials. Reference: @@ -115,3 +116,97 @@ }, soft_joint_pos_limit_factor=1.0, ) + + +KUKA_ALLEGRO_RANDOMIZABLE_CFG = ArticulationCfg( + spawn=sim_utils.UsdFileCfg( + usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Robots/KukaAllegro/kuka.usd", + activate_contact_sensors=True, + randomizable_visual_materials=True, # Enable spawn-time material creation + randomizable_visual_materials_mode="style", # "style" = preserve color groups, "full" = all random + rigid_props=sim_utils.RigidBodyPropertiesCfg( + disable_gravity=True, + retain_accelerations=True, + linear_damping=0.0, + angular_damping=0.0, + max_linear_velocity=1000.0, + max_angular_velocity=1000.0, + max_depenetration_velocity=1000.0, + ), + articulation_props=sim_utils.ArticulationRootPropertiesCfg( + enabled_self_collisions=True, + solver_position_iteration_count=32, + solver_velocity_iteration_count=1, + sleep_threshold=0.005, + stabilization_threshold=0.0005, + ), + joint_drive_props=sim_utils.JointDrivePropertiesCfg(drive_type="force"), + ), + init_state=ArticulationCfg.InitialStateCfg( + pos=(0.0, 0.0, 0.0), + rot=(0.0, 0.0, 0.0, 1.0), + joint_pos={ + "iiwa7_joint_(1|2|7)": 0.0, + "iiwa7_joint_3": 0.7854, + "iiwa7_joint_4": 1.5708, + "iiwa7_joint_(5|6)": -1.5708, + "(index|middle|ring)_joint_0": 0.0, + "(index|middle|ring)_joint_1": 0.3, + "(index|middle|ring)_joint_2": 0.3, + "(index|middle|ring)_joint_3": 0.3, + "thumb_joint_0": 1.5, + "thumb_joint_1": 0.60147215, + "thumb_joint_2": 0.33795027, + "thumb_joint_3": 0.60845138, + }, + ), + actuators={ + "kuka_allegro_actuators": ImplicitActuatorCfg( + joint_names_expr=[ + "iiwa7_joint_(1|2|3|4|5|6|7)", + "index_joint_(0|1|2|3)", + "middle_joint_(0|1|2|3)", + "ring_joint_(0|1|2|3)", + "thumb_joint_(0|1|2|3)", + ], + effort_limit_sim={ + "iiwa7_joint_(1|2|3|4|5|6|7)": 300.0, + "index_joint_(0|1|2|3)": 0.5, + "middle_joint_(0|1|2|3)": 0.5, + "ring_joint_(0|1|2|3)": 0.5, + 
"thumb_joint_(0|1|2|3)": 0.5, + }, + stiffness={ + "iiwa7_joint_(1|2|3|4)": 300.0, + "iiwa7_joint_5": 100.0, + "iiwa7_joint_6": 50.0, + "iiwa7_joint_7": 25.0, + "index_joint_(0|1|2|3)": 3.0, + "middle_joint_(0|1|2|3)": 3.0, + "ring_joint_(0|1|2|3)": 3.0, + "thumb_joint_(0|1|2|3)": 3.0, + }, + damping={ + "iiwa7_joint_(1|2|3|4)": 45.0, + "iiwa7_joint_5": 20.0, + "iiwa7_joint_6": 15.0, + "iiwa7_joint_7": 15.0, + "index_joint_(0|1|2|3)": 0.1, + "middle_joint_(0|1|2|3)": 0.1, + "ring_joint_(0|1|2|3)": 0.1, + "thumb_joint_(0|1|2|3)": 0.1, + }, + friction={ + "iiwa7_joint_(1|2|3|4|5|6|7)": 1.0, + "index_joint_(0|1|2|3)": 0.01, + "middle_joint_(0|1|2|3)": 0.01, + "ring_joint_(0|1|2|3)": 0.01, + "thumb_joint_(0|1|2|3)": 0.01, + }, + armature={ + ".*": 0.01, + }, + ), + }, + soft_joint_pos_limit_factor=1.0, +) diff --git a/source/isaaclab_newton/config/extension.toml b/source/isaaclab_newton/config/extension.toml index 7f6e09f992a5..5fbc20c99341 100644 --- a/source/isaaclab_newton/config/extension.toml +++ b/source/isaaclab_newton/config/extension.toml @@ -1,7 +1,7 @@ [package] # Note: Semantic Versioning is used: https://semver.org/ -version = "0.5.10" +version = "0.5.13" # Description title = "Newton simulation interfaces for IsaacLab core package" diff --git a/source/isaaclab_newton/docs/CHANGELOG.rst b/source/isaaclab_newton/docs/CHANGELOG.rst index 3a7633711b8d..d9c7299f344d 100644 --- a/source/isaaclab_newton/docs/CHANGELOG.rst +++ b/source/isaaclab_newton/docs/CHANGELOG.rst @@ -1,6 +1,41 @@ Changelog --------- +0.5.13 (2026-04-05) +~~~~~~~~~~~~~~~~~~~ + +Fixed +^^^^^ + +* Fixed NaN after env reset caused by stale ``body_q`` in the collision + pipeline. Added :meth:`~isaaclab_newton.physics.NewtonManager.invalidate_fk` + so articulation write methods trigger ``eval_fk`` before the next + ``collide()``. + + +0.5.12 (2026-04-03) +~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Consolidated all NaN debugging logic into :class:`~isaaclab_newton.physics.DebugStateBuffer`.
+ Solver divergence logging and NaN transition logging previously lived as ad-hoc + class methods on ``NewtonManager`` and are now self-contained in the buffer. + The buffer also exports a full MuJoCo Warp data snapshot (``mjw_snapshot.npz``) + on NaN for exact single-step reproduction via ``repro_solver_nan.py --snapshot``. + + +0.5.11 (2026-03-24) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added ``set_friction_index/mask`` and ``set_restitution_index/mask`` methods to + Newton assets for native material property randomization. + + 0.5.10 (2026-04-05) ~~~~~~~~~~~~~~~~~~~ diff --git a/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation.py b/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation.py index 9d62dc0bbed1..b94394ecd8c6 100644 --- a/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation.py +++ b/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation.py @@ -169,6 +169,11 @@ def num_shapes_per_body(self) -> list[int]: self._num_shapes_per_body.append(len(shapes)) return self._num_shapes_per_body + @property + def num_shapes(self) -> int: + """Total number of collision shapes in the articulation.""" + return self.data._num_shapes + @property def joint_names(self) -> list[str]: """Ordered names of joints in articulation.""" @@ -2268,6 +2273,168 @@ def set_inertias_mask( # tell the physics engine that some of the body properties have been updated SimulationManager.add_model_change(SolverNotifyFlags.BODY_INERTIAL_PROPERTIES) + def set_friction_index( + self, + *, + friction: torch.Tensor | wp.array, + shape_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set friction coefficient (mu) for collision shapes using indices. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. 
+ However, to allow graphed pipelines, the mask method must be used. + + Args: + friction: Friction coefficient for shapes. Shape is (len(env_ids), len(shape_ids)). + shape_ids: Shape indices. If None, all shapes are updated. + env_ids: Environment indices. If None, all environments are updated. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + shape_ids = self._resolve_shape_ids(shape_ids) + self.assert_shape_and_dtype(friction, (env_ids.shape[0], shape_ids.shape[0]), wp.float32, "friction") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], shape_ids.shape[0]), + inputs=[ + friction, + env_ids, + shape_ids, + ], + outputs=[ + self.data._sim_bind_shape_material_mu, + ], + device=self.device, + ) + # tell the physics engine that some of the shape properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.SHAPE_PROPERTIES) + + def set_friction_mask( + self, + *, + friction: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set friction coefficient (mu) for collision shapes using masks. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + friction: Friction coefficient for all shapes. Shape is (num_instances, num_shapes). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). 
+ """ + # resolve masks + env_mask = self._resolve_mask(env_mask, self._ALL_ENV_MASK) + shape_mask = self._ALL_SHAPE_MASK + self.assert_shape_and_dtype_mask(friction, (env_mask, shape_mask), wp.float32, "friction") + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_mask, + dim=(env_mask.shape[0], shape_mask.shape[0]), + inputs=[ + friction, + env_mask, + shape_mask, + ], + outputs=[ + self.data._sim_bind_shape_material_mu, + ], + device=self.device, + ) + # tell the physics engine that some of the shape properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.SHAPE_PROPERTIES) + + def set_restitution_index( + self, + *, + restitution: torch.Tensor | wp.array, + shape_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set restitution coefficient for collision shapes using indices. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + restitution: Restitution coefficient for shapes. Shape is (len(env_ids), len(shape_ids)). + shape_ids: Shape indices. If None, all shapes are updated. + env_ids: Environment indices. If None, all environments are updated. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + shape_ids = self._resolve_shape_ids(shape_ids) + self.assert_shape_and_dtype(restitution, (env_ids.shape[0], shape_ids.shape[0]), wp.float32, "restitution") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
+ wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], shape_ids.shape[0]), + inputs=[ + restitution, + env_ids, + shape_ids, + ], + outputs=[ + self.data._sim_bind_shape_material_restitution, + ], + device=self.device, + ) + # tell the physics engine that some of the shape properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.SHAPE_PROPERTIES) + + def set_restitution_mask( + self, + *, + restitution: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set restitution coefficient for collision shapes using masks. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + restitution: Restitution coefficient for all shapes. Shape is (num_instances, num_shapes). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). 
+ """ + # resolve masks + env_mask = self._resolve_mask(env_mask, self._ALL_ENV_MASK) + shape_mask = self._ALL_SHAPE_MASK + self.assert_shape_and_dtype_mask(restitution, (env_mask, shape_mask), wp.float32, "restitution") + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_mask, + dim=(env_mask.shape[0], shape_mask.shape[0]), + inputs=[ + restitution, + env_mask, + shape_mask, + ], + outputs=[ + self.data._sim_bind_shape_material_restitution, + ], + device=self.device, + ) + # tell the physics engine that some of the shape properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.SHAPE_PROPERTIES) + def set_joint_position_target_index( self, *, @@ -3234,6 +3401,8 @@ def _create_buffers(self): self._ALL_JOINT_MASK = wp.ones((self.num_joints,), dtype=wp.bool, device=self.device) self._ALL_BODY_INDICES = wp.array(np.arange(self.num_bodies, dtype=np.int32), device=self.device) self._ALL_BODY_MASK = wp.ones((self.num_bodies,), dtype=wp.bool, device=self.device) + self._ALL_SHAPE_INDICES = wp.array(np.arange(self.num_shapes, dtype=np.int32), device=self.device) + self._ALL_SHAPE_MASK = wp.ones((self.num_shapes,), dtype=wp.bool, device=self.device) self._ALL_FIXED_TENDON_INDICES = wp.array(np.arange(self.num_fixed_tendons, dtype=np.int32), device=self.device) self._ALL_FIXED_TENDON_MASK = wp.ones((self.num_fixed_tendons,), dtype=wp.bool, device=self.device) self._ALL_SPATIAL_TENDON_INDICES = wp.array( @@ -3731,6 +3900,21 @@ def _resolve_body_ids(self, body_ids: Sequence[int] | torch.Tensor | wp.array | return self._ALL_BODY_INDICES return body_ids + def _resolve_shape_ids(self, shape_ids: Sequence[int] | torch.Tensor | wp.array | None) -> wp.array | torch.Tensor: + """Resolve shape indices to a warp array or tensor. + + Args: + shape_ids: Shape indices. If None, then all indices are used. + + Returns: + A warp array of shape indices or a tensor of shape indices. 
+ """ + if (shape_ids is None) or (shape_ids == slice(None)): + return self._ALL_SHAPE_INDICES + elif isinstance(shape_ids, list): + return wp.array(shape_ids, dtype=wp.int32, device=self.device) + return shape_ids + def _resolve_fixed_tendon_ids( self, tendon_ids: Sequence[int] | torch.Tensor | wp.array | None ) -> wp.array | torch.Tensor: diff --git a/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation_data.py b/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation_data.py index 4dc9507dbe59..f0129e77b454 100644 --- a/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation_data.py +++ b/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation_data.py @@ -1323,6 +1323,16 @@ def _create_simulation_bindings(self) -> None: self._sim_bind_joint_velocity_target = wp.zeros( (self._num_instances, 0), dtype=wp.float32, device=self.device ) + # -- shape material properties (for collision shapes) + self._sim_bind_shape_material_mu = self._root_view.get_attribute( + "shape_material_mu", SimulationManager.get_model() + )[:, 0] + self._sim_bind_shape_material_restitution = self._root_view.get_attribute( + "shape_material_restitution", SimulationManager.get_model() + )[:, 0] + self._num_shapes = ( + self._sim_bind_shape_material_mu.shape[1] if len(self._sim_bind_shape_material_mu.shape) > 1 else 1 + ) def _create_buffers(self) -> None: """Create buffers for the root data.""" diff --git a/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/rigid_object.py b/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/rigid_object.py index 5e02e3622985..3e01289557b3 100644 --- a/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/rigid_object.py +++ b/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/rigid_object.py @@ -80,6 +80,11 @@ def num_bodies(self) -> int: """ return 1 + @property + def num_shapes(self) -> int: + """Total number of collision shapes in the rigid object.""" + return 
self.data._num_shapes + @property def body_names(self) -> list[str]: """Ordered names of bodies in the rigid object.""" @@ -984,6 +989,170 @@ def set_inertias_mask( # tell the physics engine that some of the body properties have been updated SimulationManager.add_model_change(SolverNotifyFlags.BODY_INERTIAL_PROPERTIES) + def set_friction_index( + self, + *, + friction: torch.Tensor | wp.array, + shape_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set friction coefficient (mu) for collision shapes using indices. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + friction: Friction coefficient for shapes. Shape is (len(env_ids), len(shape_ids)). + shape_ids: Shape indices. If None, all shapes are updated. + env_ids: Environment indices. If None, all environments are updated. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + shape_ids = self._resolve_shape_ids(shape_ids) + self.assert_shape_and_dtype(friction, (env_ids.shape[0], shape_ids.shape[0]), wp.float32, "friction") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
+ wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], shape_ids.shape[0]), + inputs=[ + friction, + env_ids, + shape_ids, + ], + outputs=[ + self.data._sim_bind_shape_material_mu, + ], + device=self.device, + ) + # tell the physics engine that some of the shape properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.SHAPE_PROPERTIES) + + def set_friction_mask( + self, + *, + friction: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set friction coefficient (mu) for collision shapes using masks. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + friction: Friction coefficient for all shapes. Shape is (num_instances, num_shapes). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # resolve masks + if env_mask is None: + env_mask = self._ALL_ENV_MASK + shape_mask = self._ALL_SHAPE_MASK + self.assert_shape_and_dtype_mask(friction, (env_mask, shape_mask), wp.float32, "friction") + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_mask, + dim=(env_mask.shape[0], shape_mask.shape[0]), + inputs=[ + friction, + env_mask, + shape_mask, + ], + outputs=[ + self.data._sim_bind_shape_material_mu, + ], + device=self.device, + ) + # tell the physics engine that some of the shape properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.SHAPE_PROPERTIES) + + def set_restitution_index( + self, + *, + restitution: torch.Tensor | wp.array, + shape_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set restitution coefficient for collision shapes using indices. + + .. note:: + This method expects partial data. 
+ + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + restitution: Restitution coefficient for shapes. Shape is (len(env_ids), len(shape_ids)). + shape_ids: Shape indices. If None, all shapes are updated. + env_ids: Environment indices. If None, all environments are updated. + """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + shape_ids = self._resolve_shape_ids(shape_ids) + self.assert_shape_and_dtype(restitution, (env_ids.shape[0], shape_ids.shape[0]), wp.float32, "restitution") + # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], shape_ids.shape[0]), + inputs=[ + restitution, + env_ids, + shape_ids, + ], + outputs=[ + self.data._sim_bind_shape_material_restitution, + ], + device=self.device, + ) + # tell the physics engine that some of the shape properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.SHAPE_PROPERTIES) + + def set_restitution_mask( + self, + *, + restitution: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set restitution coefficient for collision shapes using masks. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + restitution: Restitution coefficient for all shapes. Shape is (num_instances, num_shapes). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). 
+ """ + # resolve masks + if env_mask is None: + env_mask = self._ALL_ENV_MASK + shape_mask = self._ALL_SHAPE_MASK + self.assert_shape_and_dtype_mask(restitution, (env_mask, shape_mask), wp.float32, "restitution") + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_mask, + dim=(env_mask.shape[0], shape_mask.shape[0]), + inputs=[ + restitution, + env_mask, + shape_mask, + ], + outputs=[ + self.data._sim_bind_shape_material_restitution, + ], + device=self.device, + ) + # tell the physics engine that some of the shape properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.SHAPE_PROPERTIES) + """ Internal helper. """ @@ -1070,6 +1239,8 @@ def _create_buffers(self): self._ALL_ENV_MASK = wp.ones((self.num_instances,), dtype=wp.bool, device=self.device) self._ALL_BODY_INDICES = wp.array(np.arange(self.num_bodies, dtype=np.int32), device=self.device) self._ALL_BODY_MASK = wp.ones((self.num_bodies,), dtype=wp.bool, device=self.device) + self._ALL_SHAPE_INDICES = wp.array(np.arange(self.num_shapes, dtype=np.int32), device=self.device) + self._ALL_SHAPE_MASK = wp.ones((self.num_shapes,), dtype=wp.bool, device=self.device) # external wrench composer self._instantaneous_wrench_composer = WrenchComposer(self) @@ -1128,6 +1299,24 @@ def _resolve_body_ids(self, body_ids: Sequence[int] | torch.Tensor | wp.array | return wp.array(body_ids, dtype=wp.int32, device=self.device) return body_ids + def _resolve_shape_ids(self, shape_ids: Sequence[int] | torch.Tensor | wp.array | None) -> wp.array | torch.Tensor: + """Resolve shape indices to a warp array or tensor. + + .. note:: + We do not need to convert torch tensors to warp arrays since they never get passed to the TensorAPI views. + + Args: + shape_ids: Shape indices. If None, then all indices are used. + + Returns: + A warp array of shape indices or a tensor of shape indices. 
+ """ + if (shape_ids is None) or (shape_ids == slice(None)): + return self._ALL_SHAPE_INDICES + elif isinstance(shape_ids, list): + return wp.array(shape_ids, dtype=wp.int32, device=self.device) + return shape_ids + """ Internal simulation callbacks. """ diff --git a/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/rigid_object_data.py b/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/rigid_object_data.py index 7a8c85a57753..44fe70f691e9 100644 --- a/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/rigid_object_data.py +++ b/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/rigid_object_data.py @@ -735,6 +735,16 @@ def _create_simulation_bindings(self) -> None: self._sim_bind_body_external_wrench = self._root_view.get_attribute("body_f", SimulationManager.get_state_0())[ :, 0 ] + # -- shape material properties (for collision shapes) + self._sim_bind_shape_material_mu = self._root_view.get_attribute( + "shape_material_mu", SimulationManager.get_model() + )[:, 0] + self._sim_bind_shape_material_restitution = self._root_view.get_attribute( + "shape_material_restitution", SimulationManager.get_model() + )[:, 0] + self._num_shapes = ( + self._sim_bind_shape_material_mu.shape[1] if len(self._sim_bind_shape_material_mu.shape) > 1 else 1 + ) def _create_buffers(self) -> None: """Create buffers for the root data.""" diff --git a/source/isaaclab_newton/isaaclab_newton/assets/rigid_object_collection/rigid_object_collection.py b/source/isaaclab_newton/isaaclab_newton/assets/rigid_object_collection/rigid_object_collection.py index 82bdf7a03003..5f368d8957cd 100644 --- a/source/isaaclab_newton/isaaclab_newton/assets/rigid_object_collection/rigid_object_collection.py +++ b/source/isaaclab_newton/isaaclab_newton/assets/rigid_object_collection/rigid_object_collection.py @@ -111,6 +111,11 @@ def num_bodies(self) -> int: """Number of bodies in the rigid object collection.""" return len(self.body_names) + @property + def num_shapes(self) -> 
int: + """Total number of collision shapes in the rigid object collection.""" + return self.data._num_shapes + @property def body_names(self) -> list[str]: """Ordered names of bodies in the rigid object collection.""" @@ -1050,6 +1055,170 @@ def set_inertias_mask( # Tell the physics engine that some of the body properties have been updated SimulationManager.add_model_change(SolverNotifyFlags.BODY_INERTIAL_PROPERTIES) + def set_friction_index( + self, + *, + friction: torch.Tensor | wp.array, + shape_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set friction coefficient (mu) for collision shapes using indices. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + friction: Friction coefficient for shapes. Shape is (len(env_ids), len(shape_ids)). + shape_ids: Shape indices. If None, all shapes are updated. + env_ids: Environment indices. If None, all environments are updated. 
+ """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + shape_ids = self._resolve_shape_ids(shape_ids) + self.assert_shape_and_dtype(friction, (env_ids.shape[0], shape_ids.shape[0]), wp.float32, "friction") + # Write to consolidated buffer + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], shape_ids.shape[0]), + inputs=[ + friction, + env_ids, + shape_ids, + ], + outputs=[ + self.data._sim_bind_shape_material_mu, + ], + device=self.device, + ) + # Tell the physics engine that some of the shape properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.SHAPE_PROPERTIES) + + def set_friction_mask( + self, + *, + friction: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set friction coefficient (mu) for collision shapes using masks. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + friction: Friction coefficient for all shapes. Shape is (num_instances, num_shapes). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). 
+ """ + # resolve masks + if env_mask is None: + env_mask = self._ALL_ENV_MASK + shape_mask = self._ALL_SHAPE_MASK + self.assert_shape_and_dtype_mask(friction, (env_mask, shape_mask), wp.float32, "friction") + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_mask, + dim=(env_mask.shape[0], shape_mask.shape[0]), + inputs=[ + friction, + env_mask, + shape_mask, + ], + outputs=[ + self.data._sim_bind_shape_material_mu, + ], + device=self.device, + ) + # Tell the physics engine that some of the shape properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.SHAPE_PROPERTIES) + + def set_restitution_index( + self, + *, + restitution: torch.Tensor | wp.array, + shape_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set restitution coefficient for collision shapes using indices. + + .. note:: + This method expects partial data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + restitution: Restitution coefficient for shapes. Shape is (len(env_ids), len(shape_ids)). + shape_ids: Shape indices. If None, all shapes are updated. + env_ids: Environment indices. If None, all environments are updated. 
+ """ + # resolve all indices + env_ids = self._resolve_env_ids(env_ids) + shape_ids = self._resolve_shape_ids(shape_ids) + self.assert_shape_and_dtype(restitution, (env_ids.shape[0], shape_ids.shape[0]), wp.float32, "restitution") + # Write to consolidated buffer + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], shape_ids.shape[0]), + inputs=[ + restitution, + env_ids, + shape_ids, + ], + outputs=[ + self.data._sim_bind_shape_material_restitution, + ], + device=self.device, + ) + # Tell the physics engine that some of the shape properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.SHAPE_PROPERTIES) + + def set_restitution_mask( + self, + *, + restitution: torch.Tensor | wp.array, + env_mask: wp.array | None = None, + ) -> None: + """Set restitution coefficient for collision shapes using masks. + + .. note:: + This method expects full data. + + .. tip:: + Both the index and mask methods have dedicated optimized implementations. Performance is similar for both. + However, to allow graphed pipelines, the mask method must be used. + + Args: + restitution: Restitution coefficient for all shapes. Shape is (num_instances, num_shapes). + env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). + """ + # resolve masks + if env_mask is None: + env_mask = self._ALL_ENV_MASK + shape_mask = self._ALL_SHAPE_MASK + self.assert_shape_and_dtype_mask(restitution, (env_mask, shape_mask), wp.float32, "restitution") + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_mask, + dim=(env_mask.shape[0], shape_mask.shape[0]), + inputs=[ + restitution, + env_mask, + shape_mask, + ], + outputs=[ + self.data._sim_bind_shape_material_restitution, + ], + device=self.device, + ) + # Tell the physics engine that some of the shape properties have been updated + SimulationManager.add_model_change(SolverNotifyFlags.SHAPE_PROPERTIES) + """ Internal helper. 
""" @@ -1137,8 +1306,12 @@ def _create_buffers(self): self._ALL_BODY_INDICES = wp.array( np.arange(self.num_bodies, dtype=np.int32), device=self.device, dtype=wp.int32 ) + self._ALL_SHAPE_INDICES = wp.array( + np.arange(self.num_shapes, dtype=np.int32), device=self.device, dtype=wp.int32 + ) self._ALL_ENV_MASK = wp.ones((self.num_instances,), dtype=wp.bool, device=self.device) self._ALL_BODY_MASK = wp.ones((self.num_bodies,), dtype=wp.bool, device=self.device) + self._ALL_SHAPE_MASK = wp.ones((self.num_shapes,), dtype=wp.bool, device=self.device) # external wrench composer self._instantaneous_wrench_composer = WrenchComposer(self) @@ -1195,6 +1368,21 @@ def _resolve_body_ids(self, body_ids) -> wp.array: return wp.from_torch(body_ids.to(torch.int32), dtype=wp.int32) return body_ids + def _resolve_shape_ids(self, shape_ids: Sequence[int] | torch.Tensor | wp.array | None) -> wp.array | torch.Tensor: + """Resolve shape indices to a warp array or tensor. + + Args: + shape_ids: Shape indices. If None, then all indices are used. + + Returns: + A warp array of shape indices or a tensor of shape indices. 
+ """ + if (shape_ids is None) or (shape_ids == slice(None)): + return self._ALL_SHAPE_INDICES + elif isinstance(shape_ids, list): + return wp.array(shape_ids, dtype=wp.int32, device=self.device) + return shape_ids + def _resolve_env_mask(self, env_mask: wp.array | None) -> wp.array | torch.Tensor: """Resolve environment mask to indices via torch.nonzero.""" if env_mask is not None: diff --git a/source/isaaclab_newton/isaaclab_newton/assets/rigid_object_collection/rigid_object_collection_data.py b/source/isaaclab_newton/isaaclab_newton/assets/rigid_object_collection/rigid_object_collection_data.py index 3fbc1801322f..6b3c53580f50 100644 --- a/source/isaaclab_newton/isaaclab_newton/assets/rigid_object_collection/rigid_object_collection_data.py +++ b/source/isaaclab_newton/isaaclab_newton/assets/rigid_object_collection/rigid_object_collection_data.py @@ -608,6 +608,14 @@ def _create_simulation_bindings(self) -> None: device=_body_inertia_raw.device, copy=False, ) + # -- shape material properties (for collision shapes) + self._sim_bind_shape_material_mu = self._root_view.get_attribute("shape_material_mu", model)[:, :, 0] + self._sim_bind_shape_material_restitution = self._root_view.get_attribute("shape_material_restitution", model)[ + :, :, 0 + ] + self._num_shapes = ( + self._sim_bind_shape_material_mu.shape[1] if len(self._sim_bind_shape_material_mu.shape) > 1 else 1 + ) def _create_buffers(self) -> None: """Create buffers for computing and caching derived quantities.""" diff --git a/source/isaaclab_newton/isaaclab_newton/cloner/newton_replicate.py b/source/isaaclab_newton/isaaclab_newton/cloner/newton_replicate.py index 59e5d2476ccc..0467fb862957 100644 --- a/source/isaaclab_newton/isaaclab_newton/cloner/newton_replicate.py +++ b/source/isaaclab_newton/isaaclab_newton/cloner/newton_replicate.py @@ -155,6 +155,30 @@ def newton_physics_replicate( Returns: Tuple of the populated Newton model builder and stage metadata. 
""" + import os as _os + _export_dir = _os.environ.get("NEWTON_SAVE_CLONER_STAGE", "") + if _export_dir: + import json as _json + _os.makedirs(_export_dir, exist_ok=True) + + _out = _os.path.join(_export_dir, "cloner_stage.usd") + stage.Flatten().Export(_out) + print(f"[cloner] Saved flattened stage to {_out}") + + _pos = positions if positions is not None else torch.zeros((mapping.size(1), 3)) + _info = { + "sources": sources, + "destinations": destinations, + "ignore_paths": ["/World/envs"] + sources, + "up_axis": up_axis, + "simplify_meshes": simplify_meshes, + "num_envs": int(mapping.size(1)), + "positions": _pos.cpu().tolist(), + } + with open(_os.path.join(_export_dir, "cloner_info.json"), "w") as _f: + _json.dump(_info, _f, indent=2) + print(f"[cloner] Saved cloner_info.json") + builder, stage_info = _build_newton_builder_from_mapping( stage=stage, sources=sources, diff --git a/source/isaaclab_newton/isaaclab_newton/physics/__init__.pyi b/source/isaaclab_newton/isaaclab_newton/physics/__init__.pyi index 4e589e6f69d1..9d26f3e24a73 100644 --- a/source/isaaclab_newton/isaaclab_newton/physics/__init__.pyi +++ b/source/isaaclab_newton/isaaclab_newton/physics/__init__.pyi @@ -5,12 +5,23 @@ __all__ = [ "FeatherstoneSolverCfg", + "HydroelasticSDFCfg", "MJWarpSolverCfg", + "NanReplayCfg", "NewtonCfg", + "NewtonCollisionPipelineCfg", "NewtonManager", "NewtonSolverCfg", "XPBDSolverCfg", ] +from .newton_collision_cfg import HydroelasticSDFCfg, NewtonCollisionPipelineCfg from .newton_manager import NewtonManager -from .newton_manager_cfg import FeatherstoneSolverCfg, MJWarpSolverCfg, NewtonCfg, NewtonSolverCfg, XPBDSolverCfg +from .newton_manager_cfg import ( + FeatherstoneSolverCfg, + MJWarpSolverCfg, + NanReplayCfg, + NewtonCfg, + NewtonSolverCfg, + XPBDSolverCfg, +) diff --git a/source/isaaclab_newton/isaaclab_newton/physics/debug_state_buffer.py b/source/isaaclab_newton/isaaclab_newton/physics/debug_state_buffer.py new file mode 100644 index 
000000000000..05f8cd831e16 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/physics/debug_state_buffer.py @@ -0,0 +1,474 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Debug state buffer for NaN incident replay in Newton physics.""" + +from __future__ import annotations + +import logging +import os +from collections.abc import Callable +from datetime import datetime +from typing import TYPE_CHECKING, Any + +import numpy as np +import torch +import warp as wp + +if TYPE_CHECKING: + from newton import Model, State + +logger = logging.getLogger(__name__) + +_MAX_BUFFER_SIZE = 2000 + + +class DebugStateBuffer: + """Rolling buffer of Newton state snapshots with GPU-side NaN detection. + + On every step the current state is copied into a ring buffer (GPU-GPU via + ``State.assign``). NaN detection runs entirely on GPU using a single fused + ``torch.isnan`` + ``any`` per array -- no data ever leaves the device on the + hot path. Only when a NaN is found does the buffer dump to CPU for export. + + When the model has multiple worlds (replicated envs), the buffer identifies + which env(s) contain NaN, exports only those slices, and suppresses future + detection of already-exported env_ids (NaN is sticky). + + After :attr:`max_exports` exports, :attr:`nan_halt` is set and subsequent + ``step`` calls are no-ops. The caller (Newton manager) should check + ``nan_halt`` and raise to stop simulation. + + Diagnostics (solver convergence, forces, accelerations, mass matrix + condition, contact penetration) are recorded alongside state snapshots + when provided via the ``diagnostics`` argument to :meth:`step`. + + When ``solver_data`` is provided, the buffer exports the complete MuJoCo + Warp solver state on NaN for exact single-step reproduction. 
Solver + divergence history is retrospectively analyzed from the diagnostics ring + during export -- no extra GPU->CPU syncs on the hot path. + """ + + def __init__( + self, + model: Model, + buffer_size: int, + export_path: str = ".", + max_exports: int = 1, + scene_exporter: Callable[[str, list[int]], None] | None = None, + ) -> None: + """Initialize the debug state buffer. + + Args: + model: Finalized Newton model (used to allocate state clones and read + world layout). + buffer_size: Number of state snapshots to keep. Capped at + :data:`_MAX_BUFFER_SIZE`. + export_path: Directory for npz export. + max_exports: Maximum number of NaN export events before halting. Each + event exports a distinct set of newly-NaN env_ids. + scene_exporter: Optional callable ``(usd_path, env_ids) -> None`` that + exports USD prim subtrees for the given env_ids. Called once per + export event. If *env_ids* is empty the exporter should export the + whole scene (single-env case). + """ + size = min(max(int(buffer_size), 1), _MAX_BUFFER_SIZE) + self._ring: list[State] = [model.state() for _ in range(size)] + self._diag_ring: list[dict[str, torch.Tensor]] = [{} for _ in range(size)] + self._size: int = size + self._write_idx: int = 0 + self._export_path: str = export_path + self._max_exports: int = max(int(max_exports), 1) + self._scene_exporter = scene_exporter + + self._export_count: int = 0 + self._nan_halt: bool = False + self._exported_envs: set[int] = set() + + # Per-env layout (populated if model has worlds) + self._world_count: int = 0 + self._body_starts: np.ndarray | None = None + self._joint_coord_starts: np.ndarray | None = None + self._joint_dof_starts: np.ndarray | None = None + + # Solver data reference (updated every step, used on export) + self._last_solver_data: dict[str, Any] | None = None + + # Rolling mjw_data shadow: 2 slots, GPU-GPU copy each step. + # On NaN, the previous slot has the pre-NaN solver input state. 
+ self._mjw_shadow: list[dict[str, torch.Tensor]] = [{}, {}] + self._mjw_shadow_idx: int = 0 + + # Per-env episode step tracking (set externally via :attr:`episode_length_buf`) + self._episode_length_buf: torch.Tensor | None = None + + # Pre-step state snapshot: captures state *before* the solver step so + # that on NaN export we have the exact solver input (post-reset state + # when episode_step=0). + self._pre_step_state: State = model.state() + + self._read_world_layout(model) + + def _read_world_layout(self, model: Model) -> None: + world_count = getattr(model, "world_count", 0) or 0 + if world_count <= 1: + return + body_ws = getattr(model, "body_world_start", None) + if body_ws is None: + return + self._world_count = int(world_count) + self._body_starts = body_ws.numpy() if hasattr(body_ws, "numpy") else np.asarray(body_ws) + jc = getattr(model, "joint_coord_world_start", None) + jd = getattr(model, "joint_dof_world_start", None) + self._joint_coord_starts = jc.numpy() if jc is not None and hasattr(jc, "numpy") else (np.asarray(jc) if jc is not None else None) + self._joint_dof_starts = jd.numpy() if jd is not None and hasattr(jd, "numpy") else (np.asarray(jd) if jd is not None else None) + + # ------------------------------------------------------------------ + # Properties + # ------------------------------------------------------------------ + + @property + def size(self) -> int: + """Number of state snapshots in the buffer.""" + return self._size + + @property + def nan_halt(self) -> bool: + """True after :attr:`max_exports` NaN exports have occurred.""" + return self._nan_halt + + @property + def episode_length_buf(self) -> torch.Tensor | None: + """Per-env episode step counter. 
Set by the RL env so the buffer can + report how far into each episode a NaN occurs.""" + return self._episode_length_buf + + @episode_length_buf.setter + def episode_length_buf(self, buf: torch.Tensor) -> None: + self._episode_length_buf = buf + + # ------------------------------------------------------------------ + # Hot path + # ------------------------------------------------------------------ + + def snapshot_pre_step(self, current_state: State) -> None: + """Capture state before the solver step. + + Call this right before ``NewtonManager._simulate()``. On NaN, the + export will include this snapshot as ``pre_step_*`` arrays — the exact + solver input state (which, for ``episode_step=0``, is the post-reset + configuration). + """ + if self._nan_halt: + return + self._pre_step_state.assign(current_state) + + def step( + self, + current_state: State, + sim_time: float, + diagnostics: dict | None = None, + solver_data: dict[str, Any] | None = None, + ) -> None: + """Copy state into ring, advance index, check for NaN, export if found. + + Everything except the final export stays on GPU. No-op when + :attr:`nan_halt` is True or the ring is empty. + + Args: + current_state: Current Newton state (GPU). + sim_time: Simulation time [s] at this step. + diagnostics: Optional dict of per-step diagnostic tensors (GPU). + Supported keys: ``solver_niter``, ``qfrc_constraint``, + ``qfrc_actuator``, ``qacc``, ``qM_diag_min``, + ``contact_dist_min``. Values are copied to CPU numpy arrays + and stored in the diagnostics ring buffer. + solver_data: Optional dict with MuJoCo Warp solver references for + divergence detection and NaN snapshot export. Expected keys: + ``mjw_data`` (the solver's Data object), ``max_iterations`` + (int), ``nv`` (int, actual DOFs). 
+ """ + if self._nan_halt or not self._ring: + return + + self._last_solver_data = solver_data + + idx = self._write_idx + self._ring[idx].assign(current_state) + + if diagnostics: + snap = self._diag_ring[idx] + for key, val in diagnostics.items(): + if val is None: + continue + src = val if isinstance(val, torch.Tensor) else wp.to_torch(val) + if key in snap and snap[key].shape == src.shape: + snap[key].copy_(src) + else: + snap[key] = src.detach().clone() + self._diag_ring[idx] = snap + + self._write_idx = (idx + 1) % self._size + + # Shadow-copy key mjw_data arrays (GPU-GPU) for pre-NaN snapshot + if solver_data is not None: + self._snapshot_mjw_data(solver_data) + + nan_detected, bad_envs = self._detect_nan(current_state) + if nan_detected: + self._export(sim_time, bad_envs) + + _MJW_SNAPSHOT_KEYS = ( + "qpos", "qvel", "qacc", "qacc_warmstart", "qfrc_applied", "qfrc_bias", + "qfrc_passive", "qfrc_constraint", "qfrc_actuator", "qfrc_smooth", "qM", + "solver_niter", + ) + _MJW_EFC_SNAPSHOT_KEYS = ("force",) + _MJW_CONTACT_SNAPSHOT_KEYS = ("dist", "pos", "frame", "friction", "dim", "geom", "worldid") + _MJW_SCALAR_SNAPSHOT_KEYS = ("nacon",) + + def _snapshot_mjw_data(self, solver_data: dict[str, Any]) -> None: + """GPU-GPU copy of key mjw_data arrays into a rolling 2-slot shadow.""" + mjd = solver_data.get("mjw_data") + if mjd is None: + return + slot = self._mjw_shadow[self._mjw_shadow_idx] + for name in self._MJW_SNAPSHOT_KEYS: + arr = getattr(mjd, name, None) + if arr is None: + continue + src = wp.to_torch(arr) if isinstance(arr, wp.array) else arr + if not isinstance(src, torch.Tensor): + continue + if name in slot and slot[name].shape == src.shape: + slot[name].copy_(src) + else: + slot[name] = src.detach().clone() + for sub_name, keys in (("efc", self._MJW_EFC_SNAPSHOT_KEYS), + ("contact", self._MJW_CONTACT_SNAPSHOT_KEYS)): + sub = getattr(mjd, sub_name, None) + if sub is None: + continue + for name in keys: + arr = getattr(sub, name, None) + if arr is 
None: + continue + src = wp.to_torch(arr) if isinstance(arr, wp.array) else arr + if not isinstance(src, torch.Tensor): + continue + key = f"{sub_name}_{name}" + if key in slot and slot[key].shape == src.shape: + slot[key].copy_(src) + else: + slot[key] = src.detach().clone() + for name in self._MJW_SCALAR_SNAPSHOT_KEYS: + arr = getattr(mjd, name, None) + if arr is None: + continue + src = wp.to_torch(arr) if isinstance(arr, wp.array) else arr + if not isinstance(src, torch.Tensor): + continue + if name in slot and slot[name].shape == src.shape: + slot[name].copy_(src) + else: + slot[name] = src.detach().clone() + self._mjw_shadow_idx = 1 - self._mjw_shadow_idx + + def _detect_nan(self, state: State) -> tuple[bool, list[int]]: + """GPU-side NaN check. Returns (has_nan, list_of_bad_env_ids). + + When world layout is unavailable, bad_env_ids is empty. + Already-exported env_ids are excluded from results. + + Multi-world detection is fully vectorized: per-world NaN flags are + computed with a single ``reshape`` + ``any(dim=1)`` per array, avoiding + the per-world ``.item()`` sync loop that would otherwise serialize + GPU and CPU (O(world_count) syncs per step). 
+ """ + arrays: list[torch.Tensor] = [] + if state.joint_qd is not None: + arrays.append(wp.to_torch(state.joint_qd)) + if state.joint_q is not None: + arrays.append(wp.to_torch(state.joint_q)) + if state.body_q is not None: + arrays.append(wp.to_torch(state.body_q)) + if state.body_qd is not None: + arrays.append(wp.to_torch(state.body_qd)) + if not arrays: + return False, [] + + if self._world_count <= 1 or self._body_starts is None: + combined = torch.cat([a.flatten() for a in arrays]) + if not torch.isnan(combined).any().item(): + return False, [] + return True, [] + + # Vectorized per-world NaN check (1-2 GPU syncs total, not O(world_count)) + wc = self._world_count + device = arrays[0].device + per_world_nan = torch.zeros(wc, dtype=torch.bool, device=device) + for arr in arrays: + flat = arr.flatten() + if flat.shape[0] % wc == 0: + per_world_nan |= torch.isnan(flat.reshape(wc, -1)).any(dim=1) + elif torch.isnan(flat).any().item(): + per_world_nan[:] = True + + if self._exported_envs: + exported = torch.tensor(sorted(self._exported_envs), dtype=torch.long, device=device) + per_world_nan[exported] = False + + if not per_world_nan.any().item(): + return False, [] + + bad = per_world_nan.nonzero(as_tuple=False).flatten().cpu().tolist() + return True, bad + + # ------------------------------------------------------------------ + # Export (cold path -- only on NaN) + # ------------------------------------------------------------------ + + def _export(self, sim_time: float, bad_envs: list[int]) -> None: + """Dump ring buffer to npz. 
Only called when NaN detected.""" + n = self._size + idx = self._write_idx + ordered = [self._ring[(idx + i) % n] for i in range(n)] + ordered_diag = [self._diag_ring[(idx + i) % n] for i in range(n)] + + os.makedirs(self._export_path, exist_ok=True) + ts = datetime.now().strftime("%Y%m%d_%H%M%S_%f") + stem = f"nan_replay_{ts}" + npz_path = os.path.join(self._export_path, f"{stem}.npz") + + data = self._dump_full(ordered, n) + data["exported_env_ids"] = np.array(bad_envs, dtype=np.int32) + + data["buffer_size"] = n + data["sim_time"] = sim_time + data["world_count"] = self._world_count + + if self._episode_length_buf is not None: + ep_len = self._episode_length_buf.cpu().numpy() + data["episode_length_buf"] = ep_len + if bad_envs: + for eid in bad_envs: + logger.error(" env %d: episode_step=%d", eid, int(ep_len[eid])) + + # Pre-step state: the solver input that produced NaN + ps = self._pre_step_state + if ps.joint_q is not None: + data["pre_step_joint_q"] = ps.joint_q.numpy() + if ps.joint_qd is not None: + data["pre_step_joint_qd"] = ps.joint_qd.numpy() + if ps.body_q is not None: + data["pre_step_body_q"] = ps.body_q.numpy() + if ps.body_qd is not None: + data["pre_step_body_qd"] = ps.body_qd.numpy() + + if self._last_solver_data is not None: + opt = self._last_solver_data.get("opt") + if opt is not None: + for k, v in opt.items(): + if v is None: + continue + if isinstance(v, wp.array): + v = v.numpy() + data[f"cfg_{k}"] = np.asarray(v) + + self._export_diagnostics(ordered_diag, n, bad_envs, data) + + np.savez_compressed(npz_path, **data) + + # Export USD scene for the NaN'd envs + if self._scene_exporter is not None: + usd_path = os.path.join(self._export_path, f"{stem}.usd") + try: + self._scene_exporter(usd_path, bad_envs) + logger.info("Exported scene for envs %s to %s", bad_envs if bad_envs else "all", usd_path) + except Exception: + logger.exception("Failed to export scene USD to %s", usd_path) + + # Export pre-NaN mjw_data shadow (the step BEFORE NaN) + 
pre_nan_slot = self._mjw_shadow[self._mjw_shadow_idx] # previous slot + if pre_nan_slot: + try: + mjw_data = {} + for k, v in pre_nan_slot.items(): + mjw_data[k] = v.cpu().numpy() if isinstance(v, torch.Tensor) else np.asarray(v) + mjw_path = os.path.join(self._export_path, f"{stem}_pre_nan_mjw.npz") + np.savez_compressed(mjw_path, **mjw_data) + logger.error("Pre-NaN mjw_data exported to %s", mjw_path) + except Exception: + logger.exception("Failed to export pre-NaN mjw_data") + + logger.error( + "NaN detected (envs %s). Exported %d snapshots to %s", + bad_envs if bad_envs else "all", + n, + npz_path, + ) + + # Track exported envs and check halt condition + self._exported_envs.update(bad_envs) + self._export_count += 1 + if self._export_count >= self._max_exports: + self._nan_halt = True + logger.error( + "Reached max NaN exports (%d). Halting debug state buffer.", + self._max_exports, + ) + + def _export_diagnostics( + self, ordered_diag: list[dict[str, torch.Tensor]], n: int, bad_envs: list[int], data: dict + ) -> None: + """Add diagnostics arrays to the export data dict. + + Converts GPU tensors to CPU numpy only here (cold export path). 
+ """ + if not ordered_diag or not ordered_diag[0]: + return + + all_keys: set[str] = set() + for d in ordered_diag: + all_keys.update(d.keys()) + + for key in sorted(all_keys): + frames = [] + for i in range(n): + val = ordered_diag[i].get(key) + if val is not None: + frames.append(val.cpu().numpy() if isinstance(val, torch.Tensor) else np.asarray(val)) + else: + frames.append(np.zeros_like(frames[-1]) if frames else np.array([0.0])) + + stacked = np.stack(frames) + + data[f"diag_{key}"] = stacked + + @staticmethod + def _dump_full(ordered: list[State], n: int) -> dict: + data: dict = {} + s0 = ordered[0] + if s0.body_q is not None: + data["body_q"] = np.stack([ordered[i].body_q.numpy() for i in range(n)]) # type: ignore[union-attr] + if s0.body_qd is not None: + data["body_qd"] = np.stack([ordered[i].body_qd.numpy() for i in range(n)]) # type: ignore[union-attr] + if s0.joint_q is not None: + data["joint_q"] = np.stack([ordered[i].joint_q.numpy() for i in range(n)]) # type: ignore[union-attr] + if s0.joint_qd is not None: + data["joint_qd"] = np.stack([ordered[i].joint_qd.numpy() for i in range(n)]) # type: ignore[union-attr] + return data + + # ------------------------------------------------------------------ + # Teardown + # ------------------------------------------------------------------ + + def clear(self) -> None: + """Release ring buffer.""" + self._ring.clear() + self._diag_ring.clear() + self._write_idx = 0 + self._size = 0 + self._last_solver_data = None diff --git a/source/isaaclab_newton/isaaclab_newton/physics/newton_collision_cfg.py b/source/isaaclab_newton/isaaclab_newton/physics/newton_collision_cfg.py new file mode 100644 index 000000000000..be1d6a6748c9 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/physics/newton_collision_cfg.py @@ -0,0 +1,168 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configuration for Newton collision pipeline.""" + +from __future__ import annotations + +from typing import Literal + +from isaaclab.utils import configclass + + +@configclass +class HydroelasticSDFCfg: + """Configuration for SDF-based hydroelastic collision handling. + + Hydroelastic contacts generate distributed contact areas instead of point contacts, + providing more realistic force distribution for manipulation and compliant surfaces. + + For more details, see the `Newton Collisions Guide`_. + + .. _Newton Collisions Guide: https://newton-physics.github.io/newton/latest/concepts/collisions.html#hydroelastic-contacts + """ + + reduce_contacts: bool = True + """Whether to reduce contacts to a smaller representative set per shape pair. + + When False, all generated contacts are passed through without reduction. + + Defaults to ``True`` (same as Newton's default). + """ + + buffer_fraction: float = 1.0 + """Fraction of worst-case hydroelastic buffer allocations. Range: (0, 1]. + + Lower values reduce memory usage but may cause overflows in dense scenes. + Overflows are bounds-safe and emit warnings; increase this value when warnings appear. + + Defaults to ``1.0`` (same as Newton's default). + """ + + normal_matching: bool = True + """Whether to rotate reduced contact normals to align with aggregate force direction. + + Only active when ``reduce_contacts`` is True. + + Defaults to ``True`` (same as Newton's default). + """ + + anchor_contact: bool = False + """Whether to add an anchor contact at the center of pressure for each normal bin. + + The anchor contact helps preserve moment balance. Only active when ``reduce_contacts`` is True. + + Defaults to ``False`` (same as Newton's default). + """ + + margin_contact_area: float = 0.01 + """Contact area [m^2] used for non-penetrating contacts at the margin. + + Defaults to ``0.01`` (same as Newton's default). 
+ """ + + output_contact_surface: bool = False + """Whether to output hydroelastic contact surface vertices for visualization. + + Defaults to ``False`` (same as Newton's default). + """ + + +@configclass +class NewtonCollisionPipelineCfg: + """Configuration for Newton collision pipeline. + + Full-featured collision pipeline with GJK/MPR narrow phase and pluggable broad phase. + When this config is set on :attr:`NewtonCfg.collision_cfg`: + + - **MJWarpSolverCfg**: Newton's collision pipeline replaces MuJoCo's internal contact solver. + - **Other solvers** (XPBD, Featherstone, etc.): Configures the collision pipeline parameters + (these solvers always use Newton's collision pipeline). + + Key features: + + - GJK/MPR algorithms for convex-convex collision detection + - Multiple broad phase options: NXN (all-pairs), SAP (sweep-and-prune), EXPLICIT (precomputed pairs) + - Mesh-mesh collision via SDF with contact reduction + - Optional hydroelastic contact model for compliant surfaces + + For more details, see the `Newton Collisions Guide`_ and `CollisionPipeline API`_. + + .. _Newton Collisions Guide: https://newton-physics.github.io/newton/latest/concepts/collisions.html + .. _CollisionPipeline API: https://newton-physics.github.io/newton/api/_generated/newton.CollisionPipeline.html + """ + + broad_phase: Literal["explicit", "nxn", "sap"] = "explicit" + """Broad phase algorithm for collision detection. + + Options: + + - ``"explicit"``: Use precomputed shape pairs from ``model.shape_contact_pairs``. + - ``"nxn"``: All-pairs brute force. Simple but O(n^2) complexity. + - ``"sap"``: Sweep-and-prune. Good for scenes with many dynamic objects. + + Defaults to ``"explicit"`` (same as Newton's default when ``broad_phase=None``). + """ + + reduce_contacts: bool = True + """Whether to reduce contacts for mesh-mesh collisions. + + When True, uses shared memory contact reduction to select representative contacts. 
+ Improves performance and stability for meshes with many vertices. + + Defaults to ``True`` (same as Newton's default). + """ + + rigid_contact_max: int | None = None + """Maximum number of rigid contacts to allocate. + + Resolution order: + + 1. If provided, use this value. + 2. Else if ``model.rigid_contact_max > 0``, use the model value. + 3. Else estimate automatically from model shape and pair metadata. + + Defaults to ``None`` (auto-estimate, same as Newton's default). + """ + + max_triangle_pairs: int = 1_000_000 + """Maximum number of triangle pairs allocated by narrow phase for mesh and heightfield collisions. + + Increase this when scenes with large/complex meshes or heightfields report + triangle-pair overflow warnings. + + Defaults to ``1_000_000`` (same as Newton's default). + """ + + soft_contact_max: int | None = None + """Maximum number of soft contacts to allocate. + + If None, computed as ``shape_count * particle_count``. + + Defaults to ``None`` (auto-compute, same as Newton's default). + """ + + soft_contact_margin: float = 0.01 + """Margin [m] for soft contact generation. + + Defaults to ``0.01`` (same as Newton's default). + """ + + requires_grad: bool | None = None + """Whether to enable gradient computation for collision. + + If ``None``, uses ``model.requires_grad``. + + Defaults to ``None`` (same as Newton's default). + """ + + sdf_hydroelastic_config: HydroelasticSDFCfg | None = None + """Configuration for SDF-based hydroelastic collision handling. + + If ``None``, hydroelastic contacts are disabled. + If set, enables hydroelastic contacts with the specified parameters. + + Defaults to ``None`` (hydroelastic disabled, same as Newton's default). 
+ """ diff --git a/source/isaaclab_newton/isaaclab_newton/physics/newton_manager.py b/source/isaaclab_newton/isaaclab_newton/physics/newton_manager.py index 3e967751d8df..f7a7f264d151 100644 --- a/source/isaaclab_newton/isaaclab_newton/physics/newton_manager.py +++ b/source/isaaclab_newton/isaaclab_newton/physics/newton_manager.py @@ -28,6 +28,7 @@ _cudart = None from newton import Axis, CollisionPipeline, Contacts, Control, Model, ModelBuilder, State, eval_fk from newton._src.usd.schemas import SchemaResolverNewton, SchemaResolverPhysx +from newton.geometry import HydroelasticSDF from newton.sensors import SensorContact as NewtonContactSensor from newton.solvers import SolverBase, SolverFeatherstone, SolverMuJoCo, SolverNotifyFlags, SolverXPBD @@ -35,6 +36,8 @@ from isaaclab.sim.utils.stage import get_current_stage from isaaclab.utils.timer import Timer +from .debug_state_buffer import DebugStateBuffer + if TYPE_CHECKING: from isaaclab.sim.simulation_context import SimulationContext @@ -91,6 +94,7 @@ class NewtonManager(PhysicsManager): _contacts: Contacts | None = None _needs_collision_pipeline: bool = False _collision_pipeline = None + _collision_cfg = None # CollisionPipelineCfg, set during initialize() _newton_contact_sensors: dict = {} # Maps sensor_key to NewtonContactSensor _report_contacts: bool = False _fk_dirty: bool = False @@ -114,6 +118,9 @@ class NewtonManager(PhysicsManager): # Model changes (callbacks use unified system from PhysicsManager) _model_changes: set[int] = set() + # Debug state buffer for NaN replay (None = disabled) + _debug_state_buffer: DebugStateBuffer | None = None + # Views list for assets to register their views _views: list = [] @@ -300,6 +307,10 @@ def step(cls) -> None: else: logger.warning("Newton deferred CUDA graph capture failed; using eager execution") + # Snapshot state before stepping for NaN debug (captures post-reset state) + if cls._debug_state_buffer is not None: + 
cls._debug_state_buffer.snapshot_pre_step(cls._state_0) + # Ensure body_q is up-to-date before collision detection. # After env resets, joint_q is written but body_q (used by # broadphase/narrowphase) is stale until FK runs. @@ -324,6 +335,43 @@ def step(cls) -> None: logger.warning(f"Solver didn't converge! max_iter={convergence_data['max']}") PhysicsManager._sim_time += cls._solver_dt * cls._num_substeps + if cls._debug_state_buffer is not None: + with wp.ScopedDevice(PhysicsManager._device): + diagnostics = cls._collect_diagnostics() + solver_data = None + if isinstance(cls._solver, SolverMuJoCo): + mjw_opt = cls._solver.mjw_model.opt + mjw_data = cls._solver.mjw_data + collision_cfg = cls._collision_cfg + solver_cfg = getattr(PhysicsManager._cfg, "solver_cfg", None) + solver_cfg_dict = solver_cfg.to_dict() if solver_cfg is not None and hasattr(solver_cfg, "to_dict") else {} + solver_data = { + "mjw_data": mjw_data, + "max_iterations": mjw_opt.iterations, + "nv": int(cls._solver.mjw_model.nv), + "opt": { + "iterations": mjw_opt.iterations, + "tolerance": solver_cfg_dict.get("tolerance", getattr(mjw_opt, "tolerance", None)), + "integrator": solver_cfg_dict.get("integrator", getattr(mjw_opt, "integrator", None)), + "cone": solver_cfg_dict.get("cone", getattr(mjw_opt, "cone", None)), + "impratio": solver_cfg_dict.get("impratio", getattr(mjw_opt, "impratio", None)), + "njmax": solver_cfg_dict.get("njmax", 400), + "nconmax": solver_cfg_dict.get("nconmax", 200), + "use_mujoco_contacts": solver_cfg_dict.get("use_mujoco_contacts", not cls._needs_collision_pipeline), + "max_triangle_pairs": getattr(collision_cfg, "max_triangle_pairs", None) if collision_cfg else None, + "dt": cls._solver_dt, + "num_substeps": cls._num_substeps, + }, + } + cls._debug_state_buffer.step(cls._state_0, PhysicsManager._sim_time, diagnostics, solver_data) + if cls._debug_state_buffer.nan_halt: + nr = getattr(PhysicsManager._cfg, "nan_replay", None) + export_dir = nr.export_path if nr is not 
None else "." + raise RuntimeError( + f"NaN detected in physics state. Debug replay exported to {export_dir}. " + "Halting simulation." + ) + @classmethod def close(cls) -> None: """Clean up Newton physics resources.""" @@ -342,6 +390,21 @@ def get_physics_sim_view(cls) -> list: """ return cls._views + @classmethod + def set_debug_episode_length_buf(cls, buf: torch.Tensor) -> None: + """Attach per-env episode length to the debug state buffer. + + When the buffer exports a NaN event it will include ``episode_step`` + for each affected environment so the user can tell whether the NaN + happened right after reset or mid-episode. + + Args: + buf: Episode length tensor, shape ``(num_envs,)``, dtype long. + Typically ``env.episode_length_buf``. + """ + if cls._debug_state_buffer is not None: + cls._debug_state_buffer.episode_length_buf = buf + @classmethod def is_fabric_enabled(cls) -> bool: """Check if fabric interface is enabled (not applicable for Newton).""" @@ -376,6 +439,73 @@ def clear(cls): cls._up_axis = "Z" cls._model_changes = set() cls._views = [] + if cls._debug_state_buffer is not None: + cls._debug_state_buffer.clear() + cls._debug_state_buffer = None + + @staticmethod + def _make_scene_exporter(): + """Build a callable that exports NaN'd env prim subtrees AND global collision geometry to USD. + + Copies all relevant prims into a single target layer in one pass to avoid + multiple ``export_prim_to_file`` calls clobbering each other. + + Returns None if USD utilities are unavailable (headless / no-USD environments). 
+ """ + try: + from pxr import Sdf, Usd, UsdGeom # noqa: PLC0415 + + from isaaclab.sim.utils.prims import resolve_paths # noqa: PLC0415 + except ImportError: + return None + + def _exporter(usd_path: str, env_ids: list[int]) -> None: + stage = get_current_stage() + if stage is None: + return + + source_layer = stage.GetRootLayer() + target_layer = Sdf.Layer.CreateNew(usd_path) + target_stage = Usd.Stage.Open(target_layer) + UsdGeom.SetStageUpAxis(target_stage, UsdGeom.GetStageUpAxis(stage)) + UsdGeom.SetStageMetersPerUnit(target_stage, UsdGeom.GetStageMetersPerUnit(stage)) + + prim_paths_to_copy = [] + + if not env_ids: + prim_paths_to_copy.append("/World/envs/env_0") + else: + for eid in env_ids: + prim_paths_to_copy.append(f"/World/envs/env_{eid}") + + world_prim = stage.GetPrimAtPath("/World") + if world_prim and world_prim.IsValid(): + for child in world_prim.GetChildren(): + child_path = child.GetPath().pathString + if child_path == "/World/envs": + continue + if _prim_has_collision(child): + prim_paths_to_copy.append(child_path) + + for prim_path in prim_paths_to_copy: + Sdf.CreatePrimInLayer(target_layer, prim_path) + Sdf.CopySpec(source_layer, Sdf.Path(prim_path), target_layer, Sdf.Path(prim_path)) + + if prim_paths_to_copy: + target_layer.defaultPrim = Sdf.Path(prim_paths_to_copy[0]).name + + resolve_paths(source_layer.identifier, target_layer.identifier) + target_layer.Save() + + def _prim_has_collision(prim) -> bool: + from pxr import Usd, UsdPhysics # noqa: PLC0415 + + for p in Usd.PrimRange(prim): + if p.HasAPI(UsdPhysics.CollisionAPI) or p.HasAPI(UsdPhysics.MeshCollisionAPI): + return True + return False + + return _exporter @classmethod def set_builder(cls, builder: ModelBuilder) -> None: @@ -417,17 +547,53 @@ def start_simulation(cls) -> None: device = PhysicsManager._device logger.info(f"Finalizing model on device: {device}") cls._builder.up_axis = Axis.from_string(cls._up_axis) - # Set smaller contact margin for manipulation examples (default 10cm 
is too large) + with Timer(name="newton_finalize_builder", msg="Finalize builder took:"): cls._model = cls._builder.finalize(device=device) cls._model.set_gravity(cls._gravity_vector) cls._model.num_envs = cls._num_envs + # #region agent log + import os as _os + _export_dir = _os.environ.get("NEWTON_SAVE_CLONER_STAGE", _os.environ.get("NEWTON_SAVE_MODEL", "")) + if _export_dir: + _os.makedirs(_export_dir, exist_ok=True) + import numpy as _np + _m = cls._model + _data = {} + for _attr in dir(_m): + if _attr.startswith("_"): + continue + _v = getattr(_m, _attr, None) + if _v is None: + continue + if hasattr(_v, "numpy"): + try: + _data[_attr] = _v.numpy() + except Exception: + pass + elif isinstance(_v, (int, float)): + _data[_attr] = _np.array([_v]) + _np.savez_compressed(_os.path.join(_export_dir, "model.npz"), **_data) + logger.info(f"Saved model ({len(_data)} arrays) to {_export_dir}/model.npz") + # #endregion + cls._state_0 = cls._model.state() cls._state_1 = cls._model.state() cls._control = cls._model.control() eval_fk(cls._model, cls._state_0.joint_q, cls._state_0.joint_qd, cls._state_0, None) + nan_replay_cfg = getattr(PhysicsManager._cfg, "nan_replay", None) + if nan_replay_cfg is not None: + cls._debug_state_buffer = DebugStateBuffer( + cls._model, + nan_replay_cfg.buffer_size, + export_path=nan_replay_cfg.export_path, + max_exports=nan_replay_cfg.max_exports, + scene_exporter=cls._make_scene_exporter(), + ) + logger.info("Debug state buffer enabled: %d snapshots", cls._debug_state_buffer.size) + logger.info("Dispatching PHYSICS_READY callbacks") cls.dispatch_event(PhysicsEvent.PHYSICS_READY) @@ -490,7 +656,31 @@ def instantiate_builder_from_stage(cls): else: # Load everything except the env subtrees (ground plane, lights, etc.) 
ignore_paths = [path for _, path in env_paths] + # #region agent log + import json as _json, time as _time + _log_path = "/home/zhengyuz/Projects/IsaacLab/.cursor/debug-dd2fc5.log" + _entry = {"sessionId": "dd2fc5", "hypothesisId": "H_terrain_load", + "location": "newton_manager.py:instantiate_builder_from_stage", + "timestamp": int(_time.time() * 1000), + "message": "pre add_usd terrain", + "data": {"num_envs": len(env_paths), "ignore_paths_count": len(ignore_paths), + "stage_root_layer": str(stage.GetRootLayer().identifier), + "builder_shape_count_before": builder.shape_count}} + with open(_log_path, "a") as _f: + _f.write(_json.dumps(_entry) + "\n") + # #endregion builder.add_usd(stage, ignore_paths=ignore_paths, schema_resolvers=schema_resolvers) + # #region agent log + _entry2 = {"sessionId": "dd2fc5", "hypothesisId": "H_terrain_load", + "location": "newton_manager.py:instantiate_builder_from_stage", + "timestamp": int(_time.time() * 1000), + "message": "post add_usd terrain", + "data": {"builder_shape_count_after": builder.shape_count, + "builder_body_count": builder.body_count, + "shape_labels": builder.shape_label[:10] if builder.shape_label else []}} + with open(_log_path, "a") as _f: + _f.write(_json.dumps(_entry2) + "\n") + # #endregion # Build a prototype from the first env (all envs assumed identical) _, proto_path = env_paths[0] @@ -520,6 +710,22 @@ def instantiate_builder_from_stage(cls): cls._num_envs = len(env_paths) + # #region agent log + import json as _json, time as _time + _log_path = "/home/zhengyuz/Projects/IsaacLab/.cursor/debug-dd2fc5.log" + _entry = {"sessionId": "dd2fc5", "hypothesisId": "H_terrain_load", + "location": "newton_manager.py:instantiate_builder_from_stage", + "timestamp": int(_time.time() * 1000), + "message": "final builder state", + "data": {"builder_shape_count": builder.shape_count, + "builder_body_count": builder.body_count, + "builder_joint_count": builder.joint_count, + "shape_labels": builder.shape_label[:20] if 
builder.shape_label else [], + "shape_geo_types": [int(builder.shape_geo_type[i]) for i in range(min(5, builder.shape_count))] if hasattr(builder, 'shape_geo_type') else []}} + with open(_log_path, "a") as _f: + _f.write(_json.dumps(_entry) + "\n") + # #endregion + cls.set_builder(builder) @classmethod @@ -532,7 +738,15 @@ def _initialize_contacts(cls) -> None: if cls._needs_collision_pipeline: # Newton collision pipeline: create pipeline and generate contacts if cls._collision_pipeline is None: - cls._collision_pipeline = CollisionPipeline(cls._model, broad_phase="explicit") + # Convert config to dict, handling nested hydroelastic config + cfg_dict = cls._collision_cfg.to_dict() + + # Convert HydroelasticSDFCfg to Newton's HydroelasticSDF.Config if present + hydro_cfg = cfg_dict.pop("sdf_hydroelastic_config", None) + cfg_dict["sdf_hydroelastic_config"] = HydroelasticSDF.Config(**hydro_cfg) if hydro_cfg else None + + cls._collision_pipeline = CollisionPipeline(cls._model, **cfg_dict) + if cls._contacts is None: cls._contacts = cls._collision_pipeline.contacts() @@ -597,17 +811,15 @@ def initialize_solver(cls) -> None: # - SolverMuJoCo with use_mujoco_contacts=False: needs Newton's unified collision pipeline # - Other solvers (XPBD, Featherstone): always need Newton's unified collision pipeline if isinstance(cls._solver, SolverMuJoCo): - # Handle both dict and object configs - if hasattr(solver_cfg, "use_mujoco_contacts"): - use_mujoco_contacts = solver_cfg.use_mujoco_contacts - elif isinstance(solver_cfg, dict): - use_mujoco_contacts = solver_cfg.get("use_mujoco_contacts", False) - else: - use_mujoco_contacts = getattr(solver_cfg, "use_mujoco_contacts", False) - cls._needs_collision_pipeline = not use_mujoco_contacts + cls._needs_collision_pipeline = not solver_cfg.use_mujoco_contacts + if cls._collision_cfg is not None: + raise ValueError("NewtonManager: collision_cfg is not supported with use_mujoco_contacts=True") else: cls._needs_collision_pipeline = True + # 
Store collision pipeline config + cls._collision_cfg = getattr(cfg, "collision_cfg", None) + # Initialize contacts and collision pipeline cls._initialize_contacts() @@ -832,6 +1044,54 @@ def get_solver_convergence_steps(cls) -> dict[str, float | int]: "std": np.std(niter), } + @classmethod + def _collect_diagnostics(cls) -> dict | None: + """Gather per-step solver diagnostics for the debug state buffer. + + Returns None if the solver doesn't expose MuJoCo data (non-MJWarp solvers). + All values stay on GPU; the debug buffer copies to CPU only on NaN export. + """ + if not isinstance(cls._solver, SolverMuJoCo): + return None + + import torch # noqa: PLC0415 + + mjd = cls._solver.mjw_data + diag: dict = {} + + # Scalar per world -- cheap + diag["solver_niter"] = mjd.solver_niter + + # Per-dof arrays -- wp.to_torch is zero-copy, clone happens in buffer + diag["qfrc_applied"] = mjd.qfrc_applied + diag["qfrc_constraint"] = mjd.qfrc_constraint + diag["qfrc_actuator"] = mjd.qfrc_actuator + diag["qacc"] = mjd.qacc + + # Mass matrix diagonal min -- qM is (num_worlds, nv_pad, nv_pad) + # nv_pad may be larger than nv (actual DOFs), with zero padding. + # Only check the first nv diagonal elements. 
+ nv = int(cls._solver.mjw_model.nv) + qM = wp.to_torch(mjd.qM) + if qM.dim() == 3: + diag_vals = torch.diagonal(qM, dim1=-2, dim2=-1)[:, :nv] + diag["qM_diag_min"] = diag_vals.min(dim=-1).values + elif qM.dim() == 2: + diag["qM_diag_min"] = torch.diag(qM)[:nv].min().unsqueeze(0) + + # Contact penetration -- vectorized scatter_reduce, no Python loop + contact = getattr(mjd, "contact", None) + if contact is not None: + dist = wp.to_torch(contact.dist) + if dist.numel() > 0: + worldid = wp.to_torch(contact.worldid).long() + nw = getattr(cls._model, "world_count", 0) or 1 + diag["contact_dist_min"] = torch.zeros(nw, device=dist.device).scatter_reduce( + 0, worldid, dist, reduce="amin", include_self=True + ) + + return diag + # State accessors (used extensively by articulation/rigid object data) @classmethod def get_model(cls) -> Model: diff --git a/source/isaaclab_newton/isaaclab_newton/physics/newton_manager_cfg.py b/source/isaaclab_newton/isaaclab_newton/physics/newton_manager_cfg.py index afbf7f54ba0c..2c9c3c7c2263 100644 --- a/source/isaaclab_newton/isaaclab_newton/physics/newton_manager_cfg.py +++ b/source/isaaclab_newton/isaaclab_newton/physics/newton_manager_cfg.py @@ -12,8 +12,10 @@ from isaaclab.physics import PhysicsCfg from isaaclab.utils import configclass +from .newton_collision_cfg import NewtonCollisionPipelineCfg + if TYPE_CHECKING: - from .newton_manager import NewtonManager + from isaaclab_newton.physics import NewtonManager @configclass @@ -104,8 +106,26 @@ class MJWarpSolverCfg(NewtonSolverCfg): """Whether to use parallel line search.""" use_mujoco_contacts: bool = True - """Whether to use MuJoCo's contact solver.""" + """Whether to use MuJoCo's internal contact solver. + + If ``True`` (default), MuJoCo handles collision detection and contact resolution internally. + If ``False``, MuJoCo's collision detection is disabled, and Newton's :class:`CollisionPipeline` + must be used instead (set :attr:`NewtonCfg.collision_cfg`). + .. 
warning:: + This must be consistent with :attr:`NewtonCfg.collision_cfg`. If ``use_mujoco_contacts=False``, + you must also set ``collision_cfg`` to a :class:`NewtonCollisionPipelineCfg` instance. + Mismatched settings will cause simulation errors (e.g., NaN values). + """ + + tolerance: float = 1e-6 + """Solver convergence tolerance for the constraint residual. + + The solver iterates until the residual drops below this threshold or + ``iterations`` is reached. Lower values give more precise constraint + satisfaction at the cost of more iterations. MuJoCo default is ``1e-8``; + Newton default is ``1e-6``. + """ @configclass class XPBDSolverCfg(NewtonSolverCfg): @@ -194,6 +214,33 @@ class FeatherstoneSolverCfg(NewtonSolverCfg): """Whether to fuse the Cholesky decomposition.""" +@configclass +class NanReplayCfg: + """Configuration for the NaN replay debug buffer. + + When attached to :class:`NewtonCfg`, a rolling buffer of GPU state snapshots + is kept. If a NaN is detected after a physics step the buffer is exported to + disk (with optional USD scene export) and simulation is halted. + """ + + buffer_size: int = 200 + """Number of state snapshots to keep in the rolling buffer. + + Capped at 2000 to bound GPU memory use. + """ + + export_path: str = "./nan_debug/" + """Directory for exported ``.npz`` and ``.usd`` files.""" + + max_exports: int = 5 + """Maximum number of NaN export events before halting simulation. + + Each export event covers a distinct set of newly-NaN env_ids. After this + many exports the debug buffer stops recording and the physics step raises + ``RuntimeError``. + """ + + @configclass class NewtonCfg(PhysicsCfg): """Configuration for Newton physics manager. @@ -216,5 +263,24 @@ class NewtonCfg(PhysicsCfg): If set to False, the simulation performance will be severely degraded. """ + nan_replay: NanReplayCfg | None = None + """NaN replay debug buffer configuration. 
+ + Set to a :class:`NanReplayCfg` instance to enable the rolling state buffer + and automatic NaN detection / export. ``None`` (default) disables the + feature entirely — no memory is allocated and no per-step overhead is added. + """ + solver_cfg: NewtonSolverCfg = MJWarpSolverCfg() """Solver configuration. Default is MJWarpSolverCfg().""" + + collision_cfg: NewtonCollisionPipelineCfg | None = None + """Newton collision pipeline configuration. + + If ``None`` (default), the solver's native collision pipeline is used (e.g., MuJoCo's + internal contact solver for :class:`MJWarpSolverCfg`). + + If set, Newton's :class:`CollisionPipeline` is used with the specified parameters. + This is required for scenarios where the solver's native collision does not support + the geometry (e.g., mesh terrain with MuJoCo Warp). + """ diff --git a/source/isaaclab_newton/isaaclab_newton/sim/__init__.py b/source/isaaclab_newton/isaaclab_newton/sim/__init__.py new file mode 100644 index 000000000000..f4ee7eadc3bd --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sim/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Newton simulation utilities.""" diff --git a/source/isaaclab_newton/isaaclab_newton/sim/views/__init__.py b/source/isaaclab_newton/isaaclab_newton/sim/views/__init__.py new file mode 100644 index 000000000000..ce10cabf240d --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sim/views/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +from .xform_prim_view import XformPrimView + +__all__ = ["XformPrimView"] diff --git a/source/isaaclab_newton/isaaclab_newton/sim/views/xform_prim_view.py b/source/isaaclab_newton/isaaclab_newton/sim/views/xform_prim_view.py new file mode 100644 index 000000000000..a2bfbac757f7 --- /dev/null +++ b/source/isaaclab_newton/isaaclab_newton/sim/views/xform_prim_view.py @@ -0,0 +1,193 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Newton-backed XformPrimView using sites (body + local offset) for GPU-native pose queries.""" + +from __future__ import annotations + +from collections.abc import Sequence + +import torch +import warp as wp + +import isaaclab.sim as sim_utils +from isaaclab.sim.views.base_xform_prim_view import BaseXformPrimView +from isaaclab_newton.physics.newton_manager import NewtonManager + + +@wp.kernel +def _compute_site_world_transforms( + body_q: wp.array(dtype=wp.transformf), + site_body: wp.array(dtype=wp.int32), + site_local: wp.array(dtype=wp.transformf), + out_pos: wp.array(dtype=wp.vec3f), + out_quat: wp.array(dtype=wp.vec4f), +): + """Compute world transforms for sites: world = body_q[body] * local.""" + i = wp.tid() + world = wp.transform_multiply(body_q[site_body[i]], site_local[i]) + out_pos[i] = wp.transform_get_translation(world) + q = wp.transform_get_rotation(world) + out_quat[i] = wp.vec4f(q[0], q[1], q[2], q[3]) + + +class XformPrimView(BaseXformPrimView): + """Batched prim view backed by Newton sites on GPU. + + Each matched prim is resolved to a Newton *site* -- a ``(body_index, + local_transform)`` pair. World poses are computed on GPU as + ``body_q[body] * local_transform`` using a Warp kernel, matching Newton's + native site mechanism. + + Resolution order for each prim path: + + 1. 
Look up in ``model.shape_label`` (sites and collision shapes). If found, + use ``shape_body`` and ``shape_transform`` for the body index and local + offset. + 2. Fall back to ``model.body_label`` (the body itself). Local offset is + identity. + """ + + def __init__(self, prim_path: str, device: str = "cpu", stage=None, **kwargs): + self._prim_path = prim_path + self._device = device + + stage = sim_utils.get_current_stage() if stage is None else stage + self._prims = sim_utils.find_matching_prims(prim_path, stage=stage) + + model = NewtonManager.get_model() + body_labels = list(model.body_label) + shape_labels = list(model.shape_label) + shape_body_np = wp.to_torch(model.shape_body) + shape_xform_np = wp.to_torch(model.shape_transform) + + prim_paths = [str(p.GetPath()) for p in self._prims] + site_bodies: list[int] = [] + site_locals: list[list[float]] = [] + + identity_xform = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0] + + for pp in prim_paths: + if pp in shape_labels: + si = shape_labels.index(pp) + site_bodies.append(int(shape_body_np[si])) + site_locals.append(shape_xform_np[si].tolist()) + elif pp in body_labels: + site_bodies.append(body_labels.index(pp)) + site_locals.append(identity_xform) + else: + raise ValueError( + f"XformPrimView (Newton): prim '{pp}' not found in model shape_label or body_label. 
" + f"Shape labels (first 10): {shape_labels[:10]}, " + f"Body labels (first 10): {body_labels[:10]}" + ) + + self._site_body = wp.array(site_bodies, dtype=wp.int32, device=device) + self._site_local = wp.array( + [wp.transform(*x) for x in site_locals], + dtype=wp.transformf, + device=device, + ) + + self._pos_buf = wp.zeros(self.count, dtype=wp.vec3f, device=device) + self._quat_buf = wp.zeros(self.count, dtype=wp.vec4f, device=device) + + @property + def count(self) -> int: + return len(self._prims) + + # ------------------------------------------------------------------ + # World poses + # ------------------------------------------------------------------ + + def get_world_poses( + self, indices: Sequence[int] | None = None + ) -> tuple[torch.Tensor, torch.Tensor]: + """Compute site world poses on GPU: ``body_q[body] * local``.""" + state = NewtonManager.get_state_0() + + wp.launch( + _compute_site_world_transforms, + dim=self.count, + inputs=[state.body_q, self._site_body, self._site_local], + outputs=[self._pos_buf, self._quat_buf], + device=self._device, + ) + + pos, quat = wp.to_torch(self._pos_buf), wp.to_torch(self._quat_buf) + if indices is not None: + return pos[indices], quat[indices] + return pos, quat + + def set_world_poses( + self, + positions: torch.Tensor | None = None, + orientations: torch.Tensor | None = None, + indices: Sequence[int] | None = None, + ) -> None: + """Write world poses into Newton ``state.body_q``. + + Only works correctly for sites with identity local transforms (i.e. + body-level prims). For offset sites the inverse local transform would + need to be applied first. 
+ """ + state = NewtonManager.get_state_0() + body_q = wp.to_torch(state.body_q) + + if indices is not None: + idx_t = torch.as_tensor(indices, dtype=torch.long, device=self._device) + bodies = wp.to_torch(self._site_body)[idx_t] + else: + bodies = wp.to_torch(self._site_body) + + if positions is not None: + body_q[bodies, :3] = positions + if orientations is not None: + body_q[bodies, 3:7] = orientations + + # ------------------------------------------------------------------ + # Local poses -- delegate to world (Newton bodies live in world space) + # ------------------------------------------------------------------ + + def get_local_poses(self, indices=None) -> tuple[torch.Tensor, torch.Tensor]: + return self.get_world_poses(indices) + + def set_local_poses(self, translations=None, orientations=None, indices=None) -> None: + self.set_world_poses(positions=translations, orientations=orientations, indices=indices) + + # ------------------------------------------------------------------ + # Scales + # ------------------------------------------------------------------ + + def get_scales(self, indices: Sequence[int] | None = None) -> torch.Tensor: + model = NewtonManager.get_model() + shape_scale = wp.to_torch(model.shape_scale) + shape_body = wp.to_torch(model.shape_body) + + if indices is not None: + bodies = wp.to_torch(self._site_body)[torch.as_tensor(indices, dtype=torch.long, device=self._device)] + else: + bodies = wp.to_torch(self._site_body) + + scales = [] + for body_idx in bodies: + mask = shape_body == body_idx + body_scales = shape_scale[mask] + scales.append(body_scales[0] if len(body_scales) > 0 else torch.ones(3, device=self._device)) + return torch.stack(scales) + + def set_scales(self, scales: torch.Tensor, indices: Sequence[int] | None = None) -> None: + model = NewtonManager.get_model() + shape_scale = wp.to_torch(model.shape_scale) + shape_body = wp.to_torch(model.shape_body) + + if indices is not None: + bodies = 
wp.to_torch(self._site_body)[torch.as_tensor(indices, dtype=torch.long, device=self._device)] + else: + bodies = wp.to_torch(self._site_body) + + for i, body_idx in enumerate(bodies): + mask = shape_body == body_idx + shape_scale[mask] = scales[i] diff --git a/source/isaaclab_newton/setup.py b/source/isaaclab_newton/setup.py index c15812f8282a..dbc9c4ef7c03 100644 --- a/source/isaaclab_newton/setup.py +++ b/source/isaaclab_newton/setup.py @@ -42,8 +42,8 @@ def run(self): EXTRAS_REQUIRE = { "all": [ "prettytable==3.3.0", - "mujoco==3.5.0", - "mujoco-warp==3.5.0.2", + "mujoco==3.6.0", + "mujoco-warp==3.6.0", "PyOpenGL-accelerate==3.1.10", "newton @ git+https://github.com/newton-physics/newton.git@2684d75bfa4bb8b058a93b81c458a74b7701c997", ], @@ -74,6 +74,8 @@ def run(self): "isaaclab_newton.physics", "isaaclab_newton.renderers", "isaaclab_newton.scene_data_providers", + "isaaclab_newton.sim", + "isaaclab_newton.sim.views", "isaaclab_newton.sensors", "isaaclab_newton.sensors.contact_sensor", "isaaclab_newton.sensors.frame_transformer", diff --git a/source/isaaclab_newton/test/assets/test_articulation.py b/source/isaaclab_newton/test/assets/test_articulation.py index 867f0fe8eba1..78b888edd49f 100644 --- a/source/isaaclab_newton/test/assets/test_articulation.py +++ b/source/isaaclab_newton/test/assets/test_articulation.py @@ -2486,6 +2486,150 @@ def _patched_simulate(cls): f"body_q was stale when collide() ran: diff={diff:.4f}m, jq={jq_root.tolist()}, bq={bq_root.tolist()}" ) +@pytest.mark.parametrize("add_ground_plane", [True]) +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("articulation_type", ["panda"]) +@pytest.mark.isaacsim_ci +def test_set_material_properties(sim, num_articulations, device, add_ground_plane, articulation_type): + """Test getting and setting material properties (friction/restitution) of articulation shapes.""" + articulation_cfg = 
generate_articulation_cfg(articulation_type=articulation_type) + articulation, _ = generate_articulation( + articulation_cfg=articulation_cfg, num_articulations=num_articulations, device=device + ) + + # Play the simulator + sim.reset() + + # Get actual number of shapes from the articulation + num_shapes = articulation.num_shapes + + # Test 1: Set all shapes using index method (passing shape_ids explicitly) + shape_ids = torch.arange(num_shapes, dtype=torch.int32, device=device) + env_ids = torch.arange(num_articulations, dtype=torch.int32, device=device) + friction = torch.empty(num_articulations, num_shapes, device=device).uniform_(0.4, 0.8) + restitution = torch.empty(num_articulations, num_shapes, device=device).uniform_(0.0, 0.2) + + articulation.set_friction_index(friction=friction, shape_ids=shape_ids, env_ids=env_ids) + articulation.set_restitution_index(restitution=restitution, shape_ids=shape_ids, env_ids=env_ids) + + # Simulate physics + sim.step() + articulation.update(sim.cfg.dt) + + # Get material properties via internal bindings (for test verification only) + mu = wp.to_torch(articulation.data._sim_bind_shape_material_mu) + restitution_check = wp.to_torch(articulation.data._sim_bind_shape_material_restitution) + + # Check if material properties are set correctly + torch.testing.assert_close(mu, friction) + torch.testing.assert_close(restitution_check, restitution) + + # Test 2: Set subset of shapes using index method + if num_shapes > 1: + subset_shape_ids = torch.tensor([0], dtype=torch.int32, device=device) + subset_friction = torch.empty(num_articulations, 1, device=device).uniform_(0.1, 0.2) + subset_restitution = torch.empty(num_articulations, 1, device=device).uniform_(0.5, 0.6) + + articulation.set_friction_index(friction=subset_friction, shape_ids=subset_shape_ids, env_ids=env_ids) + articulation.set_restitution_index(restitution=subset_restitution, shape_ids=subset_shape_ids, env_ids=env_ids) + + sim.step() + articulation.update(sim.cfg.dt) 
+ + # Check only the subset was updated + mu_updated = wp.to_torch(articulation.data._sim_bind_shape_material_mu) + restitution_updated = wp.to_torch(articulation.data._sim_bind_shape_material_restitution) + torch.testing.assert_close(mu_updated[:, 0:1], subset_friction) + torch.testing.assert_close(restitution_updated[:, 0:1], subset_restitution) + + +@pytest.mark.parametrize("num_articulations", [2]) +@pytest.mark.parametrize("device", ["cuda:0"]) +@pytest.mark.parametrize("articulation_type", ["anymal"]) +def test_body_q_consistent_after_root_write(num_articulations, device, articulation_type): + """Test that body_q is fresh when collide() runs after a root pose write. + + Regression test for a NaN bug where collide() used stale body_q after env + reset because eval_fk was not called between write_root_pose and collide. + + Uses ``use_mujoco_contacts=False`` so the Newton collision pipeline is + active, then patches ``_simulate_physics_only`` to capture body_q at + the moment collide() is called and asserts it matches joint_q. 
+ """ + from unittest.mock import patch + from isaaclab_newton.physics import NewtonCollisionPipelineCfg + + sim_cfg = SimulationCfg( + dt=1 / 200, + physics=NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=70, nconmax=70, integrator="implicitfast", + use_mujoco_contacts=False, + ), + collision_cfg=NewtonCollisionPipelineCfg(), + num_substeps=1, + use_cuda_graph=False, + ), + ) + with build_simulation_context(sim_cfg=sim_cfg, device=device) as sim: + sim._app_control_on_stop_handle = None + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type) + articulation, env_pos = generate_articulation(articulation_cfg, num_articulations, device) + + sim.reset() + + state = SimulationManager.get_state_0() + model = SimulationManager.get_model() + jc_starts = model.joint_coord_world_start.numpy() + body_starts = model.body_world_start.numpy() + + for _ in range(5): + sim.step() + articulation.update(sim.cfg.dt) + + # Teleport env 0 by 10m (simulating a reset) + new_pose = wp.to_torch(articulation.data.default_root_pose).clone() + new_pose[0, 0] += 10.0 + new_pose[0, 1] += 5.0 + articulation.write_root_pose_to_sim_index( + root_pose=new_pose[0:1], + env_ids=torch.tensor([0], device=device, dtype=torch.int32), + ) + + assert SimulationManager._fk_dirty, ( + "invalidate_fk() was not called by write_root_pose_to_sim_index" + ) + + # Patch _simulate_physics_only to capture body_q before collide runs + captured = {} + original_simulate = SimulationManager._simulate_physics_only.__func__ + + @classmethod # type: ignore[misc] + def _patched_simulate(cls): + if cls._needs_collision_pipeline: + bq = wp.to_torch(cls._state_0.body_q) + jq = wp.to_torch(cls._state_0.joint_q) + b0 = int(body_starts[0]) + jc0 = int(jc_starts[0]) + captured["bq_root"] = bq[b0, :3].clone() + captured["jq_root"] = jq[jc0 : jc0 + 3].clone() + original_simulate(cls) + + with patch.object(SimulationManager, "_simulate_physics_only", _patched_simulate): + sim.step() + 
articulation.update(sim.cfg.dt) + + assert not SimulationManager._fk_dirty, "_fk_dirty was not cleared after sim.step()" + assert captured, "collision pipeline did not run — _needs_collision_pipeline is False" + + bq_root = captured["bq_root"] + jq_root = captured["jq_root"] + diff = (jq_root - bq_root).abs().max().item() + assert diff < 0.01, ( + f"body_q was stale when collide() ran: diff={diff:.4f}m, jq={jq_root.tolist()}, bq={bq_root.tolist()}" + ) + if __name__ == "__main__": pytest.main([__file__, "-v", "--maxfail=1"]) diff --git a/source/isaaclab_newton/test/assets/test_rigid_object.py b/source/isaaclab_newton/test/assets/test_rigid_object.py index 9cc1717d8c5f..4df40aaeed00 100644 --- a/source/isaaclab_newton/test/assets/test_rigid_object.py +++ b/source/isaaclab_newton/test/assets/test_rigid_object.py @@ -589,7 +589,6 @@ def test_reset_rigid_object(num_cubes, device): assert torch.count_nonzero(wp.to_torch(cube_object._permanent_wrench_composer.composed_torque)) == 0 -@pytest.mark.skip(reason="Newton friction/restitution/material not yet supported") @pytest.mark.parametrize("num_cubes", [1, 2]) @pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.isaacsim_ci @@ -603,30 +602,30 @@ def test_rigid_body_set_material_properties(num_cubes, device): # Play sim sim.reset() - # Set material properties - static_friction = torch.FloatTensor(num_cubes, 1).uniform_(0.4, 0.8) - dynamic_friction = torch.FloatTensor(num_cubes, 1).uniform_(0.4, 0.8) - restitution = torch.FloatTensor(num_cubes, 1).uniform_(0.0, 0.2) - - materials = torch.cat([static_friction, dynamic_friction, restitution], dim=-1) + # Set material properties using Newton's native API + # Get actual number of shapes from the object + num_shapes = cube_object.num_shapes + shape_ids = torch.arange(num_shapes, dtype=torch.int32, device=device) + env_ids = torch.arange(num_cubes, dtype=torch.int32, device=device) + friction = torch.empty(num_cubes, num_shapes, device=device).uniform_(0.4, 0.8) + 
restitution = torch.empty(num_cubes, num_shapes, device=device).uniform_(0.0, 0.2) - indices = torch.tensor(range(num_cubes), dtype=torch.int32) - # Add friction to cube - cube_object.root_view.set_material_properties( - wp.from_torch(materials, dtype=wp.float32), wp.from_torch(indices, dtype=wp.int32) - ) + # Use Newton's native friction/restitution API with shape_ids + cube_object.set_friction_index(friction=friction, shape_ids=shape_ids, env_ids=env_ids) + cube_object.set_restitution_index(restitution=restitution, shape_ids=shape_ids, env_ids=env_ids) # Simulate physics - # perform rendering sim.step() # update object cube_object.update(sim.cfg.dt) - # Get material properties - materials_to_check = wp.to_torch(cube_object.root_view.get_material_properties()) + # Get material properties via internal bindings (for test verification only) + mu = wp.to_torch(cube_object.data._sim_bind_shape_material_mu) + restitution_check = wp.to_torch(cube_object.data._sim_bind_shape_material_restitution) # Check if material properties are set correctly - torch.testing.assert_close(materials_to_check.reshape(num_cubes, 3), materials) + torch.testing.assert_close(mu, friction) + torch.testing.assert_close(restitution_check, restitution) @pytest.mark.skip(reason="Newton friction/restitution/material not yet supported") diff --git a/source/isaaclab_newton/test/assets/test_rigid_object_collection.py b/source/isaaclab_newton/test/assets/test_rigid_object_collection.py index a1086d4371ec..0d51f25675f1 100644 --- a/source/isaaclab_newton/test/assets/test_rigid_object_collection.py +++ b/source/isaaclab_newton/test/assets/test_rigid_object_collection.py @@ -522,42 +522,39 @@ def test_reset_object_collection(num_envs, num_cubes, device): ) -@pytest.mark.skip(reason="Newton doesn't support friction/restitution/material properties yet") @pytest.mark.parametrize("num_envs", [1, 3]) @pytest.mark.parametrize("num_cubes", [1, 2]) @pytest.mark.parametrize("device", ["cuda:0", "cpu"]) def 
test_set_material_properties(num_envs, num_cubes, device): - """Test getting and setting material properties of rigid object.""" + """Test getting and setting material properties of rigid object collection.""" with _newton_sim_context(device, auto_add_lighting=True) as sim: sim._app_control_on_stop_handle = None object_collection, _ = generate_cubes_scene(num_envs=num_envs, num_cubes=num_cubes, device=device) sim.reset() - # Set material properties - static_friction = torch.FloatTensor(num_envs, num_cubes, 1).uniform_(0.4, 0.8) - dynamic_friction = torch.FloatTensor(num_envs, num_cubes, 1).uniform_(0.4, 0.8) - restitution = torch.FloatTensor(num_envs, num_cubes, 1).uniform_(0.0, 0.2) + # Set material properties using Newton's native API + # Get actual number of shapes from the collection + num_shapes = object_collection.num_shapes + shape_ids = torch.arange(num_shapes, dtype=torch.int32, device=device) + env_ids = torch.arange(num_envs, dtype=torch.int32, device=device) + friction = torch.empty(num_envs, num_shapes, device=device).uniform_(0.4, 0.8) + restitution = torch.empty(num_envs, num_shapes, device=device).uniform_(0.0, 0.2) - materials = torch.cat([static_friction, dynamic_friction, restitution], dim=-1) - - # Add friction to cube - indices = torch.tensor(range(num_cubes * num_envs), dtype=torch.int) - object_collection.root_view.set_material_properties( - object_collection.reshape_data_to_view_3d(wp.from_torch(materials, dtype=wp.float32), 3, device="cpu"), - wp.from_torch(indices, dtype=wp.int32), - ) + # Use Newton's native friction/restitution API with shape_ids + object_collection.set_friction_index(friction=friction, shape_ids=shape_ids, env_ids=env_ids) + object_collection.set_restitution_index(restitution=restitution, shape_ids=shape_ids, env_ids=env_ids) # Perform simulation sim.step() object_collection.update(sim.cfg.dt) - # Get material properties - materials_to_check = object_collection.root_view.get_material_properties() + # Get material 
properties via internal bindings (for test verification only) + mu = wp.to_torch(object_collection.data._sim_bind_shape_material_mu) + restitution_check = wp.to_torch(object_collection.data._sim_bind_shape_material_restitution) # Check if material properties are set correctly - torch.testing.assert_close( - wp.to_torch(object_collection.reshape_view_to_data_3d(materials_to_check, 3, device="cpu")), materials - ) + torch.testing.assert_close(mu, friction) + torch.testing.assert_close(restitution_check, restitution) @pytest.mark.parametrize("num_envs", [1, 3]) diff --git a/source/isaaclab_newton/test/physics/physics_test_utils.py b/source/isaaclab_newton/test/physics/physics_test_utils.py index 65177a9c8a93..11c78ef61f74 100644 --- a/source/isaaclab_newton/test/physics/physics_test_utils.py +++ b/source/isaaclab_newton/test/physics/physics_test_utils.py @@ -16,7 +16,7 @@ from enum import Enum, auto import pytest -from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg, NewtonCollisionPipelineCfg import isaaclab.sim as sim_utils from isaaclab.assets import RigidObjectCfg @@ -28,12 +28,13 @@ def make_sim_cfg( - use_mujoco_contacts: bool = False, device: str = "cuda:0", gravity: tuple = (0.0, 0.0, -9.81) + use_mujoco_contacts: bool = True, device: str = "cuda:0", gravity: tuple = (0.0, 0.0, -9.81) ) -> SimulationCfg: """Create simulation configuration with specified collision pipeline. Args: - use_mujoco_contacts: If True, use MuJoCo contact pipeline. If False, use Newton contact pipeline. + use_mujoco_contacts: If True, use MuJoCo contact pipeline (default). + If False, use Newton collision pipeline. device: Device to run simulation on ("cuda:0" or "cpu"). gravity: Gravity vector (x, y, z). 
@@ -46,11 +47,14 @@ def make_sim_cfg( cone="elliptic", ls_parallel=False, integrator="implicitfast", - use_mujoco_contacts=use_mujoco_contacts, ) + # collision_cfg=None means MuJoCo contacts, otherwise Newton collision pipeline + collision_cfg = None if use_mujoco_contacts else NewtonCollisionPipelineCfg() + newton_cfg = NewtonCfg( solver_cfg=solver_cfg, + collision_cfg=collision_cfg, num_substeps=1, debug_mode=False, use_cuda_graph=False, diff --git a/source/isaaclab_newton/test/physics/test_debug_state_buffer.py b/source/isaaclab_newton/test/physics/test_debug_state_buffer.py new file mode 100644 index 000000000000..de3453bb52c3 --- /dev/null +++ b/source/isaaclab_newton/test/physics/test_debug_state_buffer.py @@ -0,0 +1,403 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# pyright: reportPrivateUsage=none, reportArgumentType=none + +"""Tests for debug state buffer (NaN incident replay). + +Uses real warp arrays on the default device so torch interop paths are tested. 
+""" + +from __future__ import annotations + +import tempfile +import time +from pathlib import Path + +import numpy as np +import pytest +import torch +import warp as wp + +from isaaclab_newton.physics.debug_state_buffer import DebugStateBuffer + +wp.init() + +NUM_BODIES = 4 +NUM_JOINTS = 3 + + +class FakeState: + """Minimal stand-in for newton.State backed by real warp arrays.""" + + def __init__(self, num_bodies: int = NUM_BODIES, num_joints: int = NUM_JOINTS, device: str = "cpu") -> None: + self.body_q = wp.zeros(num_bodies, dtype=wp.transformf, device=device) + self.body_qd = wp.zeros(num_bodies, dtype=wp.spatial_vectorf, device=device) + self.joint_q = wp.zeros(num_joints, dtype=wp.float32, device=device) + self.joint_qd = wp.zeros(num_joints, dtype=wp.float32, device=device) + + def assign(self, other: FakeState) -> None: + wp.copy(self.body_q, other.body_q) + wp.copy(self.body_qd, other.body_qd) + wp.copy(self.joint_q, other.joint_q) + wp.copy(self.joint_qd, other.joint_qd) + + def numpy(self): # noqa: D102 + raise NotImplementedError + + +class FakeModel: + """Minimal stand-in for newton.Model.""" + + def __init__( + self, + num_bodies: int = NUM_BODIES, + num_joints: int = NUM_JOINTS, + world_count: int = 0, + device: str = "cpu", + ) -> None: + self._num_bodies = num_bodies + self._num_joints = num_joints + self._device = device + self.world_count = world_count + if world_count > 1: + bodies_per_world = num_bodies // world_count + joints_per_world = num_joints // world_count + self.body_world_start = wp.array( + [i * bodies_per_world for i in range(world_count)] + [num_bodies], + dtype=wp.int32, + device="cpu", + ) + self.joint_coord_world_start = wp.array( + [i * joints_per_world for i in range(world_count)] + [num_joints], + dtype=wp.int32, + device="cpu", + ) + self.joint_dof_world_start = wp.array( + [i * joints_per_world for i in range(world_count)] + [num_joints], + dtype=wp.int32, + device="cpu", + ) + + def state(self) -> FakeState: + return 
FakeState(self._num_bodies, self._num_joints, self._device) + + +def _inject_nan_body_q(state: FakeState, body_idx: int, component: int = 0) -> None: + """Set one element of body_q to NaN on the device.""" + t = wp.to_torch(state.body_q) + t[body_idx, component] = float("nan") + + +def _inject_nan_joint_qd(state: FakeState, joint_idx: int) -> None: + t = wp.to_torch(state.joint_qd) + t[joint_idx] = float("nan") + + +# ------------------------------------------------------------------ +# Basic buffer tests +# ------------------------------------------------------------------ + + +def test_buffer_size_clamped(): + model = FakeModel() + buf = DebugStateBuffer(model, 5) + assert buf.size == 5 + + buf_big = DebugStateBuffer(model, 999999) + assert buf_big.size == 2000 + + +@pytest.mark.parametrize("size", [1, 3, 10]) +def test_size_property(size): + model = FakeModel() + buf = DebugStateBuffer(model, size) + assert buf.size == size + + +def test_step_rolls_index(): + model = FakeModel() + buf = DebugStateBuffer(model, 3) + state = FakeState() + for i in range(7): + buf.step(state, sim_time=float(i)) + assert buf._write_idx == 7 % 3 + + +def test_no_export_on_clean_state(): + model = FakeModel() + with tempfile.TemporaryDirectory() as tmpdir: + buf = DebugStateBuffer(model, 3, export_path=tmpdir) + state = FakeState() + for i in range(5): + buf.step(state, sim_time=float(i) * 0.01) + assert len(list(Path(tmpdir).glob("nan_replay_*.npz"))) == 0 + + +def test_export_on_nan_body_q(): + model = FakeModel() + with tempfile.TemporaryDirectory() as tmpdir: + buf = DebugStateBuffer(model, 4, export_path=tmpdir) + clean = FakeState() + buf.step(clean, sim_time=0.0) + buf.step(clean, sim_time=0.01) + + bad = FakeState() + _inject_nan_body_q(bad, 0) + buf.step(bad, sim_time=0.02) + + files = list(Path(tmpdir).glob("nan_replay_*.npz")) + assert len(files) == 1 + data = np.load(files[0]) + assert data["buffer_size"] == 4 + assert float(data["sim_time"]) == pytest.approx(0.02) + 
assert np.isnan(data["body_q"]).any() + + +def test_export_on_nan_joint_qd(): + model = FakeModel() + with tempfile.TemporaryDirectory() as tmpdir: + buf = DebugStateBuffer(model, 2, export_path=tmpdir) + bad = FakeState() + _inject_nan_joint_qd(bad, 1) + buf.step(bad, sim_time=1.0) + files = list(Path(tmpdir).glob("nan_replay_*.npz")) + assert len(files) == 1 + data = np.load(files[0]) + assert np.isnan(data["joint_qd"]).any() + + +def test_per_env_nan_detection(): + """With 2 worlds, only the env containing NaN should be reported.""" + model = FakeModel(num_bodies=4, num_joints=2, world_count=2) + with tempfile.TemporaryDirectory() as tmpdir: + buf = DebugStateBuffer(model, 2, export_path=tmpdir, export_envs_only=True) + assert buf._world_count == 2 + + bad = FakeState(num_bodies=4, num_joints=2) + _inject_nan_body_q(bad, body_idx=2, component=0) # env 1 (bodies 2-3) + + buf.step(bad, sim_time=0.5) + + files = list(Path(tmpdir).glob("nan_replay_*.npz")) + assert len(files) == 1 + data = np.load(files[0]) + env_ids = data["exported_env_ids"] + assert len(env_ids) == 1 + assert env_ids[0] == 1 + + +def test_clear_resets(): + model = FakeModel() + buf = DebugStateBuffer(model, 5) + assert buf.size == 5 + buf.clear() + assert buf.size == 0 + assert buf._ring == [] + + +def test_step_after_clear_is_noop(): + """step() after clear() should not crash.""" + model = FakeModel() + buf = DebugStateBuffer(model, 2) + buf.clear() + state = FakeState() + buf.step(state, sim_time=0.0) + + +def test_export_envs_only_false_exports_all(): + """With export_envs_only=False, full state is exported even with world layout.""" + model = FakeModel(num_bodies=4, num_joints=2, world_count=2) + with tempfile.TemporaryDirectory() as tmpdir: + buf = DebugStateBuffer(model, 2, export_path=tmpdir, export_envs_only=False) + bad = FakeState(num_bodies=4, num_joints=2) + _inject_nan_body_q(bad, body_idx=3) + buf.step(bad, sim_time=0.1) + + files = list(Path(tmpdir).glob("nan_replay_*.npz")) + 
assert len(files) == 1 + data = np.load(files[0]) + assert "exported_env_ids" not in data + assert data["body_q"].shape[1] == 4 # all bodies, not sliced + + +# ------------------------------------------------------------------ +# Deduplication: already-NaN env_ids are suppressed +# ------------------------------------------------------------------ + + +def test_same_env_nan_consecutive_steps_single_export(): + """When the same env NaNs on consecutive steps, only one npz is produced.""" + model = FakeModel(num_bodies=4, num_joints=2, world_count=2) + with tempfile.TemporaryDirectory() as tmpdir: + buf = DebugStateBuffer(model, 3, export_path=tmpdir, max_exports=10) + bad = FakeState(num_bodies=4, num_joints=2) + _inject_nan_body_q(bad, body_idx=2) # env 1 + + for t in range(5): + buf.step(bad, sim_time=float(t) * 0.01) + + files = list(Path(tmpdir).glob("nan_replay_*.npz")) + assert len(files) == 1, f"Expected 1 export, got {len(files)}" + assert 1 in buf._exported_envs + + +def test_second_env_nan_after_first_gets_separate_export(): + """A new env NaN'ing after the first env was already exported gets its own export.""" + model = FakeModel(num_bodies=4, num_joints=2, world_count=2) + with tempfile.TemporaryDirectory() as tmpdir: + buf = DebugStateBuffer(model, 2, export_path=tmpdir, max_exports=10) + + bad_env1 = FakeState(num_bodies=4, num_joints=2) + _inject_nan_body_q(bad_env1, body_idx=2) # env 1 + buf.step(bad_env1, sim_time=0.01) + assert len(list(Path(tmpdir).glob("nan_replay_*.npz"))) == 1 + + # Ensure timestamp differs + time.sleep(0.01) + + bad_both = FakeState(num_bodies=4, num_joints=2) + _inject_nan_body_q(bad_both, body_idx=0) # env 0 + _inject_nan_body_q(bad_both, body_idx=2) # env 1 (already exported) + buf.step(bad_both, sim_time=0.02) + + files = list(Path(tmpdir).glob("nan_replay_*.npz")) + assert len(files) == 2, f"Expected 2 exports, got {len(files)}" + assert buf._exported_envs == {0, 1} + + +# 
------------------------------------------------------------------ +# max_exports and nan_halt +# ------------------------------------------------------------------ + + +def test_nan_halt_after_max_exports_default(): + """With default max_exports=1, nan_halt is True after first export.""" + model = FakeModel() + with tempfile.TemporaryDirectory() as tmpdir: + buf = DebugStateBuffer(model, 2, export_path=tmpdir) + assert not buf.nan_halt + + bad = FakeState() + _inject_nan_body_q(bad, 0) + buf.step(bad, sim_time=0.0) + + assert buf.nan_halt + assert buf._export_count == 1 + + +def test_nan_halt_stops_recording(): + """After nan_halt, step() is a no-op (no more exports).""" + model = FakeModel() + with tempfile.TemporaryDirectory() as tmpdir: + buf = DebugStateBuffer(model, 2, export_path=tmpdir, max_exports=1) + bad = FakeState() + _inject_nan_body_q(bad, 0) + buf.step(bad, sim_time=0.0) + assert buf.nan_halt + + write_idx_before = buf._write_idx + buf.step(bad, sim_time=0.01) + assert buf._write_idx == write_idx_before # no advancement + + +def test_max_exports_allows_multiple(): + """With max_exports=3, exactly 3 exports are allowed.""" + model = FakeModel(num_bodies=6, num_joints=3, world_count=3) + with tempfile.TemporaryDirectory() as tmpdir: + buf = DebugStateBuffer(model, 2, export_path=tmpdir, max_exports=3) + + for env_idx in range(3): + time.sleep(0.01) + bad = FakeState(num_bodies=6, num_joints=3) + _inject_nan_body_q(bad, body_idx=env_idx * 2) # each env's first body + buf.step(bad, sim_time=float(env_idx) * 0.01) + + files = list(Path(tmpdir).glob("nan_replay_*.npz")) + assert len(files) == 3 + assert buf.nan_halt + assert buf._export_count == 3 + + +# ------------------------------------------------------------------ +# Scene exporter +# ------------------------------------------------------------------ + + +def test_scene_exporter_called_on_nan(): + """scene_exporter callable is invoked with (usd_path, env_ids) on NaN.""" + calls: list[tuple[str, 
list[int]]] = [] + + def mock_exporter(usd_path: str, env_ids: list[int]) -> None: + calls.append((usd_path, env_ids)) + + model = FakeModel(num_bodies=4, num_joints=2, world_count=2) + with tempfile.TemporaryDirectory() as tmpdir: + buf = DebugStateBuffer( + model, 2, export_path=tmpdir, scene_exporter=mock_exporter, max_exports=5, + ) + bad = FakeState(num_bodies=4, num_joints=2) + _inject_nan_body_q(bad, body_idx=2) # env 1 + buf.step(bad, sim_time=0.5) + + assert len(calls) == 1 + usd_path, env_ids = calls[0] + assert usd_path.endswith(".usd") + assert env_ids == [1] + + +def test_scene_exporter_not_called_without_nan(): + """scene_exporter is not called when no NaN.""" + calls: list = [] + + def mock_exporter(usd_path: str, env_ids: list[int]) -> None: + calls.append(1) + + model = FakeModel() + with tempfile.TemporaryDirectory() as tmpdir: + buf = DebugStateBuffer(model, 2, export_path=tmpdir, scene_exporter=mock_exporter) + clean = FakeState() + buf.step(clean, sim_time=0.0) + assert len(calls) == 0 + + +def test_scene_exporter_single_env_empty_ids(): + """For single-env (no world layout), scene_exporter receives empty env_ids.""" + calls: list[tuple[str, list[int]]] = [] + + def mock_exporter(usd_path: str, env_ids: list[int]) -> None: + calls.append((usd_path, env_ids)) + + model = FakeModel() + with tempfile.TemporaryDirectory() as tmpdir: + buf = DebugStateBuffer(model, 2, export_path=tmpdir, scene_exporter=mock_exporter) + bad = FakeState() + _inject_nan_body_q(bad, 0) + buf.step(bad, sim_time=0.0) + + assert len(calls) == 1 + assert calls[0][1] == [] + + +def test_scene_exporter_failure_does_not_crash(): + """If scene_exporter raises, export still completes (npz is written).""" + + def broken_exporter(usd_path: str, env_ids: list[int]) -> None: + raise RuntimeError("USD not available") + + model = FakeModel() + with tempfile.TemporaryDirectory() as tmpdir: + buf = DebugStateBuffer(model, 2, export_path=tmpdir, scene_exporter=broken_exporter) + bad = 
FakeState() + _inject_nan_body_q(bad, 0) + buf.step(bad, sim_time=0.0) + + files = list(Path(tmpdir).glob("nan_replay_*.npz")) + assert len(files) == 1 + assert buf.nan_halt + + +if __name__ == "__main__": + pytest.main([__file__, "-v", "--maxfail=1"]) diff --git a/source/isaaclab_newton/test/sensors/test_contact_sensor.py b/source/isaaclab_newton/test/sensors/test_contact_sensor.py index c3da11a99696..12882e02f392 100644 --- a/source/isaaclab_newton/test/sensors/test_contact_sensor.py +++ b/source/isaaclab_newton/test/sensors/test_contact_sensor.py @@ -27,6 +27,7 @@ import pytest import torch import warp as wp +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg, NewtonCollisionPipelineCfg from physics.physics_test_utils import ( COLLISION_PIPELINES, STABLE_SHAPES, @@ -43,11 +44,12 @@ from isaaclab.assets import Articulation, RigidObject, RigidObjectCfg from isaaclab.scene import InteractiveScene, InteractiveSceneCfg from isaaclab.sensors import ContactSensor, ContactSensorCfg -from isaaclab.sim import build_simulation_context -from isaaclab.terrains import TerrainImporterCfg +from isaaclab.sim import SimulationCfg, build_simulation_context +from isaaclab.terrains import HfRandomUniformTerrainCfg, TerrainGeneratorCfg, TerrainImporterCfg from isaaclab.utils import configclass from isaaclab_assets.robots.allegro import ALLEGRO_HAND_CFG +from isaaclab_assets.robots.anymal import ANYMAL_D_CFG ## # Scene Configuration @@ -753,6 +755,142 @@ def test_finger_contact_sensor_isolation(device: str, use_mujoco_contacts: bool, ) +@pytest.mark.parametrize( + "device", ["cuda:0", pytest.param("cpu", marks=pytest.mark.skip(reason="CPU segfaults with mesh terrain"))] +) +def test_rough_terrain_contact_sanity(device: str): + """Test that dropping robot on rough terrain only produces foot contacts. 
+ + This test drops the robot and verifies: + - Only feet should have contact forces + - All other bodies (base, thighs, shanks, hips) should have zero contact + + Physics or asset bugs manifest as spurious contact forces on non-foot bodies. + """ + + num_envs = 16 + simulation_steps = 200 + settling_steps = 10 + gravity_mag = 9.81 + contact_threshold = 1.0 + + solver_cfg = MJWarpSolverCfg( + njmax=300, + nconmax=300, + ls_iterations=20, + cone="pyramidal", + ls_parallel=False, + integrator="implicitfast", + ) + collision_cfg = NewtonCollisionPipelineCfg(max_triangle_pairs=2_000_000) + newton_cfg = NewtonCfg( + solver_cfg=solver_cfg, collision_cfg=collision_cfg, num_substeps=1, debug_mode=False, use_cuda_graph=False + ) + sim_cfg = SimulationCfg( + dt=1.0 / 120.0, + device=device, + gravity=(0.0, 0.0, -gravity_mag), + create_stage_in_memory=False, + physics=newton_cfg, + ) + + with build_simulation_context(sim_cfg=sim_cfg) as sim: + sim._app_control_on_stop_handle = None + + scene_cfg = ContactSensorTestSceneCfg(num_envs=num_envs, env_spacing=3.0, lazy_sensor_update=False) + + # Use rough terrain instead of flat ground plane + scene_cfg.terrain = TerrainImporterCfg( + prim_path="/World/ground", + terrain_type="generator", + terrain_generator=TerrainGeneratorCfg( + seed=0, + size=(16.0, 16.0), + border_width=0.0, + num_rows=1, + num_cols=1, + sub_terrains={ + "random_rough": HfRandomUniformTerrainCfg( + proportion=1.0, noise_range=(0.0, 0.05), noise_step=0.01, border_width=0.25 + ), + }, + ), + ) + + # Use Anymal-D articulated robot - same as rough terrain locomotion task + scene_cfg.robot = ANYMAL_D_CFG.copy() + scene_cfg.robot.prim_path = "{ENV_REGEX_NS}/Robot" + scene_cfg.robot.init_state.pos = (0.0, 0.0, 0.6) + + # Single contact sensor on all bodies + scene_cfg.contact_sensor_a = ContactSensorCfg( + prim_path="{ENV_REGEX_NS}/Robot/.*", + update_period=0.0, + history_length=1, + ) + + scene = InteractiveScene(scene_cfg) + sim.reset() + scene.reset() + + 
contact_sensor: ContactSensor = scene["contact_sensor_a"] + robot: Articulation = scene["robot"] + + # Find foot vs non-foot body indices + sensor_names = contact_sensor.sensor_names + foot_indices = [i for i, name in enumerate(sensor_names) if "FOOT" in name] + non_foot_indices = [i for i, name in enumerate(sensor_names) if "FOOT" not in name] + + # Set default joint positions to standing pose + default_jpos = wp.to_torch(robot.data.default_joint_pos).clone() + default_jvel = wp.to_torch(robot.data.default_joint_vel).clone() + robot.write_joint_position_to_sim_index(position=default_jpos) + robot.write_joint_velocity_to_sim_index(velocity=default_jvel) + + nan_step = -1 + spurious_contact_step = -1 + total_spurious_contacts = 0 + max_foot_force = 0.0 + max_non_foot_force = 0.0 + + for step in range(simulation_steps): + perform_sim_step(sim, scene, SIM_DT) + + forces = wp.to_torch(contact_sensor.data.net_forces_w) + # forces shape: [num_envs, num_bodies, 3] + + # Extract foot and non-foot forces + foot_forces = forces[:, foot_indices, :] + non_foot_forces = forces[:, non_foot_indices, :] + + foot_force_mag = torch.norm(foot_forces, dim=-1).max().item() + non_foot_force_mag = torch.norm(non_foot_forces, dim=-1).max().item() + max_foot_force = max(max_foot_force, foot_force_mag) + max_non_foot_force = max(max_non_foot_force, non_foot_force_mag) + + # Skip settling period for spurious contact check + if step >= settling_steps and non_foot_force_mag > contact_threshold: + total_spurious_contacts += 1 + if spurious_contact_step < 0: + spurious_contact_step = step + + # Check for NaN + if torch.isnan(forces).any(): + if nan_step < 0: + nan_step = step + + assert nan_step < 0, f"NaN detected at step {nan_step}." 
+ + # Spurious contacts on non-foot bodies indicates corrupted contact forces + # After settling, should have very few spurious contacts + checked_steps = simulation_steps - settling_steps + assert total_spurious_contacts < checked_steps * 0.2, ( + f"Spurious contacts on non-foot bodies: {total_spurious_contacts}/{checked_steps} steps " + f"(first at step {spurious_contact_step}, max force {max_non_foot_force:.1f} N). " + f"Only feet should have contact forces (max {max_foot_force:.1f} N)." + ) + + # =================================================================== # Utility # =================================================================== diff --git a/source/isaaclab_newton/test/sim/__init__.py b/source/isaaclab_newton/test/sim/__init__.py new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/source/isaaclab_newton/test/sim/__init__.py @@ -0,0 +1 @@ + diff --git a/source/isaaclab_newton/test/sim/test_views_xform_prim_newton.py b/source/isaaclab_newton/test/sim/test_views_xform_prim_newton.py new file mode 100644 index 000000000000..62541d3c97f4 --- /dev/null +++ b/source/isaaclab_newton/test/sim/test_views_xform_prim_newton.py @@ -0,0 +1,279 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for Newton XformPrimView backed by sites (body_q + local transform). + +Covers the same test surface as the core USD and PhysX Fabric tests where +applicable. Tests that require USD hierarchy features (local poses with +parents, visibility) are skipped since Newton tracks bodies in world space. 
+""" + +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).resolve().parents[1])) + +import pytest +import torch +import warp as wp + +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg +from isaaclab_newton.physics.newton_manager import NewtonManager +from isaaclab_newton.sim.views import XformPrimView + +import isaaclab.sim as sim_utils +from isaaclab.assets import RigidObject, RigidObjectCfg +from isaaclab.scene import InteractiveScene, InteractiveSceneCfg +from isaaclab.sim import SimulationCfg, build_simulation_context +from isaaclab.utils import configclass + +NEWTON_SIM_CFG = SimulationCfg( + physics=NewtonCfg( + solver_cfg=MJWarpSolverCfg(), + ), +) + + +@configclass +class SimpleSceneCfg(InteractiveSceneCfg): + cube: RigidObjectCfg = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/Cube", + spawn=sim_utils.CuboidCfg( + size=(0.2, 0.2, 0.2), + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + mass_props=sim_utils.MassPropertiesCfg(mass=1.0), + collision_props=sim_utils.CollisionPropertiesCfg(), + ), + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, 0.0, 1.0)), + ) + + +def _newton_sim_context(device="cuda:0", num_envs=4, **kwargs): + NEWTON_SIM_CFG.device = device + return build_simulation_context(device=device, sim_cfg=NEWTON_SIM_CFG, **kwargs) + + +def _build_scene(num_envs=4, device="cuda:0"): + """Build scene and return (sim, scene, view).""" + ctx = _newton_sim_context(device=device, num_envs=num_envs, add_ground_plane=True) + sim = ctx.__enter__() + sim._app_control_on_stop_handle = None + scene = InteractiveScene(SimpleSceneCfg(num_envs=num_envs, env_spacing=2.0)) + sim.reset() + view = XformPrimView("/World/envs/env_.*/Cube", device=device) + return sim, scene, view, ctx + + +""" +Tests - Initialization. 
+""" + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +def test_count(): + """Test that Newton XformPrimView reports correct prim count.""" + sim, scene, view, ctx = _build_scene(num_envs=4) + assert view.count == 4 + ctx.__exit__(None, None, None) + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +@pytest.mark.parametrize("num_envs", [1, 4, 8]) +def test_count_various_sizes(num_envs): + """Test count with different numbers of environments.""" + sim, scene, view, ctx = _build_scene(num_envs=num_envs) + assert view.count == num_envs + ctx.__exit__(None, None, None) + + +""" +Tests - Getters. +""" + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +def test_get_world_poses_shape(): + """Test that get_world_poses returns correct shapes.""" + sim, scene, view, ctx = _build_scene(num_envs=4) + positions, orientations = view.get_world_poses() + + assert positions.shape == (4, 3) + assert orientations.shape == (4, 4) + assert positions.device.type == "cuda" + ctx.__exit__(None, None, None) + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +def test_get_world_poses_matches_body_q(): + """Test that get_world_poses returns values matching body_q.""" + sim, scene, view, ctx = _build_scene(num_envs=4) + positions, orientations = view.get_world_poses() + + model = NewtonManager.get_model() + body_labels = list(model.body_label) + state = NewtonManager.get_state_0() + body_q = wp.to_torch(state.body_q) + + for i in range(4): + prim_path = f"/World/envs/env_{i}/Cube" + if prim_path in body_labels: + body_idx = body_labels.index(prim_path) + expected_pos = body_q[body_idx, :3] + torch.testing.assert_close(positions[i], expected_pos, atol=1e-5, rtol=0) + ctx.__exit__(None, None, None) + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +def test_get_world_poses_with_indices(): + """Test get_world_poses with a 
subset of indices.""" + sim, scene, view, ctx = _build_scene(num_envs=4) + positions_all, orientations_all = view.get_world_poses() + + indices = [0, 2] + positions_subset, orientations_subset = view.get_world_poses(indices) + + assert positions_subset.shape == (2, 3) + assert orientations_subset.shape == (2, 4) + torch.testing.assert_close(positions_subset[0], positions_all[0], atol=1e-6, rtol=0) + torch.testing.assert_close(positions_subset[1], positions_all[2], atol=1e-6, rtol=0) + ctx.__exit__(None, None, None) + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +def test_get_world_poses_single_index(): + """Test get_world_poses with a single index.""" + sim, scene, view, ctx = _build_scene(num_envs=5) + positions_all, _ = view.get_world_poses() + + positions_single, orientations_single = view.get_world_poses([3]) + assert positions_single.shape == (1, 3) + assert orientations_single.shape == (1, 4) + torch.testing.assert_close(positions_single[0], positions_all[3], atol=1e-6, rtol=0) + ctx.__exit__(None, None, None) + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +def test_get_world_poses_out_of_order_indices(): + """Test get_world_poses with out-of-order indices.""" + sim, scene, view, ctx = _build_scene(num_envs=5) + positions_all, _ = view.get_world_poses() + + indices = [4, 1, 3] + positions_subset, _ = view.get_world_poses(indices) + + assert positions_subset.shape == (3, 3) + torch.testing.assert_close(positions_subset[0], positions_all[4], atol=1e-6, rtol=0) + torch.testing.assert_close(positions_subset[1], positions_all[1], atol=1e-6, rtol=0) + torch.testing.assert_close(positions_subset[2], positions_all[3], atol=1e-6, rtol=0) + ctx.__exit__(None, None, None) + + +""" +Tests - Setters. 
+""" + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +def test_set_world_poses(): + """Test setting world poses writes to body_q.""" + sim, scene, view, ctx = _build_scene(num_envs=4) + + new_positions = torch.tensor( + [[10.0, 20.0, 30.0], [40.0, 50.0, 60.0], [70.0, 80.0, 90.0], [100.0, 110.0, 120.0]], + device="cuda:0", + ) + new_orientations = torch.tensor( + [[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.7071068, 0.7071068], + [0.7071068, 0.0, 0.0, 0.7071068], [0.3826834, 0.0, 0.0, 0.9238795]], + device="cuda:0", + ) + + view.set_world_poses(new_positions, new_orientations) + retrieved_positions, retrieved_orientations = view.get_world_poses() + + torch.testing.assert_close(retrieved_positions, new_positions, atol=1e-5, rtol=0) + torch.testing.assert_close(retrieved_orientations, new_orientations, atol=1e-5, rtol=0) + ctx.__exit__(None, None, None) + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +def test_set_world_poses_only_positions(): + """Test setting only positions, leaving orientations unchanged.""" + sim, scene, view, ctx = _build_scene(num_envs=3) + _, initial_orientations = view.get_world_poses() + + new_positions = torch.tensor([[1.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 3.0]], device="cuda:0") + view.set_world_poses(positions=new_positions, orientations=None) + + retrieved_positions, retrieved_orientations = view.get_world_poses() + torch.testing.assert_close(retrieved_positions, new_positions, atol=1e-5, rtol=0) + torch.testing.assert_close(retrieved_orientations, initial_orientations, atol=1e-5, rtol=0) + ctx.__exit__(None, None, None) + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +def test_set_world_poses_only_orientations(): + """Test setting only orientations, leaving positions unchanged.""" + sim, scene, view, ctx = _build_scene(num_envs=3) + initial_positions, _ = view.get_world_poses() + + new_orientations = torch.tensor( + [[0.0, 0.0, 
0.7071068, 0.7071068], [0.7071068, 0.0, 0.0, 0.7071068], [0.3826834, 0.0, 0.0, 0.9238795]], + device="cuda:0", + ) + view.set_world_poses(positions=None, orientations=new_orientations) + + retrieved_positions, retrieved_orientations = view.get_world_poses() + torch.testing.assert_close(retrieved_positions, initial_positions, atol=1e-5, rtol=0) + torch.testing.assert_close(retrieved_orientations, new_orientations, atol=1e-5, rtol=0) + ctx.__exit__(None, None, None) + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +def test_set_world_poses_with_indices(): + """Test setting world poses for a subset of indices.""" + sim, scene, view, ctx = _build_scene(num_envs=5) + initial_positions, initial_orientations = view.get_world_poses() + + indices = [1, 3] + new_positions = torch.tensor([[10.0, 0.0, 0.0], [30.0, 0.0, 0.0]], device="cuda:0") + view.set_world_poses(positions=new_positions, orientations=None, indices=indices) + + updated_positions, _ = view.get_world_poses() + + torch.testing.assert_close(updated_positions[1], new_positions[0], atol=1e-5, rtol=0) + torch.testing.assert_close(updated_positions[3], new_positions[1], atol=1e-5, rtol=0) + torch.testing.assert_close(updated_positions[0], initial_positions[0], atol=1e-5, rtol=0) + torch.testing.assert_close(updated_positions[2], initial_positions[2], atol=1e-5, rtol=0) + torch.testing.assert_close(updated_positions[4], initial_positions[4], atol=1e-5, rtol=0) + ctx.__exit__(None, None, None) + + +""" +Tests - Round-trip consistency. 
+""" + + +@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available") +def test_write_read_roundtrip(): + """Test that write -> read round-trip is consistent.""" + sim, scene, view, ctx = _build_scene(num_envs=4) + + new_positions = torch.rand((4, 3), dtype=torch.float32, device="cuda:0") * 10.0 + new_orientations = torch.tensor([[0.0, 0.0, 0.0, 1.0]] * 4, dtype=torch.float32, device="cuda:0") + + view.set_world_poses(new_positions, new_orientations) + pos, quat = view.get_world_poses() + + torch.testing.assert_close(pos, new_positions, atol=1e-5, rtol=0) + torch.testing.assert_close(quat, new_orientations, atol=1e-5, rtol=0) + + new_positions2 = torch.rand((4, 3), dtype=torch.float32, device="cuda:0") * 10.0 + view.set_world_poses(new_positions2, new_orientations) + pos2, quat2 = view.get_world_poses() + + torch.testing.assert_close(pos2, new_positions2, atol=1e-5, rtol=0) + torch.testing.assert_close(quat2, new_orientations, atol=1e-5, rtol=0) + ctx.__exit__(None, None, None) diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py index cf7d1f95d5ca..2610ff1e4f6f 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py @@ -2218,6 +2218,65 @@ def set_inertias_mask( # Set full data to True to ensure the right code path is taken inside the kernel. self.set_inertias_index(inertias=inertias, body_ids=body_ids, env_ids=env_ids, full_data=True) + def set_material_properties_index( + self, + *, + materials: torch.Tensor | wp.array, + env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, + ) -> None: + """Set material properties (friction and restitution) for collision shapes using indices. + + The material properties consist of static friction, dynamic friction, and restitution + coefficients for each collision shape. + + .. 
            This method expects partial data.

        .. tip::
            Both the index and mask methods have dedicated optimized implementations. Performance is similar for both.
            However, to allow graphed pipelines, the mask method must be used.

        Args:
            materials: Material properties for all shapes. Shape is (len(env_ids), max_shapes, 3)
                where the 3 values are [static_friction, dynamic_friction, restitution].
            env_ids: Environment indices. If None, all environments are updated.
        """
        # resolve all indices
        env_ids = self._resolve_env_ids(env_ids)
        # convert materials to warp array if needed (root_view requires warp arrays)
        if isinstance(materials, torch.Tensor):
            materials = wp.from_torch(materials, dtype=wp.float32)
        # set material properties via root view
        self.root_view.set_material_properties(materials, env_ids)

    def set_material_properties_mask(
        self,
        *,
        materials: torch.Tensor | wp.array,
        env_mask: wp.array | None = None,
    ) -> None:
        """Set material properties (friction and restitution) for collision shapes using masks.

        The material properties consist of static friction, dynamic friction, and restitution
        coefficients for each collision shape.

        .. note::
            This method expects full data.

        .. tip::
            For maximum performance we recommend using the index method. This is because in PhysX, the tensor API
            only supports indexing, hence masks need to be converted to indices.

        Args:
            materials: Material properties for all shapes. Shape is ``(num_instances, max_shapes, 3)``
                where the 3 values are ``[static_friction, dynamic_friction, restitution]``.
            env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,).
        """
        # Resolve masks to indices
        env_ids = self._resolve_env_mask(env_mask)
        # NOTE(review): the full-size ``materials`` buffer is forwarded while the index
        # method's docstring states it expects ``(len(env_ids), ...)``. When ``env_mask``
        # selects a strict subset, one of the two contracts is inconsistent -- confirm
        # against ``root_view.set_material_properties`` semantics.
        # Call the index method
        self.set_material_properties_index(materials=materials, env_ids=env_ids)

    def set_joint_position_target_index(
        self,
        *,
diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py
index b549da96b787..586750eaa6a3 100644
--- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py
+++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py
@@ -912,6 +912,65 @@ def set_inertias_mask(
        # Set full data to True to ensure the right code path is taken inside the kernel.
        self.set_inertias_index(inertias=inertias, body_ids=body_ids, env_ids=env_ids, full_data=True)

    def set_material_properties_index(
        self,
        *,
        materials: torch.Tensor | wp.array,
        env_ids: Sequence[int] | torch.Tensor | wp.array | None = None,
    ) -> None:
        """Set material properties (friction and restitution) for collision shapes using indices.

        The material properties consist of static friction, dynamic friction, and restitution
        coefficients for each collision shape.

        .. note::
            This method expects partial data.

        .. tip::
            Both the index and mask methods have dedicated optimized implementations. Performance is similar for both.
            However, to allow graphed pipelines, the mask method must be used.

        Args:
            materials: Material properties for all shapes. Shape is (len(env_ids), max_shapes, 3)
                where the 3 values are [static_friction, dynamic_friction, restitution].
            env_ids: Environment indices. If None, all environments are updated.
        """
        # resolve all indices
        env_ids = self._resolve_env_ids(env_ids)
        # convert materials to warp array if needed (root_view requires warp arrays)
        if isinstance(materials, torch.Tensor):
            materials = wp.from_torch(materials, dtype=wp.float32)
        # set material properties via root view
        self.root_view.set_material_properties(materials, env_ids)

    def set_material_properties_mask(
        self,
        *,
        materials: torch.Tensor | wp.array,
        env_mask: wp.array | None = None,
    ) -> None:
        """Set material properties (friction and restitution) for collision shapes using masks.

        The material properties consist of static friction, dynamic friction, and restitution
        coefficients for each collision shape.

        .. note::
            This method expects full data.

        .. tip::
            For maximum performance we recommend using the index method. This is because in PhysX, the tensor API
            only supports indexing, hence masks need to be converted to indices.

        Args:
            materials: Material properties for all shapes. Shape is ``(num_instances, max_shapes, 3)``
                where the 3 values are ``[static_friction, dynamic_friction, restitution]``.
            env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,).
        """
        # Resolve masks to indices
        env_ids = self._resolve_env_mask(env_mask)
        # NOTE(review): same full-vs-partial ``materials`` shape ambiguity as the
        # articulation variant -- confirm against the root-view API.
        # Call the index method
        self.set_material_properties_index(materials=materials, env_ids=env_ids)

    """
    Internal helper.
    """
diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py
index 877a44261293..c84eac2eef45 100644
--- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py
+++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py
@@ -1053,6 +1053,65 @@ def set_inertias_mask(
        # Set full data to True to ensure the right code path is taken inside the kernel.
        self.set_inertias_index(inertias=inertias, body_ids=body_ids, env_ids=env_ids, full_data=True)

    def set_material_properties_index(
        self,
        *,
        materials: torch.Tensor | wp.array,
        env_ids: Sequence[int] | torch.Tensor | wp.array | None = None,
    ) -> None:
        """Set material properties (friction and restitution) for collision shapes using indices.

        The material properties consist of static friction, dynamic friction, and restitution
        coefficients for each collision shape.

        .. note::
            This method expects partial data.

        .. tip::
            Both the index and mask methods have dedicated optimized implementations. Performance is similar for both.
            However, to allow graphed pipelines, the mask method must be used.

        Args:
            materials: Material properties for all shapes. Shape is (len(env_ids), max_shapes, 3)
                where the 3 values are [static_friction, dynamic_friction, restitution].
            env_ids: Environment indices. If None, all environments are updated.
        """
        # resolve all indices
        env_ids = self._resolve_env_ids(env_ids)
        # convert materials to warp array if needed (root_view requires warp arrays)
        if isinstance(materials, torch.Tensor):
            materials = wp.from_torch(materials, dtype=wp.float32)
        # set material properties via root view
        self.root_view.set_material_properties(materials, env_ids)

    def set_material_properties_mask(
        self,
        *,
        materials: torch.Tensor | wp.array,
        env_mask: wp.array | None = None,
    ) -> None:
        """Set material properties (friction and restitution) for collision shapes using masks.

        The material properties consist of static friction, dynamic friction, and restitution
        coefficients for each collision shape.

        .. note::
            This method expects full data.

        .. tip::
            For maximum performance we recommend using the index method. This is because in PhysX, the tensor API
            only supports indexing, hence masks need to be converted to indices.

        Args:
            materials: Material properties for all shapes. Shape is ``(num_instances, max_shapes, 3)``
                where the 3 values are ``[static_friction, dynamic_friction, restitution]``.
            env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,).
        """
        # Resolve masks to indices
        env_ids = self._resolve_env_mask(env_mask)
        # NOTE(review): same full-vs-partial ``materials`` shape ambiguity as the
        # articulation variant -- confirm against the root-view API.
        # Call the index method
        self.set_material_properties_index(materials=materials, env_ids=env_ids)

    """
    Helper functions.
    """
diff --git a/source/isaaclab_physx/isaaclab_physx/sim/__init__.py b/source/isaaclab_physx/isaaclab_physx/sim/__init__.py
new file mode 100644
index 000000000000..e71f4b7ab957
--- /dev/null
+++ b/source/isaaclab_physx/isaaclab_physx/sim/__init__.py
@@ -0,0 +1,6 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""PhysX simulation utilities."""
diff --git a/source/isaaclab_physx/isaaclab_physx/sim/views/__init__.py b/source/isaaclab_physx/isaaclab_physx/sim/views/__init__.py
new file mode 100644
index 000000000000..ce10cabf240d
--- /dev/null
+++ b/source/isaaclab_physx/isaaclab_physx/sim/views/__init__.py
@@ -0,0 +1,8 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from .xform_prim_view import XformPrimView

__all__ = ["XformPrimView"]
diff --git a/source/isaaclab_physx/isaaclab_physx/sim/views/xform_prim_view.py b/source/isaaclab_physx/isaaclab_physx/sim/views/xform_prim_view.py
new file mode 100644
index 000000000000..96a2976dc8db
--- /dev/null
+++ b/source/isaaclab_physx/isaaclab_physx/sim/views/xform_prim_view.py
@@ -0,0 +1,403 @@
# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""PhysX XformPrimView with Fabric GPU acceleration."""

from __future__ import annotations

import logging
from collections.abc import Sequence

import torch
import warp as wp

from pxr import Usd

import isaaclab.sim as sim_utils
from isaaclab.app.settings_manager import SettingsManager
from isaaclab.sim.views.xform_prim_view import XformPrimView as _CoreXformPrimView
from isaaclab.utils.warp import fabric as fabric_utils

logger = logging.getLogger(__name__)


class XformPrimView(_CoreXformPrimView):
    """XformPrimView with Fabric GPU acceleration for the PhysX backend.

    Inherits all USD-based operations from the core
    :class:`~isaaclab.sim.views.XformPrimView` and adds GPU-accelerated Fabric
    paths for ``get_world_poses``, ``set_world_poses``, ``get_scales``, and
    ``set_scales``.

    When Fabric is enabled, this class leverages NVIDIA's Fabric API:

    - Uses ``omni:fabric:worldMatrix`` and ``omni:fabric:localMatrix`` attributes for all Boundable prims
    - Performs batch matrix decomposition/composition using Warp kernels on GPU
    - Works for both physics-enabled and non-physics prims (cameras, meshes, etc.)

    When Fabric is **not** enabled, all operations fall through to the USD base class.

    .. warning::
        **Fabric requires CUDA**: Fabric is only supported on CUDA devices.
        Warp's CPU backend for fabric-array writes has known issues, so attempting to use
        Fabric with CPU device will automatically fall back to USD operations.
    """

    def __init__(
        self,
        prim_path: str,
        device: str = "cpu",
        validate_xform_ops: bool = True,
        sync_usd_on_fabric_write: bool = False,
        stage: Usd.Stage | None = None,
    ):
        """Initialize the view with matching prims and optional Fabric acceleration.

        Args:
            prim_path: USD prim path pattern to match prims.
            device: Device to place the tensors on. Defaults to ``"cpu"``.
            validate_xform_ops: Whether to validate standard xform operations. Defaults to True.
            sync_usd_on_fabric_write: Whether to mirror Fabric transform writes back to USD.
                Defaults to False for better performance.
            stage: USD stage to search for prims. Defaults to None (current active stage).
        """
        super().__init__(prim_path, device=device, validate_xform_ops=validate_xform_ops, stage=stage)

        self._sync_usd_on_fabric_write = sync_usd_on_fabric_write

        # Fabric is opt-in via the settings flag; default to plain USD when unset.
        settings = SettingsManager.instance()
        self._use_fabric = bool(settings.get("/physics/fabricEnabled", False))

        # Warp fabric-array writes are unreliable on CPU -- force the USD fallback.
        if self._use_fabric and self._device == "cpu":
            logger.warning(
                "Fabric mode with Warp fabric-array operations is not supported on CPU devices. "
                "Falling back to standard USD operations on the CPU. This may impact performance."
            )
            self._use_fabric = False

        # USDRT SelectPrims / Warp fabric arrays only support the first CUDA device.
        if self._use_fabric and self._device not in ("cuda", "cuda:0"):
            logger.warning(
                f"Fabric mode is not supported on device '{self._device}'. "
                "USDRT SelectPrims and Warp fabric arrays only support cuda:0. "
                "Falling back to standard USD operations. This may impact performance."
            )
            self._use_fabric = False

        # Lazy Fabric state -- populated by ``_initialize_fabric`` on first use.
        self._fabric_initialized = False
        self._fabric_usd_sync_done = False
        self._fabric_selection = None
        self._fabric_to_view: wp.array | None = None
        self._view_to_fabric: wp.array | None = None
        self._default_view_indices: wp.array | None = None
        self._fabric_hierarchy = None
        # Per-instance custom attribute name used to map Fabric prims back to view slots;
        # hash(self) keeps concurrent views from clobbering each other's mapping.
        self._view_index_attr = f"isaaclab:view_index:{abs(hash(self))}"

    """
    Overrides -- Fabric-accelerated getters / setters.
    """

    def set_world_poses(
        self,
        positions: torch.Tensor | None = None,
        orientations: torch.Tensor | None = None,
        indices: Sequence[int] | None = None,
    ):
        # Route to the Fabric fast path when enabled; otherwise use the USD base class.
        if self._use_fabric:
            self._set_world_poses_fabric(positions, orientations, indices)
        else:
            super().set_world_poses(positions, orientations, indices)

    def set_local_poses(
        self,
        translations: torch.Tensor | None = None,
        orientations: torch.Tensor | None = None,
        indices: Sequence[int] | None = None,
    ):
        if self._use_fabric:
            self._set_local_poses_fabric(translations, orientations, indices)
        else:
            super().set_local_poses(translations, orientations, indices)

    def set_scales(self, scales: torch.Tensor, indices: Sequence[int] | None = None):
        if self._use_fabric:
            self._set_scales_fabric(scales, indices)
        else:
            super().set_scales(scales, indices)

    def get_world_poses(self, indices: Sequence[int] | None = None) -> tuple[torch.Tensor, torch.Tensor]:
        if self._use_fabric:
            return self._get_world_poses_fabric(indices)
        return super().get_world_poses(indices)

    def get_local_poses(self, indices: Sequence[int] | None = None) -> tuple[torch.Tensor, torch.Tensor]:
        if self._use_fabric:
            return self._get_local_poses_fabric(indices)
self._get_local_poses_fabric(indices) + return super().get_local_poses(indices) + + def get_scales(self, indices: Sequence[int] | None = None) -> torch.Tensor: + if self._use_fabric: + return self._get_scales_fabric(indices) + return super().get_scales(indices) + + """ + Internal Functions - Fabric. + """ + + def _set_world_poses_fabric( + self, + positions: torch.Tensor | None = None, + orientations: torch.Tensor | None = None, + indices: Sequence[int] | None = None, + ): + """Set world poses using Fabric GPU batch operations.""" + if not self._fabric_initialized: + self._initialize_fabric() + + indices_wp = self._resolve_indices_wp(indices) + count = indices_wp.shape[0] + + positions_wp = wp.from_torch(positions) if positions is not None else wp.zeros((0, 3), dtype=wp.float32).to(self._device) + orientations_wp = wp.from_torch(orientations) if orientations is not None else wp.zeros((0, 4), dtype=wp.float32).to(self._device) + scales_wp = wp.zeros((0, 3), dtype=wp.float32).to(self._device) + + wp.launch( + kernel=fabric_utils.compose_fabric_transformation_matrix_from_warp_arrays, + dim=count, + inputs=[ + self._fabric_world_matrices, positions_wp, orientations_wp, scales_wp, + False, False, False, indices_wp, self._view_to_fabric, + ], + device=self._fabric_device, + ) + wp.synchronize() + + self._fabric_hierarchy.update_world_xforms() + self._fabric_usd_sync_done = True + if self._sync_usd_on_fabric_write: + self._set_world_poses_usd(positions, orientations, indices) + + def _set_local_poses_fabric( + self, + translations: torch.Tensor | None = None, + orientations: torch.Tensor | None = None, + indices: Sequence[int] | None = None, + ): + """Set local poses using USD (matches Isaac Sim's design -- Fabric only accelerates world poses).""" + self._set_local_poses_usd(translations, orientations, indices) + + def _set_scales_fabric(self, scales: torch.Tensor, indices: Sequence[int] | None = None): + """Set scales using Fabric GPU batch operations.""" + if not 
self._fabric_initialized: + self._initialize_fabric() + + indices_wp = self._resolve_indices_wp(indices) + count = indices_wp.shape[0] + + scales_wp = wp.from_torch(scales) + positions_wp = wp.zeros((0, 3), dtype=wp.float32).to(self._device) + orientations_wp = wp.zeros((0, 4), dtype=wp.float32).to(self._device) + + wp.launch( + kernel=fabric_utils.compose_fabric_transformation_matrix_from_warp_arrays, + dim=count, + inputs=[ + self._fabric_world_matrices, positions_wp, orientations_wp, scales_wp, + False, False, False, indices_wp, self._view_to_fabric, + ], + device=self._fabric_device, + ) + wp.synchronize() + + self._fabric_hierarchy.update_world_xforms() + self._fabric_usd_sync_done = True + if self._sync_usd_on_fabric_write: + self._set_scales_usd(scales, indices) + + def _get_world_poses_fabric(self, indices: Sequence[int] | None = None) -> tuple[torch.Tensor, torch.Tensor]: + """Get world poses from Fabric using GPU batch operations.""" + if not self._fabric_initialized: + self._initialize_fabric() + if not self._fabric_usd_sync_done: + self._sync_fabric_from_usd_once() + + indices_wp = self._resolve_indices_wp(indices) + count = indices_wp.shape[0] + + use_cached_buffers = indices is None or indices == slice(None) + if use_cached_buffers: + positions_wp = self._fabric_positions_buffer + orientations_wp = self._fabric_orientations_buffer + scales_wp = self._fabric_dummy_buffer + else: + positions_wp = wp.zeros((count, 3), dtype=wp.float32).to(self._device) + orientations_wp = wp.zeros((count, 4), dtype=wp.float32).to(self._device) + scales_wp = self._fabric_dummy_buffer + + wp.launch( + kernel=fabric_utils.decompose_fabric_transformation_matrix_to_warp_arrays, + dim=count, + inputs=[ + self._fabric_world_matrices, positions_wp, orientations_wp, scales_wp, + indices_wp, self._view_to_fabric, + ], + device=self._fabric_device, + ) + + if use_cached_buffers: + wp.synchronize() + return self._fabric_positions_torch, self._fabric_orientations_torch + else: + 
positions = wp.to_torch(positions_wp) + orientations = wp.to_torch(orientations_wp) + return positions, orientations + + def _get_local_poses_fabric(self, indices: Sequence[int] | None = None) -> tuple[torch.Tensor, torch.Tensor]: + """Get local poses using USD (matches Isaac Sim's design).""" + return self._get_local_poses_usd(indices) + + def _get_scales_fabric(self, indices: Sequence[int] | None = None) -> torch.Tensor: + """Get scales from Fabric using GPU batch operations.""" + if not self._fabric_initialized: + self._initialize_fabric() + if not self._fabric_usd_sync_done: + self._sync_fabric_from_usd_once() + + indices_wp = self._resolve_indices_wp(indices) + count = indices_wp.shape[0] + + use_cached_buffers = indices is None or indices == slice(None) + if use_cached_buffers: + scales_wp = self._fabric_scales_buffer + else: + scales_wp = wp.zeros((count, 3), dtype=wp.float32).to(self._device) + + positions_wp = self._fabric_dummy_buffer + orientations_wp = self._fabric_dummy_buffer + + wp.launch( + kernel=fabric_utils.decompose_fabric_transformation_matrix_to_warp_arrays, + dim=count, + inputs=[ + self._fabric_world_matrices, positions_wp, orientations_wp, scales_wp, + indices_wp, self._view_to_fabric, + ], + device=self._fabric_device, + ) + + if use_cached_buffers: + wp.synchronize() + return self._fabric_scales_torch + else: + return wp.to_torch(scales_wp) + + """ + Internal Functions - Initialization. 

    def _initialize_fabric(self) -> None:
        """Initialize Fabric batch infrastructure for GPU-accelerated pose queries."""
        import usdrt  # noqa: PLC0415
        from usdrt import Rt  # noqa: PLC0415

        stage_id = sim_utils.get_current_stage_id()
        fabric_stage = usdrt.Usd.Stage.Attach(stage_id)

        # Ensure every view prim carries a Fabric world-matrix attribute and a
        # custom view-index attribute used later to map Fabric order -> view order.
        for i in range(self.count):
            rt_prim = fabric_stage.GetPrimAtPath(self.prim_paths[i])
            rt_xformable = Rt.Xformable(rt_prim)

            # Older USDRT builds may lack the Has* probe; fall back to creating the attr.
            has_attr = (
                rt_xformable.HasFabricHierarchyWorldMatrixAttr()
                if hasattr(rt_xformable, "HasFabricHierarchyWorldMatrixAttr")
                else False
            )
            if not has_attr:
                rt_xformable.CreateFabricHierarchyWorldMatrixAttr()

            # Seed Fabric with the current USD transform.
            rt_xformable.SetWorldXformFromUsd()

            rt_prim.CreateAttribute(self._view_index_attr, usdrt.Sdf.ValueTypeNames.UInt, custom=True)
            rt_prim.GetAttribute(self._view_index_attr).Set(i)

        self._fabric_hierarchy = usdrt.hierarchy.IFabricHierarchy().get_fabric_hierarchy(
            fabric_stage.GetFabricId(), fabric_stage.GetStageIdAsStageId()
        )
        self._fabric_hierarchy.update_world_xforms()

        # Identity index array [0..count) used when callers pass indices=None.
        self._default_view_indices = wp.zeros((self.count,), dtype=wp.uint32).to(self._device)
        wp.launch(kernel=fabric_utils.arange_k, dim=self.count, inputs=[self._default_view_indices], device=self._device)
        wp.synchronize()

        # SelectPrims / fabric arrays only run on cuda:0; map bare "cuda" and other
        # cuda ordinals accordingly (non-cuda devices were already rejected in __init__).
        fabric_device = self._device
        if self._device == "cuda":
            logger.warning("Fabric device is not specified, defaulting to 'cuda:0'.")
            fabric_device = "cuda:0"
        elif self._device.startswith("cuda:"):
            if self._device != "cuda:0":
                logger.debug(
                    f"SelectPrims only supports cuda:0. Using cuda:0 for SelectPrims "
                    f"even though simulation device is {self._device}."
                )
                fabric_device = "cuda:0"

        self._fabric_selection = fabric_stage.SelectPrims(
            require_attrs=[
                (usdrt.Sdf.ValueTypeNames.UInt, self._view_index_attr, usdrt.Usd.Access.Read),
                (usdrt.Sdf.ValueTypeNames.Matrix4d, "omni:fabric:worldMatrix", usdrt.Usd.Access.ReadWrite),
            ],
            device=fabric_device,
        )

        # Build the view->fabric permutation by inverting the fabric->view mapping
        # stored on the custom attribute.
        self._view_to_fabric = wp.zeros((self.count,), dtype=wp.uint32).to(fabric_device)
        self._fabric_to_view = wp.fabricarray(self._fabric_selection, self._view_index_attr)

        wp.launch(
            kernel=fabric_utils.set_view_to_fabric_array,
            dim=self._fabric_to_view.shape[0],
            inputs=[self._fabric_to_view, self._view_to_fabric],
            device=fabric_device,
        )
        wp.synchronize()

        # Persistent torch tensors backing the full-view read path; the warp buffers
        # below alias the same memory via wp.from_torch.
        self._fabric_positions_torch = torch.zeros((self.count, 3), dtype=torch.float32, device=self._device)
        self._fabric_orientations_torch = torch.zeros((self.count, 4), dtype=torch.float32, device=self._device)
        self._fabric_scales_torch = torch.zeros((self.count, 3), dtype=torch.float32, device=self._device)

        self._fabric_positions_buffer = wp.from_torch(self._fabric_positions_torch, dtype=wp.float32)
        self._fabric_orientations_buffer = wp.from_torch(self._fabric_orientations_torch, dtype=wp.float32)
        self._fabric_scales_buffer = wp.from_torch(self._fabric_scales_torch, dtype=wp.float32)

        # Zero-length array passed as the "not requested" placeholder to kernels.
        self._fabric_dummy_buffer = wp.zeros((0, 3), dtype=wp.float32).to(self._device)
        self._fabric_world_matrices = wp.fabricarray(self._fabric_selection, "omni:fabric:worldMatrix")
        self._fabric_stage = fabric_stage
        self._fabric_device = fabric_device

        self._fabric_initialized = True
        # Transforms have not been pulled from USD yet; first read triggers the sync.
        self._fabric_usd_sync_done = False

    def _sync_fabric_from_usd_once(self) -> None:
        """Sync Fabric world matrices from USD once, on the first read."""
        if not self._fabric_initialized:
            self._initialize_fabric()

        positions_usd, orientations_usd = self._get_world_poses_usd()
        scales_usd = self._get_scales_usd()

        # Temporarily disable the write-back so seeding Fabric does not echo to USD.
        prev_sync = self._sync_usd_on_fabric_write
        self._sync_usd_on_fabric_write = False
self._set_world_poses_fabric(positions_usd, orientations_usd) + self._set_scales_fabric(scales_usd) + self._sync_usd_on_fabric_write = prev_sync + + self._fabric_usd_sync_done = True + + def _resolve_indices_wp(self, indices: Sequence[int] | None) -> wp.array: + """Resolve view indices as a Warp array.""" + if indices is None or indices == slice(None): + if self._default_view_indices is None: + raise RuntimeError("Fabric indices are not initialized.") + return self._default_view_indices + indices_list = indices.tolist() if isinstance(indices, torch.Tensor) else list(indices) + return wp.array(indices_list, dtype=wp.uint32).to(self._device) diff --git a/source/isaaclab_physx/test/assets/test_articulation.py b/source/isaaclab_physx/test/assets/test_articulation.py index f1e31795c130..9726a7bd3a80 100644 --- a/source/isaaclab_physx/test/assets/test_articulation.py +++ b/source/isaaclab_physx/test/assets/test_articulation.py @@ -2245,5 +2245,43 @@ def test_write_joint_frictions_to_sim(sim, num_articulations, device, add_ground assert torch.allclose(joint_friction_coeff_sim_2, friction_2.cpu()) +@pytest.mark.parametrize("add_ground_plane", [True]) +@pytest.mark.parametrize("num_articulations", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.parametrize("articulation_type", ["panda"]) +@pytest.mark.isaacsim_ci +def test_set_material_properties(sim, num_articulations, device, add_ground_plane, articulation_type): + """Test getting and setting material properties (friction/restitution) of articulation shapes.""" + articulation_cfg = generate_articulation_cfg(articulation_type=articulation_type) + articulation, _ = generate_articulation( + articulation_cfg=articulation_cfg, num_articulations=num_articulations, device=device + ) + + # Play the simulator + sim.reset() + + # Get number of shapes from the articulation + max_shapes = articulation.root_view.max_shapes + + # Generate random material properties: (static_friction, dynamic_friction, 
restitution) + materials = torch.empty(num_articulations, max_shapes, 3, device="cpu").uniform_(0.0, 1.0) + # Ensure dynamic friction <= static friction + materials[..., 1] = torch.min(materials[..., 0], materials[..., 1]) + + # Use PhysX's material properties API + env_ids = torch.arange(num_articulations, dtype=torch.int32) + articulation.set_material_properties_index(materials=materials, env_ids=env_ids) + + # Simulate physics + sim.step() + articulation.update(sim.cfg.dt) + + # Get material properties from simulation + materials_check = wp.to_torch(articulation.root_view.get_material_properties()) + + # Check if material properties are set correctly + torch.testing.assert_close(materials_check, materials) + + if __name__ == "__main__": pytest.main([__file__, "-v", "--maxfail=1"]) diff --git a/source/isaaclab_physx/test/assets/test_rigid_object.py b/source/isaaclab_physx/test/assets/test_rigid_object.py index 6cea115105e1..431bbf0fab55 100644 --- a/source/isaaclab_physx/test/assets/test_rigid_object.py +++ b/source/isaaclab_physx/test/assets/test_rigid_object.py @@ -604,6 +604,44 @@ def test_rigid_body_set_material_properties(num_cubes, device): torch.testing.assert_close(materials_to_check.reshape(num_cubes, 3), materials) +@pytest.mark.parametrize("num_cubes", [1, 2]) +@pytest.mark.parametrize("device", ["cuda:0", "cpu"]) +@pytest.mark.isaacsim_ci +def test_set_material_properties_index(num_cubes, device): + """Test setting material properties using the set_material_properties_index API.""" + with build_simulation_context( + device=device, gravity_enabled=True, add_ground_plane=True, auto_add_lighting=True + ) as sim: + sim._app_control_on_stop_handle = None + # Generate cubes scene + cube_object, _ = generate_cubes_scene(num_cubes=num_cubes, device=device) + + # Play sim + sim.reset() + + # Get number of shapes + max_shapes = cube_object.root_view.max_shapes + + # Generate random material properties: (static_friction, dynamic_friction, restitution) + 
materials = torch.empty(num_cubes, max_shapes, 3, device="cpu").uniform_(0.0, 1.0) + # Ensure dynamic friction <= static friction + materials[..., 1] = torch.min(materials[..., 0], materials[..., 1]) + + # Use the new set_material_properties_index API + env_ids = torch.arange(num_cubes, dtype=torch.int32) + cube_object.set_material_properties_index(materials=materials, env_ids=env_ids) + + # Simulate physics + sim.step() + cube_object.update(sim.cfg.dt) + + # Get material properties from simulation + materials_check = wp.to_torch(cube_object.root_view.get_material_properties()) + + # Check if material properties are set correctly + torch.testing.assert_close(materials_check, materials) + + @pytest.mark.parametrize("num_cubes", [1, 2]) @pytest.mark.parametrize("device", ["cuda:0", "cpu"]) @pytest.mark.isaacsim_ci diff --git a/source/isaaclab_physx/test/sim/__init__.py b/source/isaaclab_physx/test/sim/__init__.py new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/source/isaaclab_physx/test/sim/__init__.py @@ -0,0 +1 @@ + diff --git a/source/isaaclab_physx/test/sim/test_views_xform_prim_fabric.py b/source/isaaclab_physx/test/sim/test_views_xform_prim_fabric.py new file mode 100644 index 000000000000..689e1091571b --- /dev/null +++ b/source/isaaclab_physx/test/sim/test_views_xform_prim_fabric.py @@ -0,0 +1,440 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for PhysX XformPrimView with Fabric acceleration. + +Re-runs all the backend-parametrized tests from the core test suite with +``backend="fabric"`` and the PhysX :class:`XformPrimView`, plus the two +dedicated Fabric-only tests. 
+""" + +from isaaclab.app import AppLauncher + +simulation_app = AppLauncher(headless=True).app + +import pytest # noqa: E402 +import torch # noqa: E402 + +import isaaclab.sim as sim_utils # noqa: E402 +from isaaclab_physx.sim.views import XformPrimView # noqa: E402 + +BACKEND = "fabric" + + +@pytest.fixture(autouse=True) +def test_setup_teardown(): + """Create a blank new stage for each test.""" + sim_utils.create_new_stage() + sim_utils.update_stage() + yield + sim_utils.clear_stage() + sim_utils.SimulationContext.clear_instance() + + +def _skip_if_unavailable(device: str): + if device.startswith("cuda") and not torch.cuda.is_available(): + pytest.skip("CUDA not available") + if device == "cpu": + pytest.skip("Warp fabricarray operations on CPU have known issues") + + +def _create_view(pattern: str, device: str) -> XformPrimView: + sim_utils.SimulationContext(sim_utils.SimulationCfg(dt=0.01, device=device, use_fabric=True)) + return XformPrimView(pattern, device=device) + + +""" +Tests - Getters. 
+""" + + +@pytest.mark.parametrize("device", ["cpu", "cuda"]) +def test_get_world_poses(device): + _skip_if_unavailable(device) + stage = sim_utils.get_current_stage() + + expected_positions = [(1.0, 2.0, 3.0), (4.0, 5.0, 6.0), (7.0, 8.0, 9.0)] + expected_orientations = [(0.0, 0.0, 0.0, 1.0), (0.0, 0.0, 0.7071068, 0.7071068), (0.7071068, 0.0, 0.0, 0.7071068)] + + for i, (pos, quat) in enumerate(zip(expected_positions, expected_orientations)): + sim_utils.create_prim(f"/World/Object_{i}", "Camera", translation=pos, orientation=quat, stage=stage) + + view = _create_view("/World/Object_.*", device=device) + + expected_positions_tensor = torch.tensor(expected_positions, dtype=torch.float32, device=device) + expected_orientations_tensor = torch.tensor(expected_orientations, dtype=torch.float32, device=device) + + positions, orientations = view.get_world_poses() + + assert positions.shape == (3, 3) + assert orientations.shape == (3, 4) + torch.testing.assert_close(positions, expected_positions_tensor, atol=1e-5, rtol=0) + + try: + torch.testing.assert_close(orientations, expected_orientations_tensor, atol=1e-5, rtol=0) + except AssertionError: + torch.testing.assert_close(orientations, -expected_orientations_tensor, atol=1e-5, rtol=0) + + +@pytest.mark.parametrize("device", ["cpu", "cuda"]) +def test_get_local_poses(device): + _skip_if_unavailable(device) + stage = sim_utils.get_current_stage() + + sim_utils.create_prim("/World/Parent", "Xform", translation=(10.0, 0.0, 0.0), stage=stage) + + expected_local_positions = [(1.0, 0.0, 0.0), (0.0, 2.0, 0.0), (0.0, 0.0, 3.0)] + expected_local_orientations = [ + (0.0, 0.0, 0.0, 1.0), (0.0, 0.0, 0.7071068, 0.7071068), (0.7071068, 0.0, 0.0, 0.7071068), + ] + + for i, (pos, quat) in enumerate(zip(expected_local_positions, expected_local_orientations)): + sim_utils.create_prim(f"/World/Parent/Child_{i}", "Camera", translation=pos, orientation=quat, stage=stage) + + view = _create_view("/World/Parent/Child_.*", device=device) + 
translations, orientations = view.get_local_poses() + + assert translations.shape == (3, 3) + assert orientations.shape == (3, 4) + + expected_translations_tensor = torch.tensor(expected_local_positions, dtype=torch.float32, device=device) + expected_orientations_tensor = torch.tensor(expected_local_orientations, dtype=torch.float32, device=device) + + torch.testing.assert_close(translations, expected_translations_tensor, atol=1e-5, rtol=0) + try: + torch.testing.assert_close(orientations, expected_orientations_tensor, atol=1e-5, rtol=0) + except AssertionError: + torch.testing.assert_close(orientations, -expected_orientations_tensor, atol=1e-5, rtol=0) + + +@pytest.mark.parametrize("device", ["cpu", "cuda"]) +def test_get_scales(device): + _skip_if_unavailable(device) + stage = sim_utils.get_current_stage() + + expected_scales = [(1.0, 1.0, 1.0), (2.0, 2.0, 2.0), (1.0, 2.0, 3.0)] + for i, scale in enumerate(expected_scales): + sim_utils.create_prim(f"/World/Object_{i}", "Camera", scale=scale, stage=stage) + + view = _create_view("/World/Object_.*", device=device) + scales = view.get_scales() + + assert scales.shape == (3, 3) + torch.testing.assert_close(scales, torch.tensor(expected_scales, dtype=torch.float32, device=device), atol=1e-5, rtol=0) + + +""" +Tests - Setters. 
+""" + + +@pytest.mark.parametrize("device", ["cpu", "cuda"]) +def test_set_world_poses(device): + _skip_if_unavailable(device) + stage = sim_utils.get_current_stage() + + num_prims = 5 + for i in range(num_prims): + sim_utils.create_prim(f"/World/Object_{i}", "Camera", translation=(0.0, 0.0, 0.0), stage=stage) + + view = _create_view("/World/Object_.*", device=device) + + new_positions = torch.tensor( + [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0], [10.0, 11.0, 12.0], [13.0, 14.0, 15.0]], device=device + ) + new_orientations = torch.tensor( + [ + [0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.7071068, 0.7071068], [0.7071068, 0.0, 0.0, 0.7071068], + [0.3826834, 0.0, 0.0, 0.9238795], [0.0, 0.7071068, 0.0, 0.7071068], + ], + device=device, + ) + + view.set_world_poses(new_positions, new_orientations) + retrieved_positions, retrieved_orientations = view.get_world_poses() + + torch.testing.assert_close(retrieved_positions, new_positions, atol=1e-5, rtol=0) + try: + torch.testing.assert_close(retrieved_orientations, new_orientations, atol=1e-5, rtol=0) + except AssertionError: + torch.testing.assert_close(retrieved_orientations, -new_orientations, atol=1e-5, rtol=0) + + +@pytest.mark.parametrize("device", ["cpu", "cuda"]) +def test_set_world_poses_only_positions(device): + _skip_if_unavailable(device) + stage = sim_utils.get_current_stage() + + initial_quat = (0.0, 0.0, 0.7071068, 0.7071068) + for i in range(3): + sim_utils.create_prim(f"/World/Object_{i}", "Camera", translation=(0.0, 0.0, 0.0), orientation=initial_quat, stage=stage) + + view = _create_view("/World/Object_.*", device=device) + _, initial_orientations = view.get_world_poses() + + new_positions = torch.tensor([[1.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 3.0]], device=device) + view.set_world_poses(positions=new_positions, orientations=None) + + retrieved_positions, retrieved_orientations = view.get_world_poses() + torch.testing.assert_close(retrieved_positions, new_positions, atol=1e-5, rtol=0) + try: + 
torch.testing.assert_close(retrieved_orientations, initial_orientations, atol=1e-5, rtol=0) + except AssertionError: + torch.testing.assert_close(retrieved_orientations, -initial_orientations, atol=1e-5, rtol=0) + + +@pytest.mark.parametrize("device", ["cpu", "cuda"]) +def test_set_world_poses_only_orientations(device): + _skip_if_unavailable(device) + stage = sim_utils.get_current_stage() + + for i in range(3): + sim_utils.create_prim(f"/World/Object_{i}", "Camera", translation=(float(i), 0.0, 0.0), stage=stage) + + view = _create_view("/World/Object_.*", device=device) + initial_positions, _ = view.get_world_poses() + + new_orientations = torch.tensor( + [[0.0, 0.0, 0.7071068, 0.7071068], [0.7071068, 0.0, 0.0, 0.7071068], [0.3826834, 0.0, 0.0, 0.9238795]], + device=device, + ) + view.set_world_poses(positions=None, orientations=new_orientations) + + retrieved_positions, retrieved_orientations = view.get_world_poses() + torch.testing.assert_close(retrieved_positions, initial_positions, atol=1e-5, rtol=0) + try: + torch.testing.assert_close(retrieved_orientations, new_orientations, atol=1e-5, rtol=0) + except AssertionError: + torch.testing.assert_close(retrieved_orientations, -new_orientations, atol=1e-5, rtol=0) + + +@pytest.mark.parametrize("device", ["cpu", "cuda"]) +def test_set_world_poses_with_hierarchy(device): + _skip_if_unavailable(device) + stage = sim_utils.get_current_stage() + + for i in range(3): + parent_pos = (i * 10.0, 0.0, 0.0) + parent_quat = (0.0, 0.0, 0.7071068, 0.7071068) + sim_utils.create_prim(f"/World/Parent_{i}", "Xform", translation=parent_pos, orientation=parent_quat, stage=stage) + sim_utils.create_prim(f"/World/Parent_{i}/Child", "Camera", translation=(0.0, 0.0, 0.0), stage=stage) + + view = _create_view("/World/Parent_.*/Child", device=device) + + desired_world_positions = torch.tensor([[5.0, 5.0, 0.0], [15.0, 5.0, 0.0], [25.0, 5.0, 0.0]], device=device) + desired_world_orientations = torch.tensor([[0.0, 0.0, 0.0, 1.0]] * 3, 
device=device) + + view.set_world_poses(desired_world_positions, desired_world_orientations) + retrieved_positions, retrieved_orientations = view.get_world_poses() + + torch.testing.assert_close(retrieved_positions, desired_world_positions, atol=1e-4, rtol=0) + try: + torch.testing.assert_close(retrieved_orientations, desired_world_orientations, atol=1e-4, rtol=0) + except AssertionError: + torch.testing.assert_close(retrieved_orientations, -desired_world_orientations, atol=1e-4, rtol=0) + + +@pytest.mark.parametrize("device", ["cpu", "cuda"]) +def test_set_local_poses(device): + _skip_if_unavailable(device) + stage = sim_utils.get_current_stage() + + sim_utils.create_prim("/World/Parent", "Xform", translation=(5.0, 5.0, 5.0), stage=stage) + num_prims = 4 + for i in range(num_prims): + sim_utils.create_prim(f"/World/Parent/Child_{i}", "Camera", translation=(0.0, 0.0, 0.0), stage=stage) + + view = _create_view("/World/Parent/Child_.*", device=device) + + new_translations = torch.tensor([[1.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 3.0], [4.0, 4.0, 4.0]], device=device) + new_orientations = torch.tensor( + [[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.7071068, 0.7071068], [0.7071068, 0.0, 0.0, 0.7071068], [0.3826834, 0.0, 0.0, 0.9238795]], + device=device, + ) + + view.set_local_poses(new_translations, new_orientations) + retrieved_translations, retrieved_orientations = view.get_local_poses() + + torch.testing.assert_close(retrieved_translations, new_translations, atol=1e-5, rtol=0) + try: + torch.testing.assert_close(retrieved_orientations, new_orientations, atol=1e-5, rtol=0) + except AssertionError: + torch.testing.assert_close(retrieved_orientations, -new_orientations, atol=1e-5, rtol=0) + + +@pytest.mark.parametrize("device", ["cpu", "cuda"]) +def test_set_local_poses_only_translations(device): + _skip_if_unavailable(device) + stage = sim_utils.get_current_stage() + + sim_utils.create_prim("/World/Parent", "Xform", translation=(0.0, 0.0, 0.0), stage=stage) + initial_quat 
= (0.0, 0.0, 0.7071068, 0.7071068) + for i in range(3): + sim_utils.create_prim(f"/World/Parent/Child_{i}", "Camera", translation=(0.0, 0.0, 0.0), orientation=initial_quat, stage=stage) + + view = _create_view("/World/Parent/Child_.*", device=device) + _, initial_orientations = view.get_local_poses() + + new_translations = torch.tensor([[1.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 3.0]], device=device) + view.set_local_poses(translations=new_translations, orientations=None) + + retrieved_translations, retrieved_orientations = view.get_local_poses() + torch.testing.assert_close(retrieved_translations, new_translations, atol=1e-5, rtol=0) + try: + torch.testing.assert_close(retrieved_orientations, initial_orientations, atol=1e-5, rtol=0) + except AssertionError: + torch.testing.assert_close(retrieved_orientations, -initial_orientations, atol=1e-5, rtol=0) + + +@pytest.mark.parametrize("device", ["cpu", "cuda"]) +def test_set_scales(device): + _skip_if_unavailable(device) + stage = sim_utils.get_current_stage() + + num_prims = 5 + for i in range(num_prims): + sim_utils.create_prim(f"/World/Object_{i}", "Camera", scale=(1.0, 1.0, 1.0), stage=stage) + + view = _create_view("/World/Object_.*", device=device) + new_scales = torch.tensor( + [[2.0, 2.0, 2.0], [1.0, 2.0, 3.0], [0.5, 0.5, 0.5], [3.0, 1.0, 2.0], [1.5, 1.5, 1.5]], device=device + ) + view.set_scales(new_scales) + + retrieved_scales = view.get_scales() + torch.testing.assert_close(retrieved_scales, new_scales, atol=1e-5, rtol=0) + + +""" +Tests - Indices. 
+""" + + +@pytest.mark.parametrize("device", ["cpu", "cuda"]) +def test_indices_single_element(device): + _skip_if_unavailable(device) + stage = sim_utils.get_current_stage() + + num_prims = 5 + for i in range(num_prims): + sim_utils.create_prim(f"/World/Object_{i}", "Camera", translation=(float(i), 0.0, 0.0), stage=stage) + + view = _create_view("/World/Object_.*", device=device) + + indices = [3] + positions, orientations = view.get_world_poses(indices=indices) + assert positions.shape == (1, 3) + assert orientations.shape == (1, 4) + + new_position = torch.tensor([[100.0, 200.0, 300.0]], device=device) + view.set_world_poses(positions=new_position, indices=indices) + + retrieved_positions, _ = view.get_world_poses(indices=indices) + torch.testing.assert_close(retrieved_positions, new_position, atol=1e-5, rtol=0) + + +@pytest.mark.parametrize("device", ["cpu", "cuda"]) +def test_indices_out_of_order(device): + _skip_if_unavailable(device) + stage = sim_utils.get_current_stage() + + num_prims = 10 + for i in range(num_prims): + sim_utils.create_prim(f"/World/Object_{i}", "Camera", translation=(0.0, 0.0, 0.0), stage=stage) + + view = _create_view("/World/Object_.*", device=device) + + indices = [7, 2, 9, 0, 5] + new_positions = torch.tensor( + [[7.0, 0.0, 0.0], [2.0, 0.0, 0.0], [9.0, 0.0, 0.0], [0.0, 0.0, 0.0], [5.0, 0.0, 0.0]], device=device + ) + view.set_world_poses(positions=new_positions, indices=indices) + + all_positions, _ = view.get_world_poses() + + expected_x_values = [0.0, 0.0, 2.0, 0.0, 0.0, 5.0, 0.0, 7.0, 0.0, 9.0] + for i in range(num_prims): + assert abs(all_positions[i, 0].item() - expected_x_values[i]) < 1e-5 + + +@pytest.mark.parametrize("device", ["cpu", "cuda"]) +def test_indices_with_only_positions_or_orientations(device): + _skip_if_unavailable(device) + stage = sim_utils.get_current_stage() + + num_prims = 5 + for i in range(num_prims): + sim_utils.create_prim(f"/World/Object_{i}", "Camera", translation=(0.0, 0.0, 0.0), orientation=(0.0, 
0.0, 0.0, 1.0), stage=stage) + + view = _create_view("/World/Object_.*", device=device) + initial_positions, initial_orientations = view.get_world_poses() + + indices = [1, 3] + new_positions = torch.tensor([[10.0, 0.0, 0.0], [30.0, 0.0, 0.0]], device=device) + view.set_world_poses(positions=new_positions, orientations=None, indices=indices) + + updated_positions, updated_orientations = view.get_world_poses() + + torch.testing.assert_close(updated_positions[1], new_positions[0], atol=1e-5, rtol=0) + torch.testing.assert_close(updated_positions[3], new_positions[1], atol=1e-5, rtol=0) + torch.testing.assert_close(updated_positions[0], initial_positions[0], atol=1e-5, rtol=0) + + +""" +Tests - Fabric Operations. +""" + + +@pytest.mark.parametrize("device", ["cpu", "cuda"]) +def test_fabric_initialization(device): + _skip_if_unavailable(device) + stage = sim_utils.get_current_stage() + + num_prims = 5 + for i in range(num_prims): + sim_utils.create_prim(f"/World/Cam_{i}", "Camera", translation=(i * 1.0, 0.0, 1.0), stage=stage) + + view = _create_view("/World/Cam_.*", device=device) + + assert view.count == num_prims + assert view.device == device + assert len(view.prims) == num_prims + + +@pytest.mark.parametrize("device", ["cpu", "cuda"]) +def test_fabric_usd_consistency(device): + _skip_if_unavailable(device) + stage = sim_utils.get_current_stage() + + num_prims = 5 + for i in range(num_prims): + sim_utils.create_prim( + f"/World/Cam_{i}", "Camera", + translation=(i * 1.0, 2.0, 3.0), + orientation=(0.0, 0.0, 0.7071068, 0.7071068), + stage=stage, + ) + + view = _create_view("/World/Cam_.*", device=device) + + init_positions = torch.zeros((num_prims, 3), dtype=torch.float32, device=device) + init_positions[:, 0] = torch.arange(num_prims, dtype=torch.float32, device=device) + init_positions[:, 1] = 2.0 + init_positions[:, 2] = 3.0 + init_orientations = torch.tensor([[0.0, 0.0, 0.7071068, 0.7071068]] * num_prims, dtype=torch.float32, device=device) + + 
view.set_world_poses(init_positions, init_orientations) + + pos, quat = view.get_world_poses() + torch.testing.assert_close(pos, init_positions, atol=1e-4, rtol=0) + torch.testing.assert_close(quat, init_orientations, atol=1e-4, rtol=0) + + new_positions = torch.rand((num_prims, 3), dtype=torch.float32, device=device) * 10.0 + new_orientations = torch.tensor([[0.0, 0.0, 0.0, 1.0]] * num_prims, dtype=torch.float32, device=device) + view.set_world_poses(new_positions, new_orientations) + + pos2, quat2 = view.get_world_poses() + torch.testing.assert_close(pos2, new_positions, atol=1e-4, rtol=0) + torch.testing.assert_close(quat2, new_orientations, atol=1e-4, rtol=0) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_d/rough_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_d/rough_env_cfg.py index 38d6b9d0546e..ab5b70936aca 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_d/rough_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/config/anymal_d/rough_env_cfg.py @@ -3,6 +3,13 @@ # # SPDX-License-Identifier: BSD-3-Clause +from isaaclab_newton.physics import MJWarpSolverCfg, NewtonCfg, NewtonCollisionPipelineCfg +from isaaclab_physx.physics import PhysxCfg + +from isaaclab.sim import SimulationCfg +from isaaclab.managers import EventTermCfg as EventTerm +from isaaclab.managers import SceneEntityCfg +import isaaclab.envs.mdp as mdp from isaaclab.utils import configclass from isaaclab_tasks.manager_based.locomotion.velocity.velocity_env_cfg import ( @@ -18,20 +25,51 @@ from isaaclab_assets.robots.anymal import ANYMAL_D_CFG # isort: skip +@configclass +class RoughPhysicsCfg(PresetCfg): + default = PhysxCfg(gpu_max_rigid_patch_count=10 * 2**15) + newton = NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=200, + nconmax=100, + cone="pyramidal", + impratio=1.0, + integrator="implicitfast", + 
use_mujoco_contacts=False, + ), + collision_cfg=NewtonCollisionPipelineCfg(max_triangle_pairs=2_500_000), + num_substeps=1, + debug_mode=False, + ) + physx = default + + @configclass class AnymalDPhysxEventsCfg(EventsCfg, StartupEventsCfg): pass +@configclass +class AnymalDNewtonEventsCfg(EventsCfg): + + collider_offsets = EventTerm( + func=mdp.randomize_rigid_body_collider_offsets, + mode="startup", + params={ + "asset_cfg": SceneEntityCfg("robot", body_names=".*"), + "contact_offset_distribution_params": (0.01, 0.01) + }, + ) @configclass class AnymalDEventsCfg(PresetCfg): default = AnymalDPhysxEventsCfg() - newton = EventsCfg() + newton = AnymalDNewtonEventsCfg() physx = default @configclass class AnymalDRoughEnvCfg(LocomotionVelocityRoughEnvCfg): + sim: SimulationCfg = SimulationCfg(physics=RoughPhysicsCfg()) events: AnymalDEventsCfg = AnymalDEventsCfg() def __post_init__(self): @@ -40,7 +78,6 @@ def __post_init__(self): # switch robot to anymal-d self.scene.robot = ANYMAL_D_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") - @configclass class AnymalDRoughEnvCfg_PLAY(AnymalDRoughEnvCfg): def __post_init__(self): diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/velocity_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/velocity_env_cfg.py index 81f38e756975..c6cbfa9b324f 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/velocity_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomotion/velocity/velocity_env_cfg.py @@ -209,17 +209,6 @@ class EventsCfg: @configclass class StartupEventsCfg: # startup - physics_material = EventTerm( - func=mdp.randomize_rigid_body_material, - mode="startup", - params={ - "asset_cfg": SceneEntityCfg("robot", body_names=".*"), - "static_friction_range": (0.8, 0.8), - "dynamic_friction_range": (0.6, 0.6), - "restitution_range": (0.0, 0.0), - "num_buckets": 64, - }, - ) add_base_mass = EventTerm( 
func=mdp.randomize_rigid_body_mass, @@ -240,6 +229,16 @@ class StartupEventsCfg: }, ) + collider_offsets = EventTerm( + func=mdp.randomize_rigid_body_collider_offsets, + mode="startup", + params={ + "asset_cfg": SceneEntityCfg("robot", body_names=".*"), + "contact_offset_distribution_params": (0.01, 0.01), + "rest_offset_distribution_params": (0.01, 0.01), + }, + ) + @configclass class RewardsCfg: @@ -286,6 +285,10 @@ class TerminationsCfg: func=mdp.illegal_contact, params={"sensor_cfg": SceneEntityCfg("contact_forces", body_names="base"), "threshold": 1.0}, ) + body_lin_vel = DoneTerm( + func=mdp.body_lin_vel_out_of_limit, + params={"max_velocity": 20.0, "asset_cfg": SceneEntityCfg("robot", body_names=".*")}, + ) @configclass diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/dexsuite_kuka_allegro_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/dexsuite_kuka_allegro_env_cfg.py index f7d45344ac69..99f3f444ad25 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/dexsuite_kuka_allegro_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/dexsuite/config/kuka_allegro/dexsuite_kuka_allegro_env_cfg.py @@ -7,14 +7,15 @@ from isaaclab_physx.physics import PhysxCfg from isaaclab.assets import ArticulationCfg +from isaaclab.envs import mdp as lab_mdp +from isaaclab.managers import EventTermCfg, SceneEntityCfg from isaaclab.managers import RewardTermCfg as RewTerm -from isaaclab.managers import SceneEntityCfg from isaaclab.sensors import ContactSensorCfg, TiledCameraCfg from isaaclab.utils import configclass from isaaclab_tasks.utils import PresetCfg -from isaaclab_assets.robots import KUKA_ALLEGRO_CFG +from isaaclab_assets.robots import KUKA_ALLEGRO_CFG, KUKA_ALLEGRO_RANDOMIZABLE_CFG from ... import dexsuite_env_cfg as dexsuite from ... 
import mdp @@ -83,7 +84,15 @@ def __post_init__(self: dexsuite.SceneCfg): ), ) - default = KukaAllegroSceneCfg(num_envs=4096, env_spacing=3, replicate_physics=True) + # Default scene uses randomizable robot for color randomization (materials created at spawn time) + default = KukaAllegroSceneCfg( + num_envs=4096, + env_spacing=3, + replicate_physics=True, + robot=KUKA_ALLEGRO_RANDOMIZABLE_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot"), + ) + # Non-randomizable variant for faster startup when color randomization is not needed + no_color = KukaAllegroSceneCfg(num_envs=4096, env_spacing=3, replicate_physics=True) single_camera = default.replace(base_camera=BaseTiledCameraCfg()) duo_camera = default.replace(base_camera=BaseTiledCameraCfg(), wrist_camera=WristTiledCameraCfg()) @@ -138,9 +147,23 @@ class KukaAllegroEventCfg(PresetCfg): class KukaAllegroPhysxEventCfg(dexsuite.StartupEventCfg, dexsuite.EventCfg): pass - default = KukaAllegroPhysxEventCfg() + @configclass + class KukaAllegroWithColorRandomizationCfg(dexsuite.StartupEventCfg, dexsuite.EventCfg): + """Event config with layer-based color randomization (preserves instancing).""" + + randomize_robot_color = EventTermCfg( + func=lab_mdp.randomize_visual_color_instanced, + mode="reset", + params={ + "asset_cfg": SceneEntityCfg("robot"), + "colors": {"r": (0.0, 1.0), "g": (0.0, 1.0), "b": (0.0, 1.0)}, + }, + ) + + default = KukaAllegroWithColorRandomizationCfg() + no_color = KukaAllegroPhysxEventCfg() # Use with scene=no_color newton = dexsuite.EventCfg() - physx = default + physx = KukaAllegroWithColorRandomizationCfg() @configclass diff --git a/source/isaaclab_tasks/isaaclab_tasks/utils/presets.py b/source/isaaclab_tasks/isaaclab_tasks/utils/presets.py index f6fac9cb9aa6..02bbf66f6ed8 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/utils/presets.py +++ b/source/isaaclab_tasks/isaaclab_tasks/utils/presets.py @@ -6,6 +6,7 @@ from isaaclab_newton.renderers import NewtonWarpRendererCfg from isaaclab_ov.renderers 
import OVRTXRendererCfg from isaaclab_physx.renderers import IsaacRtxRendererCfg +from isaaclab_newton.physics import NanReplayCfg from isaaclab.utils import configclass @@ -18,3 +19,9 @@ class MultiBackendRendererCfg(PresetCfg): newton_renderer: NewtonWarpRendererCfg = NewtonWarpRendererCfg() ovrtx_renderer: OVRTXRendererCfg = OVRTXRendererCfg() isaacsim_rtx_renderer = default + + +@configclass +class NewtonNanDebugCfg(PresetCfg): + default = None + debug_nan = NanReplayCfg()