diff --git a/source/isaaclab/isaaclab/renderers/__init__.pyi b/source/isaaclab/isaaclab/renderers/__init__.pyi index ae408c03ae7d..34fe2715d2eb 100644 --- a/source/isaaclab/isaaclab/renderers/__init__.pyi +++ b/source/isaaclab/isaaclab/renderers/__init__.pyi @@ -6,6 +6,7 @@ __all__ = [ "BaseRenderer", "CameraRenderSpec", + "CameraPPISPCfg", "RenderBufferKind", "RenderBufferSpec", "Renderer", @@ -16,6 +17,7 @@ __all__ = [ from .base_renderer import BaseRenderer from .camera_render_spec import CameraRenderSpec from .output_contract import RenderBufferKind, RenderBufferSpec +from .camera_ppisp import CameraPPISPCfg from .renderer import Renderer from .renderer_cfg import RendererCfg from .render_context import RenderContext diff --git a/source/isaaclab/isaaclab/renderers/camera_ppisp.py b/source/isaaclab/isaaclab/renderers/camera_ppisp.py new file mode 100644 index 000000000000..9ef28c751ce0 --- /dev/null +++ b/source/isaaclab/isaaclab/renderers/camera_ppisp.py @@ -0,0 +1,271 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Camera PPISP configuration and USD/SPG parsing helpers. + +The implementation follows the physically plausible ISP model described in +https://arxiv.org/abs/2601.18336. +""" + +from __future__ import annotations + +from dataclasses import dataclass, field +from typing import Any + +from isaaclab.utils import configclass + +CAMERA_PPISP_SHADER_NAME = "PPISP" + +CAMERA_PPISP_FLOAT2_INPUTS = { + "vignettingCenterR", + "vignettingCenterG", + "vignettingCenterB", + "colorLatentBlue", + "colorLatentRed", + "colorLatentGreen", + "colorLatentNeutral", +} + +CAMERA_PPISP_DEFAULT_INPUTS: dict[str, float | tuple[float, float]] = { + "exposureOffset": 0.0, + "vignettingCenterR": (0.0, 0.0), + "vignettingAlpha1R": 0.0, + "vignettingAlpha2R": 0.0, + "vignettingAlpha3R": 0.0, + "vignettingCenterG": (0.0, 0.0), + "vignettingAlpha1G": 0.0, + "vignettingAlpha2G": 0.0, + "vignettingAlpha3G": 0.0, + "vignettingCenterB": (0.0, 0.0), + "vignettingAlpha1B": 0.0, + "vignettingAlpha2B": 0.0, + "vignettingAlpha3B": 0.0, + "colorLatentBlue": (0.0, 0.0), + "colorLatentRed": (0.0, 0.0), + "colorLatentGreen": (0.0, 0.0), + "colorLatentNeutral": (0.0, 0.0), + "crfToeR": 0.013659, + "crfShoulderR": 0.013659, + "crfGammaR": 0.378165, + "crfCenterR": 0.0, + "crfToeG": 0.013659, + "crfShoulderG": 0.013659, + "crfGammaG": 0.378165, + "crfCenterG": 0.0, + "crfToeB": 0.013659, + "crfShoulderB": 0.013659, + "crfGammaB": 0.378165, + "crfCenterB": 0.0, +} + + +def default_camera_ppisp_inputs() -> dict[str, float | tuple[float, float]]: + """Return a copy of the PPISP identity/default input dictionary.""" + return dict(CAMERA_PPISP_DEFAULT_INPUTS) + + +@configclass +class CameraPPISPCfg: + """Configuration for PPISP post-processing. + + PPISP inputs are static in IsaacLab. If imported from animated USD shader inputs, + the first authored time sample is used and later samples are ignored. 
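+
+    Example (illustrative values; unknown input names raise a ``ValueError``
+    when the configuration is normalized):
+
+    .. code-block:: python
+
+        from isaaclab.renderers import CameraPPISPCfg
+
+        # Half a stop brighter; all other inputs keep their identity defaults
+        # once the config passes through normalize_camera_ppisp_cfg().
+        ppisp = CameraPPISPCfg(inputs={"exposureOffset": 0.5})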
+ """ + + shader_prim_path: str | None = None + """Optional source USD shader prim path used to populate :attr:`inputs`.""" + + inputs: dict[str, float | tuple[float, float]] = field(default_factory=default_camera_ppisp_inputs) + """Flat PPISP shader input values keyed by USD input name.""" + + +@dataclass +class RenderProductInfo: + """Parsed USD RenderProduct information used for PPISP validation.""" + + render_product_path: str + camera_paths: list[str] + resolution: tuple[int, int] | None + ordered_vars: list[str] + ppisp: CameraPPISPCfg | None + camera_xform_time_samples: list[float] + + +def normalize_camera_ppisp_cfg( + ppisp_cfg: CameraPPISPCfg | dict[str, Any] | None, + stage: Any | None = None, +) -> CameraPPISPCfg | None: + """Convert supported user PPISP representations to :class:`CameraPPISPCfg`.""" + if ppisp_cfg is None: + return None + if isinstance(ppisp_cfg, CameraPPISPCfg): + input_overrides = dict(ppisp_cfg.inputs) + if ppisp_cfg.shader_prim_path and stage is not None: + ppisp_cfg = _merge_shader_inputs_with_cfg(ppisp_cfg, stage, input_overrides) + else: + ppisp_cfg.inputs = _normalized_inputs(input_overrides) + return ppisp_cfg + if isinstance(ppisp_cfg, dict): + input_overrides = ppisp_cfg.get( + "inputs", {key: value for key, value in ppisp_cfg.items() if key in CAMERA_PPISP_DEFAULT_INPUTS} + ) + cfg = CameraPPISPCfg() + cfg.inputs = _normalized_inputs(input_overrides) + shader_prim_path = ppisp_cfg.get("shader_prim_path") + if shader_prim_path is not None: + cfg.shader_prim_path = str(shader_prim_path) + if stage is not None: + cfg = _merge_shader_inputs_with_cfg(cfg, stage, input_overrides) + return cfg + raise TypeError(f"Unsupported PPISP configuration type: {type(ppisp_cfg)!r}") + + +def camera_ppisp_cfg_from_usd_shader(shader: Any) -> CameraPPISPCfg: + """Create :class:`CameraPPISPCfg` from a ``UsdShade.Shader`` prim. + + Animated inputs are collapsed to their first authored time sample. 
+ """ + cfg = CameraPPISPCfg(shader_prim_path=str(shader.GetPath())) + values = default_camera_ppisp_inputs() + for input_name in values: + shader_input = shader.GetInput(input_name) + if not shader_input: + continue + attr = shader_input.GetAttr() + value = _read_first_authored_value(attr) + if value is not None: + values[input_name] = _normalize_input_value(input_name, value) + cfg.inputs = values + return cfg + + +def camera_ppisp_cfg_from_usd_stage(stage: Any, shader_prim_path: str) -> CameraPPISPCfg: + """Create :class:`CameraPPISPCfg` from a shader prim path in a USD stage.""" + from pxr import UsdShade + + shader = UsdShade.Shader(stage.GetPrimAtPath(shader_prim_path)) + if not shader: + raise ValueError(f"PPISP shader prim not found at path: {shader_prim_path}") + return camera_ppisp_cfg_from_usd_shader(shader) + + +def parse_render_product(stage: Any, render_product_path: str) -> RenderProductInfo: + """Parse a USD RenderProduct and optional PPISP shader configuration.""" + render_product = stage.GetPrimAtPath(render_product_path) + if not render_product.IsValid() or render_product.GetTypeName() != "RenderProduct": + raise ValueError(f"RenderProduct not found at path: {render_product_path}") + + camera_rel = render_product.GetRelationship("camera") + camera_paths = [str(path) for path in camera_rel.GetTargets()] if camera_rel else [] + if not camera_paths: + raise ValueError(f"RenderProduct at path '{render_product_path}' has no camera relationship targets.") + + resolution = None + resolution_attr = render_product.GetAttribute("resolution") + if resolution_attr: + resolution_value = resolution_attr.Get() + if resolution_value is not None: + resolution = (int(resolution_value[0]), int(resolution_value[1])) + + ordered_vars_rel = render_product.GetRelationship("orderedVars") + ordered_vars = [str(path) for path in ordered_vars_rel.GetTargets()] if ordered_vars_rel else [] + + ppisp = None + ppisp_prim = stage.GetPrimAtPath(f"{render_product_path}/{CAMERA_PPISP_SHADER_NAME}") + if ppisp_prim.IsValid(): + from pxr import UsdShade + + ppisp = camera_ppisp_cfg_from_usd_shader(UsdShade.Shader(ppisp_prim)) + + return RenderProductInfo( + render_product_path=render_product_path, + camera_paths=camera_paths, + resolution=resolution, + ordered_vars=ordered_vars, + ppisp=ppisp, + camera_xform_time_samples=collect_camera_xform_time_samples(stage, camera_paths), + ) + + +def parse_render_product_file(usd_path: str, render_product_path: str) -> RenderProductInfo: + """Open a USD file and parse a RenderProduct.""" + from pxr import Usd + + stage = Usd.Stage.Open(usd_path) + if stage is None: + raise RuntimeError(f"Failed to open USD stage at path: {usd_path}") + return parse_render_product(stage, render_product_path) + + +def collect_camera_xform_time_samples(stage: Any, camera_paths: list[str]) -> list[float]: + """Collect authored xform time samples from cameras and inherited source cameras.""" + time_samples = set() + for camera_path in camera_paths: + prim = stage.GetPrimAtPath(camera_path) + if not prim.IsValid(): + continue + _collect_xform_attr_time_samples(prim, time_samples) + for inherited_path in prim.GetInherits().GetAllDirectInherits(): + inherited_prim = stage.GetPrimAtPath(inherited_path) + if inherited_prim.IsValid(): + _collect_xform_attr_time_samples(inherited_prim, time_samples) + if not time_samples: + start_time = stage.GetStartTimeCode() + end_time = stage.GetEndTimeCode() + if start_time != end_time: + time_samples.update([start_time, end_time]) + else: + 
time_samples.add(start_time) + return sorted(time_samples) + + +def _normalized_inputs(inputs: dict[str, Any]) -> dict[str, float | tuple[float, float]]: + values = default_camera_ppisp_inputs() + for input_name, value in inputs.items(): + if input_name not in values: + raise ValueError(f"Unknown PPISP input: {input_name}") + values[input_name] = _normalize_input_value(input_name, value) + return values + + +def _merge_shader_inputs_with_cfg( + ppisp_cfg: CameraPPISPCfg, + stage: Any, + input_overrides: dict[str, Any], +) -> CameraPPISPCfg: + parsed_cfg = camera_ppisp_cfg_from_usd_stage(stage, ppisp_cfg.shader_prim_path) + if input_overrides != CAMERA_PPISP_DEFAULT_INPUTS: + parsed_cfg.inputs.update(_normalized_input_overrides(input_overrides)) + return parsed_cfg + + +def _normalized_input_overrides(inputs: dict[str, Any]) -> dict[str, float | tuple[float, float]]: + values = {} + for input_name, value in inputs.items(): + if input_name not in CAMERA_PPISP_DEFAULT_INPUTS: + raise ValueError(f"Unknown PPISP input: {input_name}") + values[input_name] = _normalize_input_value(input_name, value) + return values + + +def _normalize_input_value(input_name: str, value: Any) -> float | tuple[float, float]: + if input_name in CAMERA_PPISP_FLOAT2_INPUTS: + if len(value) != 2: + raise ValueError(f"PPISP input '{input_name}' expects two values.") + return (float(value[0]), float(value[1])) + return float(value) + + +def _read_first_authored_value(attr: Any) -> Any: + time_samples = attr.GetTimeSamples() + if time_samples: + return attr.Get(time_samples[0]) + return attr.Get() + + +def _collect_xform_attr_time_samples(prim: Any, time_samples: set[float]) -> None: + for attr in prim.GetAttributes(): + if attr.GetName().startswith("xformOp:"): + time_samples.update(attr.GetTimeSamples()) diff --git a/source/isaaclab/isaaclab/renderers/camera_ppisp_warp.py b/source/isaaclab/isaaclab/renderers/camera_ppisp_warp.py new file mode 100644 index 000000000000..825a71267638 --- /dev/null +++ b/source/isaaclab/isaaclab/renderers/camera_ppisp_warp.py @@ -0,0 +1,292 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Warp kernels for IsaacLab-side PPISP post-processing.""" + +from __future__ import annotations + +import torch +import warp as wp + +from .camera_ppisp import CameraPPISPCfg + +wp.init() + + +@wp.func +def _bounded_softplus(raw: wp.float32, min_value: wp.float32): + """Map an unconstrained parameter to a positive value with a lower bound. + + The PPISP CRF stores toe/shoulder/gamma as raw optimization parameters. This + helper applies ``min + log(1 + exp(raw))`` so the resulting shape parameters + stay positive and numerically away from degenerate zero values. + """ + return min_value + wp.log(1.0 + wp.exp(raw)) + + +@wp.func +def _sigmoid(raw: wp.float32): + return 1.0 / (1.0 + wp.exp(0.0 - raw)) + + +@wp.func +def _apply_vignetting( + value: wp.float32, + uv: wp.vec2f, + optical_center: wp.vec2f, + alpha1: wp.float32, + alpha2: wp.float32, + alpha3: wp.float32, +): + """Apply per-channel radial vignetting in local normalized image coordinates. + + The pixel coordinate ``uv`` is centered on the current de-tiled camera image + and normalized by ``max(width, height)``. The falloff is the clamped radial + polynomial ``1 + a1 r^2 + a2 r^4 + a3 r^6``, where + ``r^2 = dot(uv - optical_center, uv - optical_center)``. 
+ """ + delta = uv - optical_center + radius_squared = wp.dot(delta, delta) + radius_power = radius_squared + falloff = wp.float32(1.0) + alpha1 * radius_power + radius_power = radius_power * radius_squared + falloff = falloff + alpha2 * radius_power + radius_power = radius_power * radius_squared + falloff = falloff + alpha3 * radius_power + return value * wp.clamp(falloff, 0.0, 1.0) + + +@wp.func +def _apply_crf( + value: wp.float32, + toe_raw: wp.float32, + shoulder_raw: wp.float32, + gamma_raw: wp.float32, + center_raw: wp.float32, +): + """Apply one channel of the PPISP camera response function. + + The CRF is a piecewise power curve split around ``center = sigmoid(raw)``. + Toe, shoulder, and gamma are bounded-softplus parameters. Values below the + center use the toe exponent; values above it use the shoulder exponent; the + result is then raised to ``gamma``. + """ + x = wp.clamp(value, 0.0, 1.0) + toe = _bounded_softplus(toe_raw, 0.3) + shoulder = _bounded_softplus(shoulder_raw, 0.3) + gamma = _bounded_softplus(gamma_raw, 0.1) + center = _sigmoid(center_raw) + + lerp_value = (shoulder - toe) * center + toe + a = (shoulder * center) / lerp_value + b = 1.0 - a + + y = wp.float32(0.0) + if x <= center: + y = a * wp.pow(x / center, toe) + else: + y = 1.0 - b * wp.pow((1.0 - x) / (1.0 - center), shoulder) + + return wp.pow(wp.max(0.0, y), gamma) + + +@wp.func +def _compute_homography_mul( + rgb: wp.vec3f, + blue_latent: wp.vec2f, + red_latent: wp.vec2f, + green_latent: wp.vec2f, + neutral_latent: wp.vec2f, +): + """Apply the PPISP color correction homography. + + The four 2D latent controls perturb the blue, red, green, and neutral + chromaticity anchors. A projective transform is solved from those anchors + and applied in ``r,g,intensity`` space, preserving the input pixel intensity + after the chromaticity remap. 
+ """ + blue_delta = wp.vec2f( + 0.0480542 * blue_latent[0] - 0.0043631 * blue_latent[1], + -0.0043631 * blue_latent[0] + 0.0481283 * blue_latent[1], + ) + red_delta = wp.vec2f( + 0.0580570 * red_latent[0] - 0.0179872 * red_latent[1], + -0.0179872 * red_latent[0] + 0.0431061 * red_latent[1], + ) + green_delta = wp.vec2f( + 0.0433336 * green_latent[0] - 0.0180537 * green_latent[1], + -0.0180537 * green_latent[0] + 0.0580500 * green_latent[1], + ) + neutral_delta = wp.vec2f( + 0.0128369 * neutral_latent[0] - 0.0034654 * neutral_latent[1], + -0.0034654 * neutral_latent[0] + 0.0128158 * neutral_latent[1], + ) + + target_blue = wp.vec3f(blue_delta[0], blue_delta[1], 1.0) + target_red = wp.vec3f(1.0 + red_delta[0], red_delta[1], 1.0) + target_green = wp.vec3f(green_delta[0], 1.0 + green_delta[1], 1.0) + target_gray = wp.vec3f(1.0 / 3.0 + neutral_delta[0], 1.0 / 3.0 + neutral_delta[1], 1.0) + + row0 = wp.vec3f(target_gray[1] - target_blue[1], target_gray[1] - target_red[1], target_gray[1] - target_green[1]) + row1 = wp.vec3f(target_blue[0] - target_gray[0], target_red[0] - target_gray[0], target_green[0] - target_gray[0]) + row2 = wp.vec3f( + -target_gray[1] * target_blue[0] + target_gray[0] * target_blue[1], + -target_gray[1] * target_red[0] + target_gray[0] * target_red[1], + -target_gray[1] * target_green[0] + target_gray[0] * target_green[1], + ) + + lam = wp.cross(row0, row1) + if wp.dot(lam, lam) < 1.0e-20: + lam = wp.cross(row0, row2) + if wp.dot(lam, lam) < 1.0e-20: + lam = wp.cross(row1, row2) + + col0 = -target_blue * lam[0] + target_red * lam[1] + col1 = -target_blue * lam[0] + target_green * lam[2] + col2 = target_blue * lam[0] + + h22 = col0[2] + col1[2] + col2[2] + if wp.abs(h22) > 1.0e-20: + inv_h22 = 1.0 / h22 + col0 = col0 * inv_h22 + col1 = col1 * inv_h22 + col2 = col2 * inv_h22 + + intensity = rgb[0] + rgb[1] + rgb[2] + rgi = col0 * rgb[0] + col1 * rgb[1] + col2 * intensity + rgi = rgi * (intensity / (rgi[2] + 1.0e-5)) + return wp.vec3f(rgi[0], rgi[1], rgi[2] - rgi[0] - rgi[1]) + + +@wp.kernel(enable_backward=False) +def _apply_ppisp_kernel( + hdr_color: wp.array4d(dtype=wp.float32), + out_rgba: wp.array4d(dtype=wp.uint8), + image_width: wp.int32, + image_height: wp.int32, + exposure_offset: wp.float32, + vignetting_center_r: wp.vec2f, + vignetting_alpha1_r: wp.float32, + vignetting_alpha2_r: wp.float32, + vignetting_alpha3_r: wp.float32, + vignetting_center_g: wp.vec2f, + vignetting_alpha1_g: wp.float32, + vignetting_alpha2_g: wp.float32, + vignetting_alpha3_g: wp.float32, + vignetting_center_b: wp.vec2f, + vignetting_alpha1_b: wp.float32, + vignetting_alpha2_b: wp.float32, + vignetting_alpha3_b: wp.float32, + color_latent_blue: wp.vec2f, + color_latent_red: wp.vec2f, + color_latent_green: wp.vec2f, + color_latent_neutral: wp.vec2f, + crf_toe_r: wp.float32, + crf_shoulder_r: wp.float32, + crf_gamma_r: wp.float32, + crf_center_r: wp.float32, + crf_toe_g: wp.float32, + crf_shoulder_g: wp.float32, + crf_gamma_g: wp.float32, + crf_center_g: wp.float32, + crf_toe_b: wp.float32, + crf_shoulder_b: wp.float32, + crf_gamma_b: wp.float32, + crf_center_b: wp.float32, +): + """Apply the camera PPISP pipeline to one de-tiled HDR color tensor. + + For each pixel, the model applies exposure scaling, per-channel vignetting, + color homography correction, CRF tone mapping, and uint8 RGBA packing. The + first tensor dimension is the camera/environment index, so image-space + effects use local ``height_id`` and ``width_id`` coordinates per camera. 
+ """ + camera_id, height_id, width_id = wp.tid() + rgb = wp.vec3f( + hdr_color[camera_id, height_id, width_id, 0], + hdr_color[camera_id, height_id, width_id, 1], + hdr_color[camera_id, height_id, width_id, 2], + ) + max_resolution = wp.float32(image_width) + if image_height > image_width: + max_resolution = wp.float32(image_height) + uv = wp.vec2f( + (wp.float32(width_id) + 0.5 - wp.float32(image_width) * 0.5) / max_resolution, + (wp.float32(height_id) + 0.5 - wp.float32(image_height) * 0.5) / max_resolution, + ) + + out_rgb = rgb * wp.pow(2.0, exposure_offset) + out_rgb[0] = _apply_vignetting( + out_rgb[0], uv, vignetting_center_r, vignetting_alpha1_r, vignetting_alpha2_r, vignetting_alpha3_r + ) + out_rgb[1] = _apply_vignetting( + out_rgb[1], uv, vignetting_center_g, vignetting_alpha1_g, vignetting_alpha2_g, vignetting_alpha3_g + ) + out_rgb[2] = _apply_vignetting( + out_rgb[2], uv, vignetting_center_b, vignetting_alpha1_b, vignetting_alpha2_b, vignetting_alpha3_b + ) + out_rgb = _compute_homography_mul( + out_rgb, color_latent_blue, color_latent_red, color_latent_green, color_latent_neutral + ) + out_rgb[0] = _apply_crf(out_rgb[0], crf_toe_r, crf_shoulder_r, crf_gamma_r, crf_center_r) + out_rgb[1] = _apply_crf(out_rgb[1], crf_toe_g, crf_shoulder_g, crf_gamma_g, crf_center_g) + out_rgb[2] = _apply_crf(out_rgb[2], crf_toe_b, crf_shoulder_b, crf_gamma_b, crf_center_b) + + out_rgba[camera_id, height_id, width_id, 0] = wp.uint8(wp.clamp(out_rgb[0], 0.0, 1.0) * 255.0) + out_rgba[camera_id, height_id, width_id, 1] = wp.uint8(wp.clamp(out_rgb[1], 0.0, 1.0) * 255.0) + out_rgba[camera_id, height_id, width_id, 2] = wp.uint8(wp.clamp(out_rgb[2], 0.0, 1.0) * 255.0) + out_rgba[camera_id, height_id, width_id, 3] = wp.uint8(255) + + +def apply_ppisp_to_rgba(hdr_color: torch.Tensor, out_rgba: torch.Tensor, cfg: CameraPPISPCfg) -> None: + """Apply PPISP to ``hdr_color`` and write LDR RGBA into ``out_rgba``.""" + if not hdr_color.is_contiguous(): + raise ValueError("Camera PPISP HDR input tensor must be contiguous.") + if not out_rgba.is_contiguous(): + raise ValueError("Camera PPISP RGBA output tensor must be contiguous.") + + inputs = cfg.inputs + stream = wp.stream_from_torch(out_rgba.device) if out_rgba.is_cuda else None + wp.launch( + _apply_ppisp_kernel, + dim=out_rgba.shape[:3], + inputs=[ + wp.from_torch(hdr_color, dtype=wp.float32), + wp.from_torch(out_rgba, dtype=wp.uint8), + int(out_rgba.shape[2]), + int(out_rgba.shape[1]), + float(inputs["exposureOffset"]), + wp.vec2f(*inputs["vignettingCenterR"]), + float(inputs["vignettingAlpha1R"]), + float(inputs["vignettingAlpha2R"]), + float(inputs["vignettingAlpha3R"]), + wp.vec2f(*inputs["vignettingCenterG"]), + float(inputs["vignettingAlpha1G"]), + float(inputs["vignettingAlpha2G"]), + float(inputs["vignettingAlpha3G"]), + wp.vec2f(*inputs["vignettingCenterB"]), + float(inputs["vignettingAlpha1B"]), + float(inputs["vignettingAlpha2B"]), + float(inputs["vignettingAlpha3B"]), + wp.vec2f(*inputs["colorLatentBlue"]), + wp.vec2f(*inputs["colorLatentRed"]), + wp.vec2f(*inputs["colorLatentGreen"]), + wp.vec2f(*inputs["colorLatentNeutral"]), + float(inputs["crfToeR"]), + float(inputs["crfShoulderR"]), + float(inputs["crfGammaR"]), + float(inputs["crfCenterR"]), + float(inputs["crfToeG"]), + float(inputs["crfShoulderG"]), + float(inputs["crfGammaG"]), + float(inputs["crfCenterG"]), + float(inputs["crfToeB"]), + float(inputs["crfShoulderB"]), + float(inputs["crfGammaB"]), + float(inputs["crfCenterB"]), + ], + device=str(out_rgba.device), + stream=stream, + ) 
diff --git a/source/isaaclab/isaaclab/renderers/output_contract.py b/source/isaaclab/isaaclab/renderers/output_contract.py index bfa3fff41d8b..8a49795e230e 100644 --- a/source/isaaclab/isaaclab/renderers/output_contract.py +++ b/source/isaaclab/isaaclab/renderers/output_contract.py @@ -29,6 +29,7 @@ class RenderBufferKind(StrEnum): RGB = "rgb" RGBA = "rgba" + RGB_HDR = "rgb_hdr" ALBEDO = "albedo" DEPTH = "depth" DISTANCE_TO_IMAGE_PLANE = "distance_to_image_plane" diff --git a/source/isaaclab/isaaclab/sensors/camera/camera.py b/source/isaaclab/isaaclab/sensors/camera/camera.py index be52668dbd6c..29caafe3bb39 100644 --- a/source/isaaclab/isaaclab/sensors/camera/camera.py +++ b/source/isaaclab/isaaclab/sensors/camera/camera.py @@ -19,6 +19,8 @@ import isaaclab.utils.sensors as sensor_utils from isaaclab.app.settings_manager import get_settings_manager from isaaclab.renderers import BaseRenderer +from isaaclab.renderers.camera_ppisp import normalize_camera_ppisp_cfg +from isaaclab.renderers.camera_ppisp_warp import apply_ppisp_to_rgba from isaaclab.renderers.camera_render_spec import CameraRenderSpec from isaaclab.sim.views import FrameView from isaaclab.utils import to_camel_case @@ -134,6 +136,7 @@ def __init__(self, cfg: CameraCfg): # Renderer and render data — assigned in _initialize_impl. self._renderer: BaseRenderer | None = None self._render_data = None + self._renderer_output_data: dict[str, torch.Tensor] | None = None def __del__(self): """Unsubscribes from callbacks and cleans up renderer resources.""" @@ -448,6 +451,7 @@ def _update_buffers_impl(self, env_mask: wp.array): else: renderer.render(self._render_data) renderer.read_output(self._render_data, self._data) + self._apply_ppisp_if_needed() """ Private Helpers @@ -472,6 +476,9 @@ def _check_supported_data_types(self, cfg: CameraCfg): "\n\tHint: If you need to work with these sensor types, we recommend using their fast counterparts." f"\n\t\tFast counterparts: {fast_common_elements}" ) + cfg.ppisp = normalize_camera_ppisp_cfg(cfg.ppisp) + if cfg.ppisp is not None and not any(data_type in ("rgb", "rgba") for data_type in cfg.data_types): + raise ValueError("CAMERA PPISP requires 'rgb' or 'rgba' as the LDR output target.") def _create_buffers(self): """Create buffers for storing data.""" @@ -508,7 +515,34 @@ def _create_buffers(self): self._data.pos_w = torch.zeros((self._view.count, 3), device=self._device) self._data.quat_w_world = torch.zeros((self._view.count, 4), device=self._device) self._update_poses(self._ALL_INDICES) - self._renderer.set_outputs(self._render_data, self._data.output) + self._renderer_output_data = dict(self._data.output) + if self.cfg.ppisp is not None: + # PPISP consumes scene-linear HDR color but publishes only the user's + # requested LDR color outputs. Allocate the renderer-side RGB_HDR + # input buffer here once, even when RGB_HDR is not part of + # CameraData.output, so each render can fill it before the wrapper + # post-processes into rgb/rgba. + hdr_kind = RenderBufferKind.RGB_HDR + hdr_spec = specs.get(hdr_kind) + if hdr_spec is None: + raise RuntimeError( + f"Renderer {type(self._renderer).__name__} does not support CAMERA PPISP RGB HDR input." 
+ ) + if str(hdr_kind) not in self._renderer_output_data: + self._renderer_output_data[str(hdr_kind)] = torch.zeros( + (self._view.count, self.cfg.height, self.cfg.width, hdr_spec.channels), + dtype=hdr_spec.dtype, + device=self._device, + ).contiguous() + self._renderer.set_outputs(self._render_data, self._renderer_output_data) + + def _apply_ppisp_if_needed(self) -> None: + """Apply CAMERA PPISP once at the IsaacLab camera wrapper boundary.""" + if self.cfg.ppisp is None or self._renderer_output_data is None: + return + hdr_color = self._renderer_output_data[str(RenderBufferKind.RGB_HDR)] + rgba = self._data.output[str(RenderBufferKind.RGBA)] + apply_ppisp_to_rgba(hdr_color, rgba, self.cfg.ppisp) def _update_intrinsic_matrices(self, env_ids: Sequence[int]): """Compute camera's matrix of intrinsic parameters. diff --git a/source/isaaclab/isaaclab/sensors/camera/camera_cfg.py b/source/isaaclab/isaaclab/sensors/camera/camera_cfg.py index 5ee6cf30b6f6..5de60354b185 100644 --- a/source/isaaclab/isaaclab/sensors/camera/camera_cfg.py +++ b/source/isaaclab/isaaclab/sensors/camera/camera_cfg.py @@ -11,7 +11,7 @@ from isaaclab_physx.renderers import IsaacRtxRendererCfg -from isaaclab.renderers import RendererCfg +from isaaclab.renderers import CameraPPISPCfg, RendererCfg from isaaclab.sim import FisheyeCameraCfg, PinholeCameraCfg from isaaclab.utils import configclass @@ -194,6 +194,13 @@ class OffsetCfg: renderer_cfg: RendererCfg = field(default_factory=IsaacRtxRendererCfg) """Renderer configuration for camera sensor.""" + ppisp: CameraPPISPCfg | dict | None = None + """Optional PPISP post-processing effect for color outputs. + + PPISP is applied before LDR conversion. If imported from animated USD shader + inputs, exposure and color inputs are collapsed to their first authored value. + """ + def __post_init__(self): """Forward deprecated RTX-flavored fields onto :attr:`renderer_cfg`. 
diff --git a/source/isaaclab/test/renderers/test_camera_output_contract.py b/source/isaaclab/test/renderers/test_camera_output_contract.py index 2d6087d29708..7b5b5ec09f41 100644 --- a/source/isaaclab/test/renderers/test_camera_output_contract.py +++ b/source/isaaclab/test/renderers/test_camera_output_contract.py @@ -6,6 +6,7 @@ """Tests for the renderer→camera output contract.""" import warnings +from types import SimpleNamespace import pytest import torch @@ -127,11 +128,29 @@ def test_newton_warp_supported_output_types_key_set(): assert set(specs.keys()) == { RenderBufferKind.RGB, RenderBufferKind.RGBA, + RenderBufferKind.RGB_HDR, RenderBufferKind.ALBEDO, RenderBufferKind.DEPTH, RenderBufferKind.NORMALS, RenderBufferKind.INSTANCE_SEGMENTATION_FAST, } + assert specs[RenderBufferKind.RGB_HDR] == RenderBufferSpec(3, torch.float32) + + +def test_newton_warp_wraps_requested_rgb_hdr_output(): + """NewtonWarpRenderer wires requested RGB_HDR tensors to the Newton HDR output slot.""" + pytest.importorskip("isaaclab_newton") + pytest.importorskip("newton") + from isaaclab_newton.renderers.newton_warp_renderer import RenderData + + fake_sensor = SimpleNamespace(model=SimpleNamespace(world_count=2, device="cpu")) + render_data = RenderData(fake_sensor, SimpleNamespace(cfg=SimpleNamespace(width=4, height=3))) + rgb_hdr = torch.zeros((2, 3, 4, 3), dtype=torch.float32) + + render_data.set_outputs({str(RenderBufferKind.RGB_HDR): rgb_hdr}) + + assert render_data.outputs.hdr_color_image is not None + assert render_data.get_output(RenderBufferKind.RGB_HDR) is render_data.outputs.hdr_color_image def _make_camera_cfg(data_types: list[str]) -> CameraCfg: diff --git a/source/isaaclab/test/renderers/test_camera_ppisp.py b/source/isaaclab/test/renderers/test_camera_ppisp.py new file mode 100644 index 000000000000..d2e12384dfd0 --- /dev/null +++ b/source/isaaclab/test/renderers/test_camera_ppisp.py @@ -0,0 +1,107 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for camera PPISP USD parsing helpers.""" + +import pytest + +from pxr import Gf, Sdf, Usd, UsdGeom, UsdShade + +from isaaclab.renderers.camera_ppisp import ( + camera_ppisp_cfg_from_usd_shader, + normalize_camera_ppisp_cfg, + parse_render_product, + parse_render_product_file, +) + + +def test_ppisp_shader_import_uses_first_time_sample(): + stage = Usd.Stage.CreateInMemory() + shader = UsdShade.Shader.Define(stage, "/Render/RenderProduct/PPISP") + + exposure = shader.CreateInput("exposureOffset", Sdf.ValueTypeNames.Float).GetAttr() + exposure.Set(1.0) + exposure.Set(2.0, 10.0) + exposure.Set(3.0, 20.0) + + color = shader.CreateInput("colorLatentBlue", Sdf.ValueTypeNames.Float2).GetAttr() + color.Set(Gf.Vec2f(0.0, 0.0)) + color.Set(Gf.Vec2f(0.1, 0.2), 5.0) + + cfg = camera_ppisp_cfg_from_usd_shader(shader) + + assert cfg.inputs["exposureOffset"] == 2.0 + assert cfg.inputs["colorLatentBlue"] == pytest.approx((0.1, 0.2)) + + +def test_parse_render_product_collects_ppisp_and_camera_xform_samples(): + stage = Usd.Stage.CreateInMemory() + camera = UsdGeom.Camera.Define(stage, "/World/Camera") + translate = camera.AddTranslateOp() + translate.Set(Gf.Vec3f(0.0, 0.0, 1.0), 1.0) + translate.Set(Gf.Vec3f(0.0, 0.0, 2.0), 2.0) + stage.DefinePrim("/Render", "Scope") + render_product = stage.DefinePrim("/Render/RenderProduct", "RenderProduct") + render_product.CreateRelationship("camera").SetTargets([Sdf.Path("/World/Camera")]) + render_product.CreateAttribute("resolution", Sdf.ValueTypeNames.Int2).Set(Gf.Vec2i(32, 16)) + shader = UsdShade.Shader.Define(stage, "/Render/RenderProduct/PPISP") + shader.CreateInput("exposureOffset", Sdf.ValueTypeNames.Float).Set(1.25) + + info = parse_render_product(stage, "/Render/RenderProduct") + + assert info.render_product_path == "/Render/RenderProduct" + assert info.resolution == (32, 16) + assert len(info.camera_paths) == 1 + assert info.ppisp is not None + assert info.ppisp.inputs["exposureOffset"] == 1.25 + assert info.camera_xform_time_samples == [1.0, 2.0] + + +def test_parse_render_product_file_reads_exported_stage(tmp_path): + usd_path = tmp_path / "camera_ppisp.usda" + stage = Usd.Stage.CreateNew(str(usd_path)) + UsdGeom.Camera.Define(stage, "/World/Camera") + stage.DefinePrim("/Render", "Scope") + render_product = stage.DefinePrim("/Render/RenderProduct", "RenderProduct") + render_product.CreateRelationship("camera").SetTargets([Sdf.Path("/World/Camera")]) + shader = UsdShade.Shader.Define(stage, "/Render/RenderProduct/PPISP") + shader.CreateInput("exposureOffset", Sdf.ValueTypeNames.Float).Set(1.25) + stage.GetRootLayer().Save() + + info = parse_render_product_file(str(usd_path), "/Render/RenderProduct") + + assert info.ppisp is not None + assert info.ppisp.inputs["exposureOffset"] == 1.25 + + +def test_normalize_camera_ppisp_cfg_imports_shader_prim_path_from_stage(): + stage = Usd.Stage.CreateInMemory() + shader = UsdShade.Shader.Define(stage, "/Render/RenderProduct/PPISP") + shader.CreateInput("exposureOffset", Sdf.ValueTypeNames.Float).Set(1.5) + shader.CreateInput("colorLatentRed", Sdf.ValueTypeNames.Float2).Set(Gf.Vec2f(0.25, -0.5)) + + cfg = normalize_camera_ppisp_cfg({"shader_prim_path": "/Render/RenderProduct/PPISP"}, stage=stage) + + assert cfg.shader_prim_path == "/Render/RenderProduct/PPISP" + assert cfg.inputs["exposureOffset"] == 1.5 + assert cfg.inputs["colorLatentRed"] == pytest.approx((0.25, -0.5)) + + +def test_normalize_camera_ppisp_cfg_applies_explicit_overrides_after_shader_import(): 
+ stage = Usd.Stage.CreateInMemory() + shader = UsdShade.Shader.Define(stage, "/Render/RenderProduct/PPISP") + shader.CreateInput("exposureOffset", Sdf.ValueTypeNames.Float).Set(1.5) + shader.CreateInput("colorLatentRed", Sdf.ValueTypeNames.Float2).Set(Gf.Vec2f(0.25, -0.5)) + + cfg = normalize_camera_ppisp_cfg( + { + "shader_prim_path": "/Render/RenderProduct/PPISP", + "inputs": {"exposureOffset": 2.0}, + }, + stage=stage, + ) + + assert cfg.inputs["exposureOffset"] == 2.0 + assert cfg.inputs["colorLatentRed"] == pytest.approx((0.25, -0.5)) diff --git a/source/isaaclab/test/renderers/test_camera_ppisp_warp.py b/source/isaaclab/test/renderers/test_camera_ppisp_warp.py new file mode 100644 index 000000000000..df11bcad5e02 --- /dev/null +++ b/source/isaaclab/test/renderers/test_camera_ppisp_warp.py @@ -0,0 +1,114 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from types import SimpleNamespace + +import torch + +from pxr import Sdf, Usd, UsdGeom, UsdShade + +from isaaclab.renderers.camera_ppisp import CameraPPISPCfg, normalize_camera_ppisp_cfg, parse_render_product +from isaaclab.renderers.camera_ppisp_warp import apply_ppisp_to_rgba +from isaaclab.sensors.camera.camera import Camera +from isaaclab.sensors.camera.camera_data import CameraData +from isaaclab.sensors.camera.tiled_camera import TiledCamera +from isaaclab.sensors.camera.tiled_camera_cfg import TiledCameraCfg + + +def test_ppisp_warp_exposure_increases_ldr_output(): + hdr_color = torch.full((1, 4, 4, 3), 0.25, dtype=torch.float32) + baseline = torch.zeros((1, 4, 4, 4), dtype=torch.uint8) + exposed = torch.zeros_like(baseline) + + apply_ppisp_to_rgba(hdr_color, baseline, normalize_camera_ppisp_cfg({"inputs": {"exposureOffset": 0.0}})) + apply_ppisp_to_rgba(hdr_color, exposed, normalize_camera_ppisp_cfg({"inputs": {"exposureOffset": 1.0}})) + + assert torch.all(baseline[..., 3] == 255) + assert torch.all(exposed[..., 3] == 255) + assert exposed[..., :3].float().mean() > baseline[..., :3].float().mean() + + +def test_ppisp_warp_vignetting_uses_detiled_camera_coordinates(): + hdr_color = torch.full((2, 5, 5, 3), 0.5, dtype=torch.float32) + rgba = torch.zeros((2, 5, 5, 4), dtype=torch.uint8) + + apply_ppisp_to_rgba( + hdr_color, + rgba, + normalize_camera_ppisp_cfg( + { + "inputs": { + "vignettingAlpha1R": -1.0, + "vignettingAlpha1G": -1.0, + "vignettingAlpha1B": -1.0, + } + } + ), + ) + + assert torch.all(rgba[:, 2, 2, :3] > rgba[:, 0, 0, :3]) + assert torch.all(rgba[0] == rgba[1]) + + +def test_camera_wrapper_applies_parsed_ppisp_from_render_product(): + stage = Usd.Stage.CreateInMemory() + UsdGeom.Camera.Define(stage, "/World/Camera") + stage.DefinePrim("/Render", "Scope") + render_product = stage.DefinePrim("/Render/RenderProduct", "RenderProduct") + render_product.CreateRelationship("camera").SetTargets([Sdf.Path("/World/Camera")]) + shader = UsdShade.Shader.Define(stage, "/Render/RenderProduct/PPISP") + shader.CreateInput("exposureOffset", Sdf.ValueTypeNames.Float).Set(1.0) + + ppisp_cfg = parse_render_product(stage, "/Render/RenderProduct").ppisp + assert ppisp_cfg is not None + hdr_color = torch.full((1, 4, 4, 3), 0.25, dtype=torch.float32) + baseline = torch.zeros((1, 4, 4, 4), dtype=torch.uint8) + rgba = torch.zeros((1, 4, 4, 4), dtype=torch.uint8) + apply_ppisp_to_rgba(hdr_color, baseline, normalize_camera_ppisp_cfg({"inputs": {"exposureOffset": 0.0}})) + + camera = 
SimpleNamespace( + cfg=SimpleNamespace(ppisp=ppisp_cfg), + _renderer_output_data={"rgb_hdr": hdr_color}, + _data=CameraData(output={"rgba": rgba, "rgb": rgba[..., :3]}), + ) + + Camera._apply_ppisp_if_needed(camera) + + assert torch.all(camera._data.output["rgba"][..., 3] == 255) + assert torch.all(camera._data.output["rgb"] == camera._data.output["rgba"][..., :3]) + assert camera._data.output["rgb"].float().mean() > baseline[..., :3].float().mean() + + +def test_tiled_camera_cfg_accepts_camera_ppisp(): + ppisp_cfg = {"inputs": {"exposureOffset": 1.0}} + + cfg = TiledCameraCfg( + prim_path="/World/Camera", + width=4, + height=4, + data_types=["rgb"], + ppisp=ppisp_cfg, + ) + + assert cfg.ppisp == ppisp_cfg + + +def test_tiled_camera_alias_uses_camera_ppisp_wrapper(): + ppisp_cfg = normalize_camera_ppisp_cfg(CameraPPISPCfg(inputs={"exposureOffset": 1.0})) + hdr_color = torch.full((1, 4, 4, 3), 0.25, dtype=torch.float32) + baseline = torch.zeros((1, 4, 4, 4), dtype=torch.uint8) + rgba = torch.zeros((1, 4, 4, 4), dtype=torch.uint8) + apply_ppisp_to_rgba(hdr_color, baseline, normalize_camera_ppisp_cfg({"inputs": {"exposureOffset": 0.0}})) + + camera = SimpleNamespace( + cfg=SimpleNamespace(ppisp=ppisp_cfg), + _renderer_output_data={"rgb_hdr": hdr_color}, + _data=CameraData(output={"rgba": rgba, "rgb": rgba[..., :3]}), + ) + + TiledCamera._apply_ppisp_if_needed(camera) + + assert torch.all(camera._data.output["rgba"][..., 3] == 255) + assert camera._data.output["rgb"].float().mean() > baseline[..., :3].float().mean() diff --git a/source/isaaclab_newton/isaaclab_newton/renderers/newton_warp_renderer.py b/source/isaaclab_newton/isaaclab_newton/renderers/newton_warp_renderer.py index a02d820f2951..be254dc154e6 100644 --- a/source/isaaclab_newton/isaaclab_newton/renderers/newton_warp_renderer.py +++ b/source/isaaclab_newton/isaaclab_newton/renderers/newton_warp_renderer.py @@ -36,6 +36,7 @@ class RenderData: @dataclass class CameraOutputs: color_image: wp.array(dtype=wp.uint32, ndim=4) = None + hdr_color_image: wp.array(dtype=wp.vec3f, ndim=4) = None albedo_image: wp.array(dtype=wp.uint32, ndim=4) = None depth_image: wp.array(dtype=wp.float32, ndim=4) = None normals_image: wp.array(dtype=wp.vec3f, ndim=4) = None @@ -56,6 +57,8 @@ def set_outputs(self, output_data: dict[str, torch.Tensor]): for output_name, tensor_data in output_data.items(): if output_name == RenderBufferKind.RGBA: self.outputs.color_image = self._from_torch(tensor_data, dtype=wp.uint32) + elif output_name == RenderBufferKind.RGB_HDR: + self.outputs.hdr_color_image = self._from_torch(tensor_data, dtype=wp.vec3f) elif output_name == RenderBufferKind.ALBEDO: self.outputs.albedo_image = self._from_torch(tensor_data, dtype=wp.uint32) elif output_name == RenderBufferKind.DEPTH: @@ -72,6 +75,8 @@ def set_outputs(self, output_data: dict[str, torch.Tensor]): def get_output(self, output_name: str) -> wp.array: if output_name == RenderBufferKind.RGBA: return self.outputs.color_image + elif output_name == RenderBufferKind.RGB_HDR: + return self.outputs.hdr_color_image elif output_name == RenderBufferKind.ALBEDO: return self.outputs.albedo_image elif output_name == RenderBufferKind.DEPTH: @@ -186,6 +191,7 @@ def supported_output_types(self) -> dict[RenderBufferKind, RenderBufferSpec]: return { RenderBufferKind.RGBA: RenderBufferSpec(4, torch.uint8), RenderBufferKind.RGB: RenderBufferSpec(3, torch.uint8), + RenderBufferKind.RGB_HDR: RenderBufferSpec(3, torch.float32), RenderBufferKind.ALBEDO: RenderBufferSpec(4, torch.uint8), 
RenderBufferKind.DEPTH: RenderBufferSpec(1, torch.float32), RenderBufferKind.NORMALS: RenderBufferSpec(3, torch.float32), @@ -225,6 +231,7 @@ def render(self, render_data: RenderData): render_data.camera_transforms, render_data.camera_rays, color_image=render_data.outputs.color_image, + hdr_color_image=render_data.outputs.hdr_color_image, albedo_image=render_data.outputs.albedo_image, depth_image=render_data.outputs.depth_image, normal_image=render_data.outputs.normals_image, diff --git a/source/isaaclab_ov/isaaclab_ov/renderers/ovrtx_renderer.py b/source/isaaclab_ov/isaaclab_ov/renderers/ovrtx_renderer.py index 5d1782373d87..92a87d691c3c 100644 --- a/source/isaaclab_ov/isaaclab_ov/renderers/ovrtx_renderer.py +++ b/source/isaaclab_ov/isaaclab_ov/renderers/ovrtx_renderer.py @@ -49,6 +49,7 @@ create_camera_transforms_kernel, extract_all_depth_tiles_kernel, extract_all_depth_tiles_kernel_legacy, + extract_all_rgb_float_tiles_kernel, extract_all_rgba_tiles_kernel, generate_random_colors_from_ids_kernel, generate_random_colors_from_ids_kernel_legacy, @@ -135,6 +136,7 @@ def supported_output_types(self) -> dict[RenderBufferKind, RenderBufferSpec]: return { RenderBufferKind.RGBA: RenderBufferSpec(4, torch.uint8), RenderBufferKind.RGB: RenderBufferSpec(3, torch.uint8), + RenderBufferKind.RGB_HDR: RenderBufferSpec(3, torch.float32), RenderBufferKind.ALBEDO: RenderBufferSpec(4, torch.uint8), RenderBufferKind.SIMPLE_SHADING_CONSTANT_DIFFUSE: RenderBufferSpec(3, torch.uint8), RenderBufferKind.SIMPLE_SHADING_DIFFUSE_MDL: RenderBufferSpec(3, torch.uint8), @@ -186,6 +188,8 @@ def initialize(self, spec: CameraRenderSpec): height = spec.cfg.height num_envs = spec.num_instances data_types = spec.cfg.data_types if spec.cfg.data_types else ["rgb"] + if spec.cfg.ppisp is not None and "rgb_hdr" not in data_types: + data_types = [*data_types, "rgb_hdr"] env_0_prefix = "/World/envs/env_0/" first_cam_path = spec.camera_prim_paths[0] @@ -541,6 +545,25 @@ def _extract_depth_tiles( device=DEVICE, ) + def _extract_hdr_color_tiles( + self, render_data: OVRTXRenderData, tiled_data: wp.array, output_buffers: dict + ) -> None: + """Extract per-env HdrColor tiles into output_buffers.""" + if "rgb_hdr" not in output_buffers: + return + wp.launch( + kernel=extract_all_rgb_float_tiles_kernel, + dim=(render_data.num_envs, render_data.height, render_data.width), + inputs=[ + tiled_data, + output_buffers["rgb_hdr"], + render_data.num_cols, + render_data.width, + render_data.height, + ], + device=DEVICE, + ) + def _process_render_frame(self, render_data: OVRTXRenderData, frame, output_buffers: dict) -> None: """Extract RGB, depth, albedo, and semantic from a single render frame into output_buffers.""" if "LdrColor" in frame.render_vars: @@ -578,6 +601,11 @@ def _process_render_frame(self, render_data: OVRTXRenderData, frame, output_buff tiled_albedo_data = wp.from_dlpack(mapping.tensor) self._extract_rgba_tiles(render_data, tiled_albedo_data, output_buffers, "albedo", suffix="albedo") + if "HdrColor" in frame.render_vars and "rgb_hdr" in output_buffers: + with frame.render_vars["HdrColor"].map(device=Device.CUDA) as mapping: + tiled_hdr_data = wp.from_dlpack(mapping.tensor) + self._extract_hdr_color_tiles(render_data, tiled_hdr_data, output_buffers) + if "SemanticSegmentation" in frame.render_vars and "semantic_segmentation" in output_buffers: with frame.render_vars["SemanticSegmentation"].map(device=Device.CUDA) as mapping: tiled_semantic_data = wp.from_dlpack(mapping.tensor) diff --git 
a/source/isaaclab_ov/isaaclab_ov/renderers/ovrtx_renderer_kernels.py b/source/isaaclab_ov/isaaclab_ov/renderers/ovrtx_renderer_kernels.py index c287f1257632..588e057e3d13 100644 --- a/source/isaaclab_ov/isaaclab_ov/renderers/ovrtx_renderer_kernels.py +++ b/source/isaaclab_ov/isaaclab_ov/renderers/ovrtx_renderer_kernels.py @@ -109,6 +109,25 @@ def extract_all_rgba_tiles_kernel( output_buffer[env_idx, y, x, 3] = tiled_buffer[src_y, src_x, 3] +@wp.kernel +def extract_all_rgb_float_tiles_kernel( + tiled_buffer: wp.array(dtype=wp.float32, ndim=3), # type: ignore + output_buffer: wp.array(dtype=wp.float32, ndim=4), # type: ignore (num_envs, H, W, 3) + num_cols: int, + tile_width: int, + tile_height: int, +): + """Extract ALL RGB float tiles from a tiled buffer in a single kernel launch.""" + env_idx, y, x = wp.tid() + tile_x = env_idx % num_cols + tile_y = env_idx // num_cols + src_x = tile_x * tile_width + x + src_y = tile_y * tile_height + y + output_buffer[env_idx, y, x, 0] = tiled_buffer[src_y, src_x, 0] + output_buffer[env_idx, y, x, 1] = tiled_buffer[src_y, src_x, 1] + output_buffer[env_idx, y, x, 2] = tiled_buffer[src_y, src_x, 2] + + @wp.kernel def extract_all_depth_tiles_kernel_legacy( tiled_buffer: wp.array(dtype=wp.float32, ndim=2), # type: ignore diff --git a/source/isaaclab_ov/isaaclab_ov/renderers/ovrtx_usd.py b/source/isaaclab_ov/isaaclab_ov/renderers/ovrtx_usd.py index a222981ea2ed..3a6453232c47 100644 --- a/source/isaaclab_ov/isaaclab_ov/renderers/ovrtx_usd.py +++ b/source/isaaclab_ov/isaaclab_ov/renderers/ovrtx_usd.py @@ -19,20 +19,39 @@ logger = logging.getLogger(__name__) -def get_render_var_config(data_types: list[str]) -> tuple[str, str, str]: - """Return (render_var_path, render_var_name, source_name) from data_types.""" +def get_render_var_configs(data_types: list[str]) -> list[tuple[str, str, str]]: + """Return render var configs needed for the requested data types. + + Each entry is ``(render_var_path, render_var_name, source_name)``. 
+ """ + data_types = data_types if data_types else ["rgb"] + render_vars: list[tuple[str, str, str]] = [] + use_depth = any(dt in ["depth", "distance_to_image_plane", "distance_to_camera"] for dt in data_types) use_albedo = "albedo" in data_types use_semantic = "semantic_segmentation" in data_types - use_rgb = any(dt in ["rgb", "rgba"] for dt in data_types) + use_hdr = "rgb_hdr" in data_types + use_ldr = any(dt in ["rgb", "rgba"] or dt.startswith("simple_shading_") for dt in data_types) + + if use_ldr: + render_vars.append(("/Render/Vars/LdrColor", "LdrColor", "LdrColor")) + if use_hdr: + render_vars.append(("/Render/Vars/HdrColor", "HdrColor", "HdrColor")) + if use_depth: + render_vars.append(("/Render/Vars/depth", "depth", "DistanceToImagePlaneSD")) + if use_albedo: + render_vars.append(("/Render/Vars/albedo", "albedo", "DiffuseAlbedoSD")) + if use_semantic: + render_vars.append(("/Render/Vars/semantic", "semantic", "SemanticSegmentation")) - if use_depth and not (use_rgb or use_albedo or use_semantic): - return "/Render/Vars/depth", "depth", "DistanceToImagePlaneSD" - if use_albedo and not (use_rgb or use_semantic): - return "/Render/Vars/albedo", "albedo", "DiffuseAlbedoSD" - if use_semantic and not (use_rgb or use_albedo): - return "/Render/Vars/semantic", "semantic", "SemanticSegmentation" - return "/Render/Vars/LdrColor", "LdrColor", "LdrColor" + if not render_vars: + render_vars.append(("/Render/Vars/LdrColor", "LdrColor", "LdrColor")) + return render_vars + + +def get_render_var_config(data_types: list[str]) -> tuple[str, str, str]: + """Return (render_var_path, render_var_name, source_name) from data_types.""" + return get_render_var_configs(data_types)[0] def build_render_scope_usd( @@ -44,6 +63,7 @@ def build_render_scope_usd( tiled_width: int, tiled_height: int, minimal_mode: int | None = None, + render_var_configs: list[tuple[str, str, str]] | None = None, ) -> str: """Build the Render scope USD string (def Scope Render, RenderProduct, Vars). @@ -56,6 +76,7 @@ def build_render_scope_usd( tiled_width: Width of the tiled image. tiled_height: Height of the tiled image. minimal_mode: RTX minimal mode. None if not requested. Valid values are 1, 2, 3. + render_var_configs: Render variables to author. Uses the single render var arguments if not provided. Returns: The USD string for the render scope. 
@@ -71,6 +92,16 @@ def build_render_scope_usd( ] render_mode_block = "\n ".join(render_mode_lines) + if render_var_configs is None: + render_var_configs = [(render_var_path, render_var_name, source_name)] + ordered_vars = ", ".join(f"<{path}>" for path, _, _ in render_var_configs) + render_var_defs = "\n".join( + f''' def RenderVar "{name}" + {{ + uniform string sourceName = "{source}" + }}''' + for _, name, source in render_var_configs + ) return f''' def Scope "Render" @@ -83,16 +114,13 @@ def RenderProduct "{render_product_name}" ( float omni:rtx:rt:ambientLight:intensity = 1.0 {render_mode_block} token[] omni:rtx:waitForEvents = ["AllLoadingFinished", "OnlyOnFirstRequest"] - rel orderedVars = <{render_var_path}> + rel orderedVars = [{ordered_vars}] uniform int2 resolution = ({tiled_width}, {tiled_height}) }} def "Vars" {{ - def RenderVar "{render_var_name}" - {{ - uniform string sourceName = "{source_name}" - }} +{render_var_defs} }} }} ''' @@ -140,7 +168,8 @@ def inject_cameras_into_usd( render_product_name = "RenderProduct" render_product_path = f"/Render/{render_product_name}" - render_var_path, render_var_name, source_name = get_render_var_config(data_types) + render_var_configs = get_render_var_configs(data_types) + render_var_path, render_var_name, source_name = render_var_configs[0] camera_content = build_render_scope_usd( camera_paths, @@ -151,6 +180,7 @@ def inject_cameras_into_usd( tiled_width, tiled_height, minimal_mode, + render_var_configs, ) combined_usd = original_usd.rstrip() + "\n\n" + camera_content diff --git a/source/isaaclab_ov/test/test_ovrtx_renderer_contract.py b/source/isaaclab_ov/test/test_ovrtx_renderer_contract.py index b6c590a54a54..65dcd72c1c6a 100644 --- a/source/isaaclab_ov/test/test_ovrtx_renderer_contract.py +++ b/source/isaaclab_ov/test/test_ovrtx_renderer_contract.py @@ -13,6 +13,11 @@ from isaaclab_ov.renderers import OVRTXRendererCfg from isaaclab_ov.renderers.ovrtx_renderer import OVRTXRenderData, OVRTXRenderer +from isaaclab_ov.renderers.ovrtx_usd import ( + build_render_scope_usd, + get_render_var_config, + get_render_var_configs, +) from isaaclab.sensors.camera import CameraCfg from isaaclab.sensors.camera.camera_data import CameraData, RenderBufferKind, RenderBufferSpec @@ -52,16 +57,51 @@ def test_ovrtx_supported_output_types_key_set(): assert set(specs.keys()) == { RenderBufferKind.RGB, RenderBufferKind.RGBA, + RenderBufferKind.RGB_HDR, RenderBufferKind.ALBEDO, + RenderBufferKind.SIMPLE_SHADING_CONSTANT_DIFFUSE, + RenderBufferKind.SIMPLE_SHADING_DIFFUSE_MDL, + RenderBufferKind.SIMPLE_SHADING_FULL_MDL, RenderBufferKind.SEMANTIC_SEGMENTATION, RenderBufferKind.DEPTH, RenderBufferKind.DISTANCE_TO_IMAGE_PLANE, RenderBufferKind.DISTANCE_TO_CAMERA, } assert specs[RenderBufferKind.RGBA] == RenderBufferSpec(4, torch.uint8) + assert specs[RenderBufferKind.RGB_HDR] == RenderBufferSpec(3, torch.float32) assert specs[RenderBufferKind.DEPTH] == RenderBufferSpec(1, torch.float32) +def test_ovrtx_rgb_hdr_uses_hdr_color_render_var(): + """Requesting RGB_HDR from OVRTX selects the HdrColor render variable.""" + assert get_render_var_config(["rgb_hdr"]) == ("/Render/Vars/HdrColor", "HdrColor", "HdrColor") + + +def test_ovrtx_rgb_and_rgb_hdr_author_both_render_vars(): + """Requesting LDR RGB and RGB_HDR keeps both OVRTX render variables.""" + render_var_configs = get_render_var_configs(["rgb", "rgb_hdr"]) + + assert render_var_configs == [ + ("/Render/Vars/LdrColor", "LdrColor", "LdrColor"), + ("/Render/Vars/HdrColor", "HdrColor", "HdrColor"), + ] + + 
render_scope = build_render_scope_usd(
+        camera_paths=["/World/envs/env_0/Camera"],
+        render_product_name="RenderProduct",
+        render_var_path=render_var_configs[0][0],
+        render_var_name=render_var_configs[0][1],
+        source_name=render_var_configs[0][2],
+        tiled_width=16,
+        tiled_height=8,
+        render_var_configs=render_var_configs,
+    )
+
+    assert "rel orderedVars = [</Render/Vars/LdrColor>, </Render/Vars/HdrColor>]" in render_scope
+    assert 'def RenderVar "LdrColor"' in render_scope
+    assert 'def RenderVar "HdrColor"' in render_scope
+
+
 def test_ovrtx_set_outputs_wraps_caller_torch_zero_copy():
     """OVRTXRenderer.set_outputs publishes warp views over the caller's torch storage."""
     import warp as wp
@@ -90,6 +130,31 @@ def test_ovrtx_set_outputs_wraps_caller_torch_zero_copy():
     assert "rgb" not in render_data.warp_buffers


+def test_ovrtx_set_outputs_wraps_requested_rgb_hdr_output():
+    """OVRTXRenderer.set_outputs publishes a zero-copy view for requested RGB_HDR."""
+    import warp as wp
+
+    renderer = OVRTXRenderer(OVRTXRendererCfg())
+
+    if not torch.cuda.is_available():
+        pytest.skip("OVRTX zero-copy wrapping requires a CUDA device")
+    device = "cuda"
+
+    cfg = _make_camera_cfg(["rgb_hdr"])
+    data = CameraData.allocate(
+        data_types=cfg.data_types,
+        height=8,
+        width=16,
+        num_views=2,
+        device=device,
+        supported_specs=renderer.supported_output_types(),
+    )
+    render_data = _make_ovrtx_render_data()
+    renderer.set_outputs(render_data, data.output)
+
+    assert render_data.warp_buffers["rgb_hdr"].ptr == wp.from_torch(data.output["rgb_hdr"]).ptr
+
+
 def test_ovrtx_read_output_is_a_no_op_after_consolidation():
     """OVRTXRenderer.read_output is a no-op once set_outputs wires up zero-copy."""
     renderer = OVRTXRenderer(OVRTXRendererCfg())
diff --git a/source/isaaclab_ov/test/test_ovrtx_renderer_kernels.py b/source/isaaclab_ov/test/test_ovrtx_renderer_kernels.py
index ed416d05a7e6..7ca14770d11e 100644
--- a/source/isaaclab_ov/test/test_ovrtx_renderer_kernels.py
+++ b/source/isaaclab_ov/test/test_ovrtx_renderer_kernels.py
@@ -14,6 +14,7 @@
     DEVICE,
     extract_all_depth_tiles_kernel,
     extract_all_depth_tiles_kernel_legacy,
+    extract_all_rgb_float_tiles_kernel,
     extract_all_rgba_tiles_kernel,
     generate_random_colors_from_ids_kernel,
     generate_random_colors_from_ids_kernel_legacy,
@@ -129,6 +130,28 @@ def _reference_extract_all_rgba_tiles(
     return out


+def _reference_extract_all_rgb_float_tiles(
+    tiled_np: np.ndarray,
+    num_envs: int,
+    num_cols: int,
+    tile_width: int,
+    tile_height: int,
+) -> np.ndarray:
+    """NumPy reference for ``extract_all_rgb_float_tiles_kernel``."""
+    out = np.zeros((num_envs, tile_height, tile_width, 3), dtype=np.float32)
+    for env_idx in range(num_envs):
+        tile_x = env_idx % num_cols
+        tile_y = env_idx // num_cols
+        for y in range(tile_height):
+            for x in range(tile_width):
+                src_y = tile_y * tile_height + y
+                src_x = tile_x * tile_width + x
+                out[env_idx, y, x, 0] = tiled_np[src_y, src_x, 0]
+                out[env_idx, y, x, 1] = tiled_np[src_y, src_x, 1]
+                out[env_idx, y, x, 2] = tiled_np[src_y, src_x, 2]
+    return out
+
+
 class TestExtractAllDepthTilesKernel:
     """Tests for ``extract_all_depth_tiles_kernel``."""

@@ -414,6 +437,38 @@ def test_various_layouts(self, num_cols, num_envs, tile_width, tile_height, num_
         np.testing.assert_array_equal(output_wp.numpy(), expected)


+class TestExtractAllRgbFloatTilesKernel:
+    """Tests for ``extract_all_rgb_float_tiles_kernel`` used by OVRTX HdrColor."""
+
+    def test_two_by_two_tile_grid(self):
+        num_cols = 2
+        num_envs = 4
+        tile_width = 2
+        tile_height = 3
+        tiled_h = (num_envs // num_cols) * tile_height
+        tiled_w
= num_cols * tile_width + tiled_np = np.zeros((tiled_h, tiled_w, 3), dtype=np.float32) + for h in range(tiled_h): + for w in range(tiled_w): + tiled_np[h, w, 0] = float(h * 1000 + w) + tiled_np[h, w, 1] = float(h * 1000 + w + 100) + tiled_np[h, w, 2] = float(h * 1000 + w + 200) + + tiled_wp = wp.array(tiled_np, dtype=wp.float32, ndim=3, device=DEVICE) + output_wp = wp.zeros(shape=(num_envs, tile_height, tile_width, 3), dtype=wp.float32, device=DEVICE) + + wp.launch( + kernel=extract_all_rgb_float_tiles_kernel, + dim=(num_envs, tile_height, tile_width), + inputs=[tiled_wp, output_wp, num_cols, tile_width, tile_height], + device=DEVICE, + ) + wp.synchronize() + + expected = _reference_extract_all_rgb_float_tiles(tiled_np, num_envs, num_cols, tile_width, tile_height) + np.testing.assert_allclose(output_wp.numpy(), expected, rtol=0, atol=0) + + class TestRandomColorsFromIdsKernel: """Tests for generate_random_colors_from_ids_kernel.""" diff --git a/source/isaaclab_physx/isaaclab_physx/renderers/isaac_rtx_renderer.py b/source/isaaclab_physx/isaaclab_physx/renderers/isaac_rtx_renderer.py index 242ac3729d0b..40da49633c7c 100644 --- a/source/isaaclab_physx/isaaclab_physx/renderers/isaac_rtx_renderer.py +++ b/source/isaaclab_physx/isaaclab_physx/renderers/isaac_rtx_renderer.py @@ -109,6 +109,7 @@ def supported_output_types(self) -> dict[RenderBufferKind, RenderBufferSpec]: # ``Camera`` aliases ``rgb`` as a view into ``rgba`` storage. RenderBufferKind.RGBA: RenderBufferSpec(4, torch.uint8), RenderBufferKind.RGB: RenderBufferSpec(3, torch.uint8), + RenderBufferKind.RGB_HDR: RenderBufferSpec(3, torch.float32), RenderBufferKind.DEPTH: RenderBufferSpec(1, torch.float32), RenderBufferKind.DISTANCE_TO_IMAGE_PLANE: RenderBufferSpec(1, torch.float32), RenderBufferKind.DISTANCE_TO_CAMERA: RenderBufferSpec(1, torch.float32), @@ -211,8 +212,19 @@ def create_render_data(self, spec: CameraRenderSpec) -> IsaacRtxRenderData: annotators = {} for annotator_type in spec.cfg.data_types: if annotator_type == "rgba" or annotator_type == "rgb": - annotator = rep.AnnotatorRegistry.get_annotator("rgb", device=spec.device, do_array_copy=False) - annotators["rgba"] = annotator + if spec.cfg.ppisp is not None: + if str(RenderBufferKind.RGB_HDR) not in annotators: + annotator = rep.AnnotatorRegistry.get_annotator( + "HdrColor", device=spec.device, do_array_copy=False + ) + annotators[str(RenderBufferKind.RGB_HDR)] = annotator + else: + annotator = rep.AnnotatorRegistry.get_annotator("rgb", device=spec.device, do_array_copy=False) + annotators["rgba"] = annotator + elif annotator_type == str(RenderBufferKind.RGB_HDR): + if str(RenderBufferKind.RGB_HDR) not in annotators: + annotator = rep.AnnotatorRegistry.get_annotator("HdrColor", device=spec.device, do_array_copy=False) + annotators[str(RenderBufferKind.RGB_HDR)] = annotator elif annotator_type == "albedo": # TODO: this is a temporary solution because replicator has not exposed the annotator yet # once it's exposed, we can remove this @@ -363,6 +375,8 @@ def tiling_grid_shape(): tiled_data_buffer = tiled_data_buffer[:, :, :3].contiguous() if data_type in SIMPLE_SHADING_MODES: tiled_data_buffer = tiled_data_buffer[:, :, :3].contiguous() + if data_type == RenderBufferKind.RGB_HDR: + tiled_data_buffer = tiled_data_buffer[:, :, :3].contiguous() wp.launch( kernel=reshape_tiled_image, diff --git a/source/isaaclab_physx/test/renderers/test_isaac_rtx_renderer_contract.py b/source/isaaclab_physx/test/renderers/test_isaac_rtx_renderer_contract.py new file mode 100644 index 
000000000000..5008d10cad43 --- /dev/null +++ b/source/isaaclab_physx/test/renderers/test_isaac_rtx_renderer_contract.py @@ -0,0 +1,120 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for the Isaac RTX renderer output contract.""" + +from __future__ import annotations + +import sys +import types +from types import SimpleNamespace +from unittest.mock import MagicMock, patch + +import pytest +import torch +from packaging import version + +from isaaclab.renderers import RenderBufferKind, RenderBufferSpec +from isaaclab.renderers.camera_render_spec import CameraRenderSpec +from isaaclab.sensors.camera import CameraCfg +from isaaclab.sim import PinholeCameraCfg + +pytestmark = pytest.mark.isaacsim_ci + + +def _install_omni_stubs(monkeypatch): + omni_module = sys.modules.get("omni", types.ModuleType("omni")) + replicator_module = types.ModuleType("omni.replicator") + replicator_core_module = types.ModuleType("omni.replicator.core") + syntheticdata_module = types.ModuleType("omni.syntheticdata") + usd_module = MagicMock() + + monkeypatch.setitem(sys.modules, "omni", omni_module) + monkeypatch.setitem(sys.modules, "omni.replicator", replicator_module) + monkeypatch.setitem(sys.modules, "omni.replicator.core", replicator_core_module) + monkeypatch.setitem(sys.modules, "omni.syntheticdata", syntheticdata_module) + monkeypatch.setitem(sys.modules, "omni.usd", usd_module) + monkeypatch.setattr(omni_module, "replicator", replicator_module, raising=False) + monkeypatch.setattr(omni_module, "syntheticdata", syntheticdata_module, raising=False) + monkeypatch.setattr(omni_module, "usd", usd_module, raising=False) + monkeypatch.setattr(replicator_module, "core", replicator_core_module, raising=False) + + return replicator_core_module, syntheticdata_module + + +def _make_camera_spec(ppisp): + cfg = CameraCfg( + height=8, + width=16, + prim_path="/World/Camera", + spawn=PinholeCameraCfg(focal_length=24.0, horizontal_aperture=20.955), + data_types=["rgb"], + ppisp=ppisp, + ) + return CameraRenderSpec( + cfg=cfg, + device="cpu", + num_instances=1, + camera_prim_paths=("/World/Camera",), + view_count=1, + camera_path_relative_to_env_0="Camera", + ) + + +def test_isaac_rtx_supported_output_types_include_rgb_hdr(monkeypatch): + """Isaac RTX advertises RGB_HDR as a 3-channel float renderer output.""" + _install_omni_stubs(monkeypatch) + from isaaclab_physx.renderers.isaac_rtx_renderer import IsaacRtxRenderer + from isaaclab_physx.renderers.isaac_rtx_renderer_cfg import IsaacRtxRendererCfg + + renderer = IsaacRtxRenderer.__new__(IsaacRtxRenderer) + renderer.cfg = IsaacRtxRendererCfg() + with patch("isaaclab_physx.renderers.isaac_rtx_renderer.get_isaac_sim_version", return_value=version.parse("6.0")): + specs = renderer.supported_output_types() + + assert specs[RenderBufferKind.RGB_HDR] == RenderBufferSpec(3, torch.float32) + + +def test_isaac_rtx_requests_hdr_color_annotator_for_camera_ppisp(monkeypatch): + """PPISP switches Isaac RTX color acquisition from LDR rgb to HdrColor.""" + rep, syntheticdata = _install_omni_stubs(monkeypatch) + + from isaaclab_physx.renderers.isaac_rtx_renderer import IsaacRtxRenderer + from isaaclab_physx.renderers.isaac_rtx_renderer_cfg import IsaacRtxRendererCfg + + annotators = {} + + def get_annotator(name, *args, **kwargs): + annotator = MagicMock(name=f"{name}_annotator") + annotators[name] = annotator + return annotator + 
+ rep.AnnotatorRegistry = SimpleNamespace(get_annotator=MagicMock(side_effect=get_annotator)) + rep.create = SimpleNamespace(render_product_tiled=MagicMock(return_value=SimpleNamespace(path="/Render/Product"))) + syntheticdata.SyntheticData = SimpleNamespace( + Get=MagicMock(return_value=SimpleNamespace(set_instance_mapping_semantic_filter=MagicMock())) + ) + + camera_prim = MagicMock() + camera_prim.IsA.return_value = True + stage = MagicMock() + stage.GetPrimAtPath.return_value = camera_prim + stage_module = types.ModuleType("isaaclab.sim.utils.stage") + stage_module.get_current_stage = MagicMock(return_value=stage) + monkeypatch.setitem(sys.modules, "isaaclab.sim.utils.stage", stage_module) + + settings = SimpleNamespace(get=MagicMock(return_value=False), set_bool=MagicMock(), set_int=MagicMock()) + renderer = IsaacRtxRenderer.__new__(IsaacRtxRenderer) + renderer.cfg = IsaacRtxRendererCfg() + + with ( + patch("isaaclab_physx.renderers.isaac_rtx_renderer.get_settings_manager", return_value=settings), + patch("isaaclab_physx.renderers.isaac_rtx_renderer.get_isaac_sim_version", return_value=version.parse("6.0")), + ): + render_data = renderer.create_render_data(_make_camera_spec(ppisp={"inputs": {"exposureOffset": 0.0}})) + + rep.AnnotatorRegistry.get_annotator.assert_called_with("HdrColor", device="cpu", do_array_copy=False) + assert str(RenderBufferKind.RGB_HDR) in render_data.annotators + assert render_data.annotators[str(RenderBufferKind.RGB_HDR)] is annotators["HdrColor"]
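
Taken together, the USD import route exercised by these tests looks like this end to end. The stage path below is a placeholder, and the RenderProduct layout matches what the parser expects (a PPISP shader authored as a child prim named `PPISP`):

```python
from isaaclab.renderers.camera_ppisp import parse_render_product_file

# Placeholder path to an exported stage; parse_render_product_file opens the
# stage and validates the RenderProduct prim before reading its PPISP child.
info = parse_render_product_file("/path/to/exported_scene.usda", "/Render/RenderProduct")

print(info.resolution, info.camera_paths, info.camera_xform_time_samples)
if info.ppisp is not None:
    # Animated shader inputs were collapsed to their first authored time sample.
    print(info.ppisp.inputs["exposureOffset"])
```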