19 changes: 10 additions & 9 deletions tools/template/cli.py
@@ -7,6 +7,7 @@
 import importlib
 import os
 from collections.abc import Callable
+from textwrap import fill

 import rich.console
 import rich.table
@@ -213,21 +214,21 @@ def main() -> None:
     # - show supported RL libraries and features
     rl_library_table = rich.table.Table(title="Supported RL libraries")
     rl_library_table.add_column("RL/training feature", no_wrap=True)
-    rl_library_table.add_column("rl_games")
-    rl_library_table.add_column("rsl_rl")
-    rl_library_table.add_column("skrl")
-    rl_library_table.add_column("sb3")
+    rl_library_table.add_column("rl_games", overflow="fold")
+    rl_library_table.add_column("rsl_rl", overflow="fold")
+    rl_library_table.add_column("skrl", overflow="fold")
+    rl_library_table.add_column("sb3", overflow="fold")
     rl_library_table.add_row("ML frameworks", "PyTorch", "PyTorch", "PyTorch, JAX", "PyTorch")
     rl_library_table.add_row("Relative performance", "~1X", "~1X", "~1X", "~0.03X")
     rl_library_table.add_row(
         "Algorithms",
-        ", ".join(algorithms_per_rl_library.get("rl_games", [])),
-        ", ".join(algorithms_per_rl_library.get("rsl_rl", [])),
-        ", ".join(algorithms_per_rl_library.get("skrl", [])),
-        ", ".join(algorithms_per_rl_library.get("sb3", [])),
+        fill(", ".join(algorithms_per_rl_library.get("rl_games", [])), width=12, break_long_words=False),
+        fill(", ".join(algorithms_per_rl_library.get("rsl_rl", [])), width=12, break_long_words=False),
+        fill(", ".join(algorithms_per_rl_library.get("skrl", [])), width=12, break_long_words=False),
+        fill(", ".join(algorithms_per_rl_library.get("sb3", [])), width=12, break_long_words=False),
     )
     rl_library_table.add_row("Multi-agent support", State.No, State.No, State.Yes, State.No)
-    rl_library_table.add_row("Distributed training", State.Yes, State.No, State.Yes, State.No)
+    rl_library_table.add_row("Distributed training", State.Yes, State.Yes, State.Yes, State.No)
     rl_library_table.add_row("Vectorized training", State.Yes, State.Yes, State.Yes, State.No)
     rl_library_table.add_row("Fundamental/composite spaces", State.No, State.No, State.Yes, State.No)
     cli_handler.output_table(rl_library_table)
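The `overflow="fold"` columns and the pre-wrapped cells keep the table readable now that the algorithms row can hold longer lists. A minimal sketch of the wrapping behavior, using a hypothetical algorithm list:

```python
from textwrap import fill

# Hypothetical list; the real values come from algorithms_per_rl_library.
algorithms = ["AMP", "PPO", "DISTILLATION"]

# width=12 keeps the cell narrow; break_long_words=False avoids splitting a
# name like "DISTILLATION" mid-word, so each wrapped line holds whole tokens.
print(fill(", ".join(algorithms), width=12, break_long_words=False))
# AMP, PPO,
# DISTILLATION
```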
2 changes: 1 addition & 1 deletion tools/template/common.py
@@ -11,5 +11,5 @@
 TEMPLATE_DIR = os.path.join(ROOT_DIR, "tools", "template", "templates")

 # RL algorithms
-SINGLE_AGENT_ALGORITHMS = ["AMP", "PPO"]
+SINGLE_AGENT_ALGORITHMS = ["AMP", "PPO", "DISTILLATION"]
 MULTI_AGENT_ALGORITHMS = ["IPPO", "MAPPO"]
34 changes: 34 additions & 0 deletions tools/template/templates/agents/rsl_rl_distillation_cfg
@@ -0,0 +1,34 @@
+# Copyright (c) 2022-2025, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+from isaaclab.utils import configclass
+
+from isaaclab_rl.rsl_rl import RslRlDistillationAlgorithmCfg, RslRlDistillationRunnerCfg, RslRlMLPModelCfg
+
+
+@configclass
+class DistillationRunnerCfg(RslRlDistillationRunnerCfg):
+    num_steps_per_env = 60
+    max_iterations = 150
+    save_interval = 50
+    experiment_name = "cartpole_direct"
+    obs_groups = {"student": ["policy"], "teacher": ["policy"]}
+    student = RslRlMLPModelCfg(
+        hidden_dims=[32, 32],
+        activation="elu",
+        obs_normalization=False,
+        distribution_cfg=RslRlMLPModelCfg.GaussianDistributionCfg(init_std=1.0),
+    )
+    teacher = RslRlMLPModelCfg(
+        hidden_dims=[32, 32],
+        activation="elu",
+        obs_normalization=False,
+        distribution_cfg=RslRlMLPModelCfg.GaussianDistributionCfg(init_std=0.0),
+    )
+    algorithm = RslRlDistillationAlgorithmCfg(
+        num_learning_epochs=2,
+        learning_rate=1.0e-3,
+        gradient_length=15,
+    )
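In this template the student keeps an exploratory Gaussian head (`init_std=1.0`) while the teacher's `init_std=0.0` makes its actions deterministic, and both networks read the same "policy" observation group. A minimal sketch of inspecting the rendered config; the module path is illustrative only, since the actual package name depends on the task the generator produces:

```python
import importlib

# Hypothetical path to a generated agents package.
agents = importlib.import_module("my_task.agents.rsl_rl_distillation_cfg")
cfg = agents.DistillationRunnerCfg()

# Both networks consume the same observation group.
print(cfg.obs_groups)                 # {"student": ["policy"], "teacher": ["policy"]}
# Distillation hyperparameters live on the algorithm sub-config.
print(cfg.algorithm.gradient_length)  # 15
```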
18 changes: 11 additions & 7 deletions tools/template/templates/agents/rsl_rl_ppo_cfg
@@ -5,7 +5,7 @@
 
 from isaaclab.utils import configclass
 
-from isaaclab_rl.rsl_rl import RslRlOnPolicyRunnerCfg, RslRlPpoActorCriticCfg, RslRlPpoAlgorithmCfg
+from isaaclab_rl.rsl_rl import RslRlMLPModelCfg, RslRlOnPolicyRunnerCfg, RslRlPpoAlgorithmCfg
 
 
 @configclass
@@ -14,13 +14,17 @@ class PPORunnerCfg(RslRlOnPolicyRunnerCfg):
     max_iterations = 150
     save_interval = 50
     experiment_name = "cartpole_direct"
-    policy = RslRlPpoActorCriticCfg(
-        init_noise_std=1.0,
-        actor_obs_normalization=False,
-        critic_obs_normalization=False,
-        actor_hidden_dims=[32, 32],
-        critic_hidden_dims=[32, 32],
+    obs_groups = {"actor": ["policy"], "critic": ["policy"]}
+    actor = RslRlMLPModelCfg(
+        hidden_dims=[32, 32],
+        activation="elu",
+        obs_normalization=False,
+        distribution_cfg=RslRlMLPModelCfg.GaussianDistributionCfg(init_std=1.0),
+    )
+    critic = RslRlMLPModelCfg(
+        hidden_dims=[32, 32],
+        activation="elu",
+        obs_normalization=False,
     )
     algorithm = RslRlPpoAlgorithmCfg(
         value_loss_coef=1.0,
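The switch from the monolithic `RslRlPpoActorCriticCfg` to per-network `RslRlMLPModelCfg` instances splits actor and critic settings apart; note the critic takes no `distribution_cfg`, since it regresses values rather than sampling actions. A rough field mapping inferred from this hunk (not an official migration guide):

```python
# Old RslRlPpoActorCriticCfg field -> new location in this config.
field_mapping = {
    "init_noise_std": "actor.distribution_cfg.init_std",
    "actor_hidden_dims": "actor.hidden_dims",
    "critic_hidden_dims": "critic.hidden_dims",
    "actor_obs_normalization": "actor.obs_normalization",
    "critic_obs_normalization": "critic.obs_normalization",
}
```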