|
5 | 5 | import jax |
6 | 6 | import jax.numpy as jnp |
7 | 7 | import numpy as np |
| 8 | +from drone_controllers.mellinger.params import ForceTorqueParams |
8 | 9 | from gymnasium import spaces |
9 | 10 | from gymnasium.vector import AutoresetMode, VectorEnv |
10 | 11 | from gymnasium.vector.utils import batch_space |
11 | 12 | from jax import Array |
12 | 13 | from numpy.typing import NDArray |
13 | 14 |
|
14 | | -from crazyflow.control.control import MAX_THRUST, MIN_THRUST, Control |
| 15 | +from crazyflow.control.control import Control |
15 | 16 | from crazyflow.sim import Sim |
| 17 | +from crazyflow.sim.data import SimData |
16 | 18 | from crazyflow.sim.physics import Physics |
17 | | -from crazyflow.sim.structs import SimData |
18 | 19 | from crazyflow.utils import leaf_replace |
19 | 20 |
|
20 | 21 |
|
21 | | -def action_space(control_type: Control) -> spaces.Box: |
| 22 | +def action_space(control_type: Control, drone_model: str) -> spaces.Box: |
22 | 23 | """Select the appropriate action space for a given control type. |
23 | 24 |
|
24 | 25 | Args: |
25 | 26 | control_type: The desired control mode. |
| 27 | + drone_model: Name of the drone model, used to load its thrust limits.
26 | 28 |
|
27 | 29 | Returns: |
28 | 30 | The action space. |
29 | 31 | """ |
30 | 32 | match control_type: |
31 | 33 | case Control.attitude: |
| 34 | + params = ForceTorqueParams.load(drone_model) |
| 35 | + thrust_min, thrust_max = params.thrust_min * 4, params.thrust_max * 4 |
32 | 36 | return spaces.Box( |
33 | | - np.array([4 * MIN_THRUST, -np.pi / 2, -np.pi / 2, -np.pi / 2], dtype=np.float32), |
34 | | - np.array([4 * MAX_THRUST, np.pi / 2, np.pi / 2, np.pi / 2], dtype=np.float32), |
| 37 | + np.array([-np.pi / 2, -np.pi / 2, -np.pi / 2, thrust_min], dtype=np.float32), |
| 38 | + np.array([np.pi / 2, np.pi / 2, np.pi / 2, thrust_max], dtype=np.float32), |
35 | 39 | ) |
36 | 40 | case Control.force_torque: |
37 | 41 | return spaces.Box(-1.0, 1.0, shape=(6,)) |
@@ -99,7 +103,7 @@ def __init__( |
99 | 103 | self._marked_for_reset = jnp.zeros((self.sim.n_worlds), dtype=jnp.bool_, device=self.device) |
100 | 104 |
|
101 | 105 | # Define action and observation spaces |
102 | | - self.single_action_space = action_space(self.sim.control) |
| 106 | + self.single_action_space = action_space(self.sim.control, self.sim.drone_model) |
103 | 107 | self.action_space = batch_space(self.single_action_space, self.sim.n_worlds) |
104 | 108 | self.single_observation_space = spaces.Dict( |
105 | 109 | { |
|
0 commit comments