curobo.rollout.dynamics_model.kinematic_model module

class TimeTrajConfig(
base_dt: float,
base_ratio: float,
max_dt: float,
)

Bases: object

base_dt: float
base_ratio: float
max_dt: float
get_dt_array(
num_points: int,
)
update_dt(
all_dt: float | None = None,
base_dt: float | None = None,
max_dt: float | None = None,
base_ratio: float | None = None,
)
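
A minimal usage sketch, assuming get_dt_array returns one timestep per trajectory point that starts at base_dt and grows toward max_dt, and that passing all_dt to update_dt sets a single uniform timestep (both behaviors are inferred from the field and argument names above):

    from curobo.rollout.dynamics_model.kinematic_model import TimeTrajConfig

    # Illustrative values: short steps early in the trajectory, longer steps later.
    traj_dt = TimeTrajConfig(base_dt=0.02, base_ratio=0.5, max_dt=0.25)

    # One dt per trajectory point over a 30-step horizon.
    dt_array = traj_dt.get_dt_array(num_points=30)

    # Retime later without rebuilding the config.
    traj_dt.update_dt(all_dt=0.03)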
class KinematicModelState(
state_seq: curobo.types.state.JointState,
ee_pos_seq: torch.Tensor | None = None,
ee_quat_seq: torch.Tensor | None = None,
robot_spheres: torch.Tensor | None = None,
link_pos_seq: torch.Tensor | None = None,
link_quat_seq: torch.Tensor | None = None,
lin_jac_seq: torch.Tensor | None = None,
ang_jac_seq: torch.Tensor | None = None,
link_names: List[str] | None = None,
)

Bases: Sequence

state_seq: JointState
ee_pos_seq: Tensor | None = None
ee_quat_seq: Tensor | None = None
robot_spheres: Tensor | None = None
link_pos_seq: Tensor | None = None
link_quat_seq: Tensor | None = None
lin_jac_seq: Tensor | None = None
ang_jac_seq: Tensor | None = None
link_names: List[str] | None = None
property ee_pose: Pose

Deprecated: Use link_poses instead.

count(
value,
) int

Return the number of occurrences of value.

index(
value,
start=0,
stop=None,
) int

Return the first index of value.

Raises ValueError if the value is not present.
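
Because KinematicModelState derives from Sequence, its fields can be read by name or unpacked positionally. A reading sketch (the shape comments are assumptions, not documented contracts):

    from curobo.rollout.dynamics_model.kinematic_model import KinematicModelState

    def summarize_rollout(state: KinematicModelState) -> None:
        # Joint-space trajectory; JointState exposes position, velocity, acceleration.
        print("joint positions:", state.state_seq.position.shape)
        # End-effector sequences are populated when forward kinematics is computed.
        if state.ee_pos_seq is not None and state.ee_quat_seq is not None:
            print("ee positions:", state.ee_pos_seq.shape)     # e.g. [batch, horizon, 3] (assumed)
            print("ee quaternions:", state.ee_quat_seq.shape)  # e.g. [batch, horizon, 4] (assumed)
        # Positional unpacking also works through the Sequence interface.
        state_seq, ee_pos_seq, ee_quat_seq, *_ = state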

class KinematicModelConfig(
robot_config: curobo.types.robot.RobotConfig,
dt_traj_params: curobo.rollout.dynamics_model.kinematic_model.TimeTrajConfig,
tensor_args: curobo.types.base.TensorDeviceType,
vel_scale: float = 1.0,
state_estimation_variance: float = 0.0,
batch_size: int = 1,
horizon: int = 5,
control_space: curobo.types.enum.StateType = <StateType.ACCELERATION: 2>,
state_filter_cfg: Optional[curobo.util.state_filter.FilterConfig] = None,
teleport_mode: bool = False,
return_full_act_buffer: bool = False,
state_finite_difference_mode: str = 'BACKWARD',
filter_robot_command: bool = False,
)

Bases: object

robot_config: RobotConfig
dt_traj_params: TimeTrajConfig
tensor_args: TensorDeviceType
vel_scale: float = 1.0
state_estimation_variance: float = 0.0
batch_size: int = 1
horizon: int = 5
control_space: StateType = StateType.ACCELERATION
state_filter_cfg: FilterConfig | None = None
teleport_mode: bool = False
return_full_act_buffer: bool = False
state_finite_difference_mode: str = 'BACKWARD'
filter_robot_command: bool = False
static from_dict(
data_dict_in,
robot_cfg: Dict | RobotConfig,
tensor_args=TensorDeviceType(device=device(type='cuda', index=0), dtype=torch.float32, collision_geometry_dtype=torch.float32, collision_gradient_dtype=torch.float32, collision_distance_dtype=torch.float32),
)
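
A construction sketch using the dataclass fields shown above; the field values are illustrative and the RobotConfig is assumed to have been loaded elsewhere:

    from curobo.rollout.dynamics_model.kinematic_model import (
        KinematicModelConfig,
        TimeTrajConfig,
    )
    from curobo.types.base import TensorDeviceType
    from curobo.types.robot import RobotConfig

    def build_model_config(robot_cfg: RobotConfig) -> KinematicModelConfig:
        return KinematicModelConfig(
            robot_config=robot_cfg,
            dt_traj_params=TimeTrajConfig(base_dt=0.02, base_ratio=0.5, max_dt=0.25),
            tensor_args=TensorDeviceType(),  # defaults to cuda:0, float32
            batch_size=4,
            horizon=30,
        )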
class KinematicModel(
kinematic_model_config: KinematicModelConfig,
)

Bases: KinematicModelConfig

update_traj_dt(
dt: float | Tensor,
base_dt: float | None = None,
max_dt: float | None = None,
base_ratio: float | None = None,
)
get_next_state(
curr_state: Tensor,
act: Tensor,
dt,
)

Performs a single integration step from the current state.

Parameters:

curr_state: current state.

act: action to apply.

dt: time to integrate over.

Returns:

next_state

TODO: Move this into tensorstep class?
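
For the acceleration control space, a single step conceptually integrates the commanded acceleration into velocity and then into position. The sketch below illustrates that idea only; it is not the library's batched implementation:

    import torch

    def euler_step(q: torch.Tensor, qd: torch.Tensor, qdd_cmd: torch.Tensor, dt: float):
        # Integrate the acceleration command into velocity, then velocity into position.
        qd_next = qd + qdd_cmd * dt
        q_next = q + qd_next * dt
        return q_next, qd_next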

tensor_step(
state: JointState,
act: Tensor,
state_seq: JointState,
state_idx: Tensor | None = None,
) JointState

Parameters:

state: shape [1, N].

act: shape [H, N].

TODO: Integration with variable dt along the trajectory.
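
Repeating that step across the horizon with a per-step dt array (for example one from TimeTrajConfig.get_dt_array) matches the shape pattern above: a [1, N] start state and [H, N] actions. The loop below mirrors the idea conceptually; the actual method writes into the preallocated state_seq buffer and supports other control spaces:

    import torch

    def rollout_positions(q0: torch.Tensor, qd0: torch.Tensor,
                          act: torch.Tensor, dt_array: torch.Tensor) -> torch.Tensor:
        # q0, qd0: [1, N] start position and velocity; act: [H, N] acceleration commands.
        q, qd = q0.clone(), qd0.clone()
        positions = []
        for h in range(act.shape[0]):
            dt = dt_array[h]
            qd = qd + act[h] * dt  # acceleration -> velocity
            q = q + qd * dt        # velocity -> position
            positions.append(q)
        return torch.stack(positions)  # [H, 1, N]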

robot_cmd_tensor_step(
state: JointState,
act: Tensor,
state_seq: JointState,
state_idx: Tensor | None = None,
) JointState

Parameters:

state: shape [1, N].

act: shape [H, N].

TODO: Integration with variable dt along the trajectory.

update_cmd_batch_size(
batch_size,
)
update_batch_size(
batch_size,
force_update=False,
)
forward(
start_state: JointState,
act_seq: Tensor,
start_state_idx: Tensor | None = None,
) KinematicModelState
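
A rollout sketch using forward(). The [batch, horizon, n_dof] action layout and the use of JointState.from_position are assumptions for illustration; only the forward signature itself comes from the listing above:

    import torch
    from curobo.rollout.dynamics_model.kinematic_model import (
        KinematicModel,
        KinematicModelState,
    )
    from curobo.types.state import JointState

    def rollout_zero_actions(model: KinematicModel, n_dof: int) -> KinematicModelState:
        device = model.tensor_args.device
        start_state = JointState.from_position(torch.zeros(1, n_dof, device=device))
        # Assumed layout: one action per joint, per horizon step, per batch entry.
        act_seq = torch.zeros(model.batch_size, model.horizon, n_dof, device=device)
        return model.forward(start_state, act_seq)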
integrate_action(act_seq)
integrate_action_step(
act,
dt,
)
filter_robot_state(
current_state: JointState,
)
get_robot_command(
current_state: JointState,
act_seq: Tensor,
shift_steps: int = 1,
state_idx: Tensor | None = None,
) JointState
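
In a receding-horizon loop, get_robot_command turns the measured state plus an optimized action sequence into the next command for the robot. A sketch of that call; producing the action sequence and sending the command are outside this module:

    import torch
    from curobo.rollout.dynamics_model.kinematic_model import KinematicModel
    from curobo.types.state import JointState

    def next_command(model: KinematicModel, measured_state: JointState,
                     optimized_act_seq: torch.Tensor) -> JointState:
        # shift_steps=1: execute one step of the plan before replanning (MPC style).
        return model.get_robot_command(
            current_state=measured_state,
            act_seq=optimized_act_seq,
            shift_steps=1,
        )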
property action_bound_lows
property action_bound_highs
property init_action_mean
property retract_config
batch_size: int = 1
control_space: StateType = StateType.ACCELERATION
property cspace_distance_weight
filter_robot_command: bool = False
static from_dict(
data_dict_in,
robot_cfg: Dict | RobotConfig,
tensor_args=TensorDeviceType(device=device(type='cuda', index=0), dtype=torch.float32, collision_geometry_dtype=torch.float32, collision_gradient_dtype=torch.float32, collision_distance_dtype=torch.float32),
)
horizon: int = 5
return_full_act_buffer: bool = False
state_estimation_variance: float = 0.0
state_filter_cfg: FilterConfig | None = None
state_finite_difference_mode: str = 'BACKWARD'
teleport_mode: bool = False
vel_scale: float = 1.0
robot_config: RobotConfig
dt_traj_params: TimeTrajConfig
tensor_args: TensorDeviceType
property null_space_weight
property max_acceleration
property max_jerk
property action_horizon
get_state_bounds()
get_action_from_state(
state: JointState,
) Tensor
get_state_from_action(
start_state: JointState,
act_seq: Tensor,
state_idx: Tensor | None = None,
) JointState

Compute the state sequence that results from integrating an action trajectory from a start state.

Parameters:

start_state: starting joint state.

act_seq: action sequence to integrate.

state_idx: optional start state index.

Returns:

State sequence obtained by integrating act_seq from start_state.

Return type:

JointState
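
The two methods above appear complementary: get_action_from_state recovers the actions behind a given state trajectory, and get_state_from_action integrates actions back into states. A round-trip sketch under that assumption:

    from curobo.rollout.dynamics_model.kinematic_model import KinematicModel
    from curobo.types.state import JointState

    def roundtrip(model: KinematicModel, start_state: JointState,
                  state_traj: JointState) -> JointState:
        act_seq = model.get_action_from_state(state_traj)         # Tensor of actions
        return model.get_state_from_action(start_state, act_seq)  # integrated JointState sequence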