curobo.rollout.rollout_base module

class RolloutMetrics(
cost: 'Optional[T_BValue_float]' = None,
constraint: 'Optional[T_BValue_float]' = None,
feasible: 'Optional[T_BValue_bool]' = None,
state: 'Optional[State]' = None,
)

Bases: Sequence

cost: Tensor | None = None
constraint: Tensor | None = None
feasible: Tensor | None = None
state: State | None = None
clone(clone_state=False)
_abc_impl = <_abc._abc_data object>
_is_protocol = False
count(
value,
) → integer -- return number of occurrences of value
index(
value[,
start[,
stop,]]
) → integer -- return first index of value.

Raises ValueError if the value is not present.

Supporting start and stop arguments is optional, but recommended.

class Trajectory(
actions: 'T_BHDOF_float',
costs: 'T_BHValue_float',
state: 'Optional[State]' = None,
debug: 'Optional[dict]' = None,
)

Bases: object

actions: Tensor
costs: Tensor
state: State | None = None
debug: dict | None = None
class Goal(
name: str = 'goal',
goal_state: ~curobo.types.state.State | None = None,
goal_pose: ~curobo.types.math.Pose = <factory>,
links_goal_pose: ~typing.Dict[str,
~curobo.types.math.Pose] | None = None,
current_state: ~curobo.types.state.State | None = None,
retract_state: ~torch.Tensor | None = None,
batch: int = -1,
batch_pose_idx: ~torch.Tensor | None = None,
batch_goal_state_idx: ~torch.Tensor | None = None,
batch_retract_state_idx: ~torch.Tensor | None = None,
batch_current_state_idx: ~torch.Tensor | None = None,
batch_enable_idx: ~torch.Tensor | None = None,
batch_world_idx: ~torch.Tensor | None = None,
update_batch_idx_buffers: bool = True,
n_goalset: int = 1,
)

Bases: Sequence

Goal data class used to update optimization target.

NOTE: Goal can be parallelized in two ways: (1) solve for each (current_state, pose) pair within the same environment, or (2) solve for each (current_state, pose) pair in a different environment. For case (1), batch_pose_idx is used to find the memory address of each (current_state, pose) pair while batch_world_idx is kept at [0]. For case (2), batch_world_idx is additionally set to [0, 1, 2, ...] so each pair maps to its own environment.

name: str = 'goal'
goal_state: State | None = None
goal_pose: Pose
current_state: State | None = None
retract_state: Tensor | None = None
batch: int = -1
batch_pose_idx: Tensor | None = None
batch_goal_state_idx: Tensor | None = None
batch_retract_state_idx: Tensor | None = None
batch_current_state_idx: Tensor | None = None
batch_enable_idx: Tensor | None = None
batch_world_idx: Tensor | None = None
update_batch_idx_buffers: bool = True
n_goalset: int = 1
_update_batch_size()
repeat_seeds(num_seeds: int)
clone()
_tensor_repeat_seeds(
tensor,
num_seeds,
)
apply_kernel(kernel_mat)
to(
tensor_args: TensorDeviceType,
)
copy_(
goal: Goal,
update_idx_buffers: bool = True,
)

Copy data from another goal object.

Parameters:

goal (Goal) – Source goal whose data is copied into this goal's buffers.

update_idx_buffers (bool) – If True, also copy the batch index buffers from the source goal. Defaults to True.

Returns:

This goal, updated with the data copied from the source goal (presumably returns self — verify against the implementation).

Return type:

Goal

_copy_buffer(ref_buffer, buffer)
_copy_tensor(ref_buffer, buffer)
get_batch_goal_state()
create_index_buffers(
batch_size: int,
batch_env: bool,
batch_retract: bool,
num_seeds: int,
tensor_args: TensorDeviceType,
)
classmethod create_idx(
pose_batch_size: int,
batch_env: bool,
batch_retract: bool,
num_seeds: int,
tensor_args: TensorDeviceType,
)
_abc_impl = <_abc._abc_data object>
_is_protocol = False
count(
value,
) → integer -- return number of occurrences of value
index(
value[,
start[,
stop,]]
) → integer -- return first index of value.

Raises ValueError if the value is not present.

Supporting start and stop arguments is optional, but recommended.

class RolloutConfig(
tensor_args: 'TensorDeviceType',
sum_horizon: 'bool' = False,
sampler_seed: 'int' = 1312,
)

Bases: object

tensor_args: TensorDeviceType
sum_horizon: bool = False
sampler_seed: int = 1312
class RolloutBase(
config: RolloutConfig | None = None,
)

Bases: object

_init_after_config_load()
abstract cost_fn(
state: State,
)
abstract constraint_fn(
state: State,
out_metrics: RolloutMetrics | None = None,
) RolloutMetrics
abstract convergence_fn(
state: State,
out_metrics: RolloutMetrics | None = None,
) RolloutMetrics
get_metrics(
state: State,
)
get_metrics_cuda_graph(
state: State,
)
rollout_fn(act)
current_cost(current_state)
abstract update_params(
goal: Goal,
)
abstract property action_bounds
abstract filter_robot_state(
current_state: State,
) State
abstract get_robot_command(
current_state,
act_seq,
shift_steps: int = 1,
state_idx: Tensor | None = None,
)
reset_seed()
reset()
abstract property d_action: int
abstract property action_bound_lows
abstract property action_bound_highs
abstract property dt
property horizon: int
property action_horizon: int
update_start_state(
start_state: Tensor,
)
abstract get_init_action_seq()
property state_bounds: Dict[str, List[float]]
sample_random_actions(
n: int = 0,
)
abstract rollout_constraint(
act_seq: Tensor,
) RolloutMetrics
reset_cuda_graph()
reset_shape()
property cuda_graph_instance
abstract get_action_from_state(
state: State,
)
abstract get_state_from_action(
start_state: State,
act_seq: Tensor,
state_idx: Tensor | None = None,
)
abstract property cspace_config: CSpaceConfig
get_full_dof_from_solution(
q_js: JointState,
) JointState
break_cuda_graph()
tensor_repeat_seeds(
tensor,
num_seeds: int,
)