base#

class ArrayFromDictObservation(dict_observation: DictObservation[TStateAction])[source]#
compute_observation(state: TStateAction) ndarray[source]#
property observation_space: Box#
class ArrayObservation[source]#
abstract property observation_space: Box#
class ConcatenatedArrayObservation(array_observations: list[ArrayObservation[TStateAction]])[source]#
compute_observation(state: TStateAction) ndarray[source]#
static concatenate_boxes(boxes: list[Box]) Box[source]#
property observation_space: Box#
class DictObservation[source]#
merged_with(other: Self) MergedDictObservation[TStateAction][source]#
abstract property observation_space: Dict#
to_array_observation() ArrayObservation[TStateAction][source]#
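
A minimal sketch of how DictObservation is meant to be composed, assuming two hypothetical subclasses (PositionObservation, RotationObservation) over a hypothetical ProbeState state type; the key names and array layout are illustrative assumptions, not part of this API.

    import numpy as np
    from gymnasium.spaces import Box, Dict
    # DictObservation is imported from this module

    class ProbeState:  # hypothetical stand-in for TStateAction
        def __init__(self, position: np.ndarray, rotation: np.ndarray) -> None:
            self.position = position
            self.rotation = rotation

    class PositionObservation(DictObservation[ProbeState]):
        @property
        def observation_space(self) -> Dict:
            return Dict({"position": Box(low=-1.0, high=1.0, shape=(3,))})

        def compute_observation(self, state: ProbeState) -> dict[str, np.ndarray]:
            return {"position": state.position}

    class RotationObservation(DictObservation[ProbeState]):
        @property
        def observation_space(self) -> Dict:
            return Dict({"rotation": Box(low=-np.pi, high=np.pi, shape=(3,))})

        def compute_observation(self, state: ProbeState) -> dict[str, np.ndarray]:
            return {"rotation": state.rotation}

    merged = PositionObservation().merged_with(RotationObservation())  # MergedDictObservation
    flat = merged.to_array_observation()  # ArrayObservation with a single Box space

Judging by their signatures, ConcatenatedArrayObservation and concatenate_boxes() play the analogous role for combining several ArrayObservation instances into one Box-valued observation.
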
class DummyArrayObservation[source]#
compute_observation(state: Any) ndarray[source]#
property observation_space: Space[TObs]#
class DummyDictObservation[source]#
compute_observation(state: Any) dict[str, ndarray][source]#
property observation_space: Dict#
exception EnvPreconditionError[source]#
class EnvRollout(*, observations: list[TObs] = <factory>, rewards: list[float] = <factory>, actions: list[TAction | None] = <factory>, infos: list[dict[str, Any]] = <factory>, terminated: list[bool] = <factory>, truncated: list[bool] = <factory>)[source]#
actions: list[TAction | None]#
append_reset(observation: TObs, info: dict[str, Any], reward: float = 0, terminated: bool = False, truncated: bool = False) None[source]#
append_step(action: TAction, observation: TObs, reward: float, terminated: bool, truncated: bool, info: dict[str, Any]) None[source]#
infos: list[dict[str, Any]]#
observations: list[TObs]#
rewards: list[float]#
terminated: list[bool]#
truncated: list[bool]#
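
A sketch of how an EnvRollout might be filled over one episode. It assumes any environment following the Gymnasium reset()/step() API (a ModularEnv subclass fits the same contract); gym.make("CartPole-v1") is used here only for illustration.

    import gymnasium as gym
    # EnvRollout is imported from this module

    env = gym.make("CartPole-v1")
    rollout = EnvRollout()

    obs, info = env.reset(seed=0)
    rollout.append_reset(obs, info)

    terminated = truncated = False
    while not (terminated or truncated):
        action = env.action_space.sample()
        obs, reward, terminated, truncated, info = env.step(action)
        rollout.append_step(action, obs, reward, terminated, truncated, info)

    episode_return = sum(rollout.rewards)  # the parallel lists stay index-aligned
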
class EnvStatus(*, episode_len: int, state_action: TStateAction | None, observation: TObs | None, reward: float | None, is_terminated: bool, is_truncated: bool, is_closed: bool, info: dict[str, Any])[source]#
episode_len: int#
info: dict[str, Any]#
is_closed: bool#
is_terminated: bool#
is_truncated: bool#
observation: TObs | None#
reward: float | None#
state_action: TStateAction | None#
class MergedDictObservation(dict_observations: list[DictObservation[TStateAction]])[source]#
compute_observation(state: TStateAction) dict[str, ndarray][source]#
property dict_observations: list[DictObservation[TStateAction]]#
property observation_space: Dict#
class ModularEnv(reward_metric: RewardMetric[TStateAction], observation: Observation[TStateAction, TObs], termination_criterion: TerminationCriterion | None = None, max_episode_len: int | None = None)[source]#
abstract property action_space: Space[TAction]#
close() None[source]#

After the user has finished using the environment, close() contains the code necessary to clean up the environment.

This is critical for closing rendering windows and database or HTTP connections. Calling close() on an already closed environment has no effect and won't raise an error.

compute_cur_observation() TObs[source]#
compute_cur_reward() float[source]#
abstract compute_next_state(action: TAction) TStateAction[source]#
property cur_episode_len: int#
property cur_observation: TObs | None#
property cur_reward: float | None#
property cur_state_action: TStateAction | None#
get_cur_env_status() EnvStatus[TStateAction, TObs][source]#
get_info_dict() dict[str, Any][source]#
property is_closed: bool#
property is_terminated: bool#
property is_truncated: bool#
property observation_space: Space[TObs]#
reset(seed: int | None = None, **kwargs: Any) tuple[TObs, dict[str, Any]][source]#

Resets the environment to an initial internal state, returning an initial observation and info.

This method generates a new starting state, often with some randomness, to ensure that the agent explores the state space and learns a generalised policy about the environment. This randomness can be controlled with the seed parameter; otherwise, if the environment already has a random number generator and reset() is called with seed=None, the RNG is not reset.

Therefore, reset() should (in the typical use case) be called with a seed right after initialization and then never again.

For custom environments, the first line of reset() should be super().reset(seed=seed), which implements the seeding correctly.

Changed in version v0.25: The return_info parameter was removed and now info is expected to be returned.

Args:
    seed (optional int): The seed that is used to initialize the environment's PRNG (np_random) and
        the read-only attribute np_random_seed. If the environment does not already have a PRNG and
        seed=None (the default option) is passed, a seed will be chosen from some source of entropy
        (e.g. timestamp or /dev/urandom). However, if the environment already has a PRNG and
        seed=None is passed, the PRNG will not be reset and the env's np_random_seed will not be
        altered. If you pass an integer, the PRNG will be reset even if it already exists. Usually,
        you want to pass an integer right after the environment has been initialized and then never
        again. Please refer to the minimal example below to see this paradigm in action.
    options (optional dict): Additional information to specify how the environment is reset
        (optional, depending on the specific environment).

Returns:
    observation (ObsType): Observation of the initial state. This will be an element of
        observation_space (typically a numpy array) and is analogous to the observation returned by step().
    info (dictionary): This dictionary contains auxiliary information complementing observation. It
        should be analogous to the info returned by step().
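
A minimal sketch of the seeding paradigm referred to above, assuming the hypothetical CounterEnv, DistanceToTargetReward and CounterObservation defined under step() below:

    env = CounterEnv(reward_metric=DistanceToTargetReward(), observation=CounterObservation())
    obs, info = env.reset(seed=42)  # seed once, right after construction
    obs, info = env.reset()         # later resets reuse the already-seeded RNG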

abstract sample_initial_state() TStateAction[source]#
should_terminate() bool[source]#
should_truncate() bool[source]#
step(action: TAction) tuple[TObs, float, bool, bool, dict[str, Any]][source]#

Step through the environment to navigate to the next state.
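
A sketch of a minimal concrete ModularEnv. CounterState, CounterObservation, DistanceToTargetReward and CounterEnv are hypothetical names invented for illustration; in particular, the assumption that cur_state_action already holds the previous state inside compute_next_state() should be verified against the source.

    import numpy as np
    from gymnasium.spaces import Box
    # ArrayObservation, RewardMetric, ModularEnv are imported from this module

    class CounterState:  # hypothetical stand-in for TStateAction
        def __init__(self, value: float) -> None:
            self.value = value

    class CounterObservation(ArrayObservation[CounterState]):
        @property
        def observation_space(self) -> Box:
            return Box(low=-np.inf, high=np.inf, shape=(1,))

        def compute_observation(self, state: CounterState) -> np.ndarray:
            return np.array([state.value], dtype=np.float32)

    class DistanceToTargetReward(RewardMetric[CounterState]):
        def __init__(self, target: float = 10.0) -> None:
            self.target = target

        def compute_reward(self, state: CounterState) -> float:
            return -abs(self.target - state.value)  # closer to the target = higher reward

        @property
        def range(self) -> tuple[float, float]:
            return (-float("inf"), 0.0)

    class CounterEnv(ModularEnv):
        @property
        def action_space(self) -> Box:
            return Box(low=-1.0, high=1.0, shape=(1,))

        def sample_initial_state(self) -> CounterState:
            return CounterState(value=0.0)

        def compute_next_state(self, action: np.ndarray) -> CounterState:
            # assumption: cur_state_action holds the state from the previous reset/step
            return CounterState(value=self.cur_state_action.value + float(action[0]))

    env = CounterEnv(
        reward_metric=DistanceToTargetReward(),
        observation=CounterObservation(),
        max_episode_len=100,
    )
    obs, info = env.reset(seed=0)
    obs, reward, terminated, truncated, info = env.step(env.action_space.sample())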

class NeverTerminate[source]#
should_terminate(env: Any) bool[source]#
class Observation[source]#
abstract compute_observation(state: TStateAction) TObs[source]#
abstract property observation_space: Space[TObs]#
class RewardMetric[source]#
abstract compute_reward(state: TStateAction) float[source]#
abstract property range: tuple[float, float]#
class StateAction(*, normalized_action_arr: Any)[source]#
normalized_action_arr: Any#
class TerminationCriterion[source]#
abstract should_terminate(env: TEnv) bool[source]#
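
A sketch of a custom TerminationCriterion, reusing the hypothetical CounterEnv and CounterState from the step() example above; the attribute accessed on the state (value) belongs to that hypothetical example, not to this API.

    # TerminationCriterion is imported from this module

    class ValueAboveThreshold(TerminationCriterion):
        def __init__(self, threshold: float) -> None:
            self.threshold = threshold

        def should_terminate(self, env) -> bool:
            state = env.cur_state_action  # exposed by ModularEnv
            return state is not None and state.value > self.threshold

    env = CounterEnv(
        reward_metric=DistanceToTargetReward(),
        observation=CounterObservation(),
        termination_criterion=ValueAboveThreshold(threshold=9.5),
    )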