
prefect_aws

AwsClientParameters

Bases: BaseModel

Model used to manage extra parameters that you can pass when you initialize the client. See the boto3 docs for more information about the possible client configurations.

Attributes:

api_version (Optional[str]): The API version to use. By default, botocore will use the latest API version when creating a client. You only need to specify this parameter if you want to use a previous API version of the client.

use_ssl (bool): Whether or not to use SSL. By default, SSL is used. Note that not all services support non-SSL connections.

verify (Union[bool, FilePath, None]): Whether or not to verify SSL certificates. By default SSL certificates are verified. If False, SSL will still be used (unless use_ssl is False), but SSL certificates will not be verified. Passing a file path to this is deprecated.

verify_cert_path (Optional[FilePath]): A filename of the CA cert bundle to use. You can specify this argument if you want to use a different CA cert bundle than the one used by botocore.

endpoint_url (Optional[str]): The complete URL to use for the constructed client. Normally, botocore will automatically construct the appropriate URL to use when communicating with a service. You can specify a complete URL (including the "http/https" scheme) to override this behavior. If this value is provided, then use_ssl is ignored.

config (Optional[Dict[str, Any]]): Advanced configuration for Botocore clients. See botocore docs for more details.
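
For example, a minimal sketch of pointing clients at a custom, S3-compatible endpoint (the URL and credential values are placeholders, not part of the library):

from prefect_aws import AwsCredentials
from prefect_aws.client_parameters import AwsClientParameters

# Hypothetical local endpoint; substitute your own service URL.
client_parameters = AwsClientParameters(
    endpoint_url="http://localhost:4566",
    verify=False,
)
aws_credentials = AwsCredentials(
    aws_access_key_id="access_key_id",
    aws_secret_access_key="secret_access_key",
    aws_client_parameters=client_parameters,
)
s3_client = aws_credentials.get_s3_client()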

Source code in prefect_aws/client_parameters.py
class AwsClientParameters(BaseModel):
    """
    Model used to manage extra parameters that you can pass when you initialize
    the Client. If you want to find more information, see
    [boto3 docs](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html)
    for more info about the possible client configurations.

    Attributes:
        api_version: The API version to use. By default, botocore will
            use the latest API version when creating a client. You only need
            to specify this parameter if you want to use a previous API version
            of the client.
        use_ssl: Whether or not to use SSL. By default, SSL is used.
            Note that not all services support non-ssl connections.
        verify: Whether or not to verify SSL certificates. By default
            SSL certificates are verified. If False, SSL will still be used
            (unless use_ssl is False), but SSL certificates
            will not be verified. Passing a file path to this is deprecated.
        verify_cert_path: A filename of the CA cert bundle to
            use. You can specify this argument if you want to use a
            different CA cert bundle than the one used by botocore.
        endpoint_url: The complete URL to use for the constructed
            client. Normally, botocore will automatically construct the
            appropriate URL to use when communicating with a service. You
            can specify a complete URL (including the "http/https" scheme)
            to override this behavior. If this value is provided,
            then ``use_ssl`` is ignored.
        config: Advanced configuration for Botocore clients. See
            [botocore docs](https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html)
            for more details.
    """  # noqa E501

    api_version: Optional[str] = Field(
        default=None, description="The API version to use.", title="API Version"
    )
    use_ssl: bool = Field(
        default=True, description="Whether or not to use SSL.", title="Use SSL"
    )
    verify: Union[bool, FilePath, None] = Field(
        default=None, description="Whether or not to verify SSL certificates."
    )
    verify_cert_path: Optional[FilePath] = Field(
        default=None,
        description="Path to the CA cert bundle to use.",
        title="Certificate Authority Bundle File Path",
    )
    endpoint_url: Optional[str] = Field(
        default=None,
        description="The complete URL to use for the constructed client.",
        title="Endpoint URL",
    )
    config: Optional[Dict[str, Any]] = Field(
        default=None,
        description="Advanced configuration for Botocore clients.",
        title="Botocore Config",
    )

    def __hash__(self):
        return hash(
            (
                self.api_version,
                self.use_ssl,
                self.verify,
                self.verify_cert_path,
                self.endpoint_url,
                hash_collection(self.config),
            )
        )

    @field_validator("config", mode="before")
    @classmethod
    def instantiate_config(cls, value: Union[Config, Dict[str, Any]]) -> Dict[str, Any]:
        """
        Casts lists to Config instances.
        """
        if isinstance(value, Config):
            return value.__dict__["_user_provided_options"]
        return value

    @model_validator(mode="before")
    @classmethod
    def deprecated_verify_cert_path(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """
        If verify is not a bool, raise a warning.
        """
        verify = values.get("verify")

        # deprecate using verify in favor of verify_cert_path
        # so the UI looks nicer
        if verify is not None and not isinstance(verify, bool):
            warnings.warn(
                (
                    "verify should be a boolean. "
                    "If you want to use a CA cert bundle, use verify_cert_path instead."
                ),
                DeprecationWarning,
            )
        return values

    @model_validator(mode="before")
    @classmethod
    def verify_cert_path_and_verify(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """
        If verify_cert_path is set but verify is False, raise a warning.
        """
        verify = values.get("verify", True)
        verify_cert_path = values.get("verify_cert_path")

        if not verify and verify_cert_path:
            warnings.warn(
                "verify_cert_path is set but verify is False. "
                "verify_cert_path will be ignored."
            )
            values["verify_cert_path"] = None
        elif not isinstance(verify, bool) and verify_cert_path:
            warnings.warn(
                "verify_cert_path is set but verify is also set as a file path. "
                "verify_cert_path will take precedence."
            )
            values["verify"] = True
        return values

    def get_params_override(self) -> Dict[str, Any]:
        """
        Return the dictionary of the parameters to override.
        The parameters to override are the one which are not None.
        """
        params = self.model_dump()
        if params.get("verify_cert_path"):
            # to ensure that verify doesn't re-overwrite verify_cert_path
            params.pop("verify")

        params_override = {}
        for key, value in params.items():
            if value is None:
                continue
            elif key == "config":
                params_override[key] = Config(**value)
                # botocore UNSIGNED is an instance while actual signers can
                # be fetched as strings
                if params_override[key].signature_version == "unsigned":
                    params_override[key].signature_version = UNSIGNED
            elif key == "verify_cert_path":
                params_override["verify"] = value
            elif key == "verify":
                if value is not None:
                    params_override[key] = value
            else:
                params_override[key] = value
        return params_override

deprecated_verify_cert_path(values) classmethod

If verify is not a bool, raise a warning.

Source code in prefect_aws/client_parameters.py
@model_validator(mode="before")
@classmethod
def deprecated_verify_cert_path(cls, values: Dict[str, Any]) -> Dict[str, Any]:
    """
    If verify is not a bool, raise a warning.
    """
    verify = values.get("verify")

    # deprecate using verify in favor of verify_cert_path
    # so the UI looks nicer
    if verify is not None and not isinstance(verify, bool):
        warnings.warn(
            (
                "verify should be a boolean. "
                "If you want to use a CA cert bundle, use verify_cert_path instead."
            ),
            DeprecationWarning,
        )
    return values

get_params_override()

Return the dictionary of the parameters to override. The parameters to override are the ones that are not None.

Source code in prefect_aws/client_parameters.py
def get_params_override(self) -> Dict[str, Any]:
    """
    Return the dictionary of the parameters to override.
    The parameters to override are the one which are not None.
    """
    params = self.model_dump()
    if params.get("verify_cert_path"):
        # to ensure that verify doesn't re-overwrite verify_cert_path
        params.pop("verify")

    params_override = {}
    for key, value in params.items():
        if value is None:
            continue
        elif key == "config":
            params_override[key] = Config(**value)
            # botocore UNSIGNED is an instance while actual signers can
            # be fetched as strings
            if params_override[key].signature_version == "unsigned":
                params_override[key].signature_version = UNSIGNED
        elif key == "verify_cert_path":
            params_override["verify"] = value
        elif key == "verify":
            if value is not None:
                params_override[key] = value
        else:
            params_override[key] = value
    return params_override
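
As a rough sketch of how these overrides can be fed into boto3 (the endpoint URL below is a placeholder, and this is not the library's own call site):

import boto3

from prefect_aws.client_parameters import AwsClientParameters

params = AwsClientParameters(
    endpoint_url="http://localhost:4566",  # placeholder endpoint
    config={"signature_version": "unsigned"},
)
# Only non-None fields are returned; the "unsigned" signature version is
# replaced with botocore's UNSIGNED sentinel inside a Config object.
s3_client = boto3.Session().client("s3", **params.get_params_override())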

instantiate_config(value) classmethod

Converts botocore Config instances to dicts of the user-provided options.
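
For instance, a sketch of passing a botocore Config directly (the retry settings shown are illustrative):

from botocore.config import Config

from prefect_aws.client_parameters import AwsClientParameters

# The validator converts the Config instance into its user-provided options dict.
params = AwsClientParameters(
    config=Config(retries={"max_attempts": 5, "mode": "standard"})
)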

Source code in prefect_aws/client_parameters.py
@field_validator("config", mode="before")
@classmethod
def instantiate_config(cls, value: Union[Config, Dict[str, Any]]) -> Dict[str, Any]:
    """
    Casts lists to Config instances.
    """
    if isinstance(value, Config):
        return value.__dict__["_user_provided_options"]
    return value

verify_cert_path_and_verify(values) classmethod

If verify_cert_path is set but verify is False, raise a warning.

Source code in prefect_aws/client_parameters.py
@model_validator(mode="before")
@classmethod
def verify_cert_path_and_verify(cls, values: Dict[str, Any]) -> Dict[str, Any]:
    """
    If verify_cert_path is set but verify is False, raise a warning.
    """
    verify = values.get("verify", True)
    verify_cert_path = values.get("verify_cert_path")

    if not verify and verify_cert_path:
        warnings.warn(
            "verify_cert_path is set but verify is False. "
            "verify_cert_path will be ignored."
        )
        values["verify_cert_path"] = None
    elif not isinstance(verify, bool) and verify_cert_path:
        warnings.warn(
            "verify_cert_path is set but verify is also set as a file path. "
            "verify_cert_path will take precedence."
        )
        values["verify"] = True
    return values

AwsCredentials

Bases: CredentialsBlock

Block used to manage authentication with AWS. AWS authentication is handled via the boto3 module. Refer to the boto3 docs for more info about the possible credential configurations.

Example

Load stored AWS credentials:

from prefect_aws import AwsCredentials

aws_credentials_block = AwsCredentials.load("BLOCK_NAME")
Source code in prefect_aws/credentials.py
class AwsCredentials(CredentialsBlock):
    """
    Block used to manage authentication with AWS. AWS authentication is
    handled via the `boto3` module. Refer to the
    [boto3 docs](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html)
    for more info about the possible credential configurations.

    Example:
        Load stored AWS credentials:
        ```python
        from prefect_aws import AwsCredentials

        aws_credentials_block = AwsCredentials.load("BLOCK_NAME")
        ```
    """  # noqa E501

    model_config = ConfigDict(arbitrary_types_allowed=True)

    _logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/d74b16fe84ce626345adf235a47008fea2869a60-225x225.png"  # noqa
    _block_type_name = "AWS Credentials"
    _documentation_url = "https://prefecthq.github.io/prefect-aws/credentials/#prefect_aws.credentials.AwsCredentials"  # noqa

    aws_access_key_id: Optional[str] = Field(
        default=None,
        description="A specific AWS access key ID.",
        title="AWS Access Key ID",
    )
    aws_secret_access_key: Optional[SecretStr] = Field(
        default=None,
        description="A specific AWS secret access key.",
        title="AWS Access Key Secret",
    )
    aws_session_token: Optional[str] = Field(
        default=None,
        description=(
            "The session key for your AWS account. "
            "This is only needed when you are using temporary credentials."
        ),
        title="AWS Session Token",
    )
    profile_name: Optional[str] = Field(
        default=None, description="The profile to use when creating your session."
    )
    region_name: Optional[str] = Field(
        default=None,
        description="The AWS Region where you want to create new connections.",
    )
    aws_client_parameters: AwsClientParameters = Field(
        default_factory=AwsClientParameters,
        description="Extra parameters to initialize the Client.",
        title="AWS Client Parameters",
    )

    def __hash__(self):
        field_hashes = (
            hash(self.aws_access_key_id),
            hash(self.aws_secret_access_key),
            hash(self.aws_session_token),
            hash(self.profile_name),
            hash(self.region_name),
            hash(self.aws_client_parameters),
        )
        return hash(field_hashes)

    def get_boto3_session(self) -> boto3.Session:
        """
        Returns an authenticated boto3 session that can be used to create clients
        for AWS services

        Example:
            Create an S3 client from an authorized boto3 session:
            ```python
            aws_credentials = AwsCredentials(
                aws_access_key_id = "access_key_id",
                aws_secret_access_key = "secret_access_key"
                )
            s3_client = aws_credentials.get_boto3_session().client("s3")
            ```
        """

        if self.aws_secret_access_key:
            aws_secret_access_key = self.aws_secret_access_key.get_secret_value()
        else:
            aws_secret_access_key = None

        return boto3.Session(
            aws_access_key_id=self.aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
            aws_session_token=self.aws_session_token,
            profile_name=self.profile_name,
            region_name=self.region_name,
        )

    def get_client(self, client_type: Union[str, ClientType]):
        """
        Helper method to dynamically get a client type.

        Args:
            client_type: The client's service name.

        Returns:
            An authenticated client.

        Raises:
            ValueError: if the client is not supported.
        """
        if isinstance(client_type, ClientType):
            client_type = client_type.value

        return _get_client_cached(ctx=self, client_type=client_type)

    def get_s3_client(self) -> S3Client:
        """
        Gets an authenticated S3 client.

        Returns:
            An authenticated S3 client.
        """
        return self.get_client(client_type=ClientType.S3)

    def get_secrets_manager_client(self) -> SecretsManagerClient:
        """
        Gets an authenticated Secrets Manager client.

        Returns:
            An authenticated Secrets Manager client.
        """
        return self.get_client(client_type=ClientType.SECRETS_MANAGER)

get_boto3_session()

Returns an authenticated boto3 session that can be used to create clients for AWS services

Example

Create an S3 client from an authorized boto3 session:

aws_credentials = AwsCredentials(
    aws_access_key_id = "access_key_id",
    aws_secret_access_key = "secret_access_key"
    )
s3_client = aws_credentials.get_boto3_session().client("s3")
Source code in prefect_aws/credentials.py
def get_boto3_session(self) -> boto3.Session:
    """
    Returns an authenticated boto3 session that can be used to create clients
    for AWS services

    Example:
        Create an S3 client from an authorized boto3 session:
        ```python
        aws_credentials = AwsCredentials(
            aws_access_key_id = "access_key_id",
            aws_secret_access_key = "secret_access_key"
            )
        s3_client = aws_credentials.get_boto3_session().client("s3")
        ```
    """

    if self.aws_secret_access_key:
        aws_secret_access_key = self.aws_secret_access_key.get_secret_value()
    else:
        aws_secret_access_key = None

    return boto3.Session(
        aws_access_key_id=self.aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
        aws_session_token=self.aws_session_token,
        profile_name=self.profile_name,
        region_name=self.region_name,
    )

get_client(client_type)

Helper method to dynamically get a client type.

Parameters:

client_type (Union[str, ClientType]): The client's service name. Required.

Returns:

An authenticated client.

Raises:

ValueError: if the client is not supported.
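
For example (a sketch; the block name is a placeholder):

from prefect_aws import AwsCredentials

aws_credentials = AwsCredentials.load("BLOCK_NAME")
# The service name may be passed as a string (e.g. "ecs", "s3") or a ClientType member.
ecs_client = aws_credentials.get_client("ecs")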

Source code in prefect_aws/credentials.py
def get_client(self, client_type: Union[str, ClientType]):
    """
    Helper method to dynamically get a client type.

    Args:
        client_type: The client's service name.

    Returns:
        An authenticated client.

    Raises:
        ValueError: if the client is not supported.
    """
    if isinstance(client_type, ClientType):
        client_type = client_type.value

    return _get_client_cached(ctx=self, client_type=client_type)

get_s3_client()

Gets an authenticated S3 client.

Returns:

S3Client: An authenticated S3 client.

Source code in prefect_aws/credentials.py
def get_s3_client(self) -> S3Client:
    """
    Gets an authenticated S3 client.

    Returns:
        An authenticated S3 client.
    """
    return self.get_client(client_type=ClientType.S3)

get_secrets_manager_client()

Gets an authenticated Secrets Manager client.

Returns:

SecretsManagerClient: An authenticated Secrets Manager client.

Source code in prefect_aws/credentials.py
def get_secrets_manager_client(self) -> SecretsManagerClient:
    """
    Gets an authenticated Secrets Manager client.

    Returns:
        An authenticated Secrets Manager client.
    """
    return self.get_client(client_type=ClientType.SECRETS_MANAGER)

AwsSecret

Bases: SecretBlock

Manages a secret in AWS's Secrets Manager.

Attributes:

aws_credentials (AwsCredentials): The credentials to use for authentication with AWS.

secret_name (str): The name of the secret.
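
For example, a minimal sketch that reads an existing secret (the block name and secret name are placeholders):

from prefect_aws import AwsCredentials
from prefect_aws.secrets_manager import AwsSecret

aws_secret = AwsSecret(
    aws_credentials=AwsCredentials.load("BLOCK_NAME"),
    secret_name="my-secret",
)
secret_value = aws_secret.read_secret()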

Source code in prefect_aws/secrets_manager.py
class AwsSecret(SecretBlock):
    """
    Manages a secret in AWS's Secrets Manager.

    Attributes:
        aws_credentials: The credentials to use for authentication with AWS.
        secret_name: The name of the secret.
    """

    _logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/d74b16fe84ce626345adf235a47008fea2869a60-225x225.png"  # noqa
    _block_type_name = "AWS Secret"
    _documentation_url = "https://prefecthq.github.io/prefect-aws/secrets_manager/#prefect_aws.secrets_manager.AwsSecret"  # noqa

    aws_credentials: AwsCredentials
    secret_name: str = Field(default=..., description="The name of the secret.")

    @sync_compatible
    async def read_secret(
        self,
        version_id: Optional[str] = None,
        version_stage: Optional[str] = None,
        **read_kwargs: Dict[str, Any],
    ) -> bytes:
        """
        Reads the secret from the secret storage service.

        Args:
            version_id: The version of the secret to read. If not provided, the latest
                version will be read.
            version_stage: The version stage of the secret to read. If not provided,
                the latest version will be read.
            read_kwargs: Additional keyword arguments to pass to the
                `get_secret_value` method of the boto3 client.

        Returns:
            The secret data.

        Examples:
            Reads a secret.
            ```python
            secrets_manager = SecretsManager.load("MY_BLOCK")
            secrets_manager.read_secret()
            ```
        """
        client = self.aws_credentials.get_secrets_manager_client()
        if version_id is not None:
            read_kwargs["VersionId"] = version_id
        if version_stage is not None:
            read_kwargs["VersionStage"] = version_stage
        response = await run_sync_in_worker_thread(
            client.get_secret_value, SecretId=self.secret_name, **read_kwargs
        )
        if "SecretBinary" in response:
            secret = response["SecretBinary"]
        elif "SecretString" in response:
            secret = response["SecretString"]
        arn = response["ARN"]
        self.logger.info(f"The secret {arn!r} data was successfully read.")
        return secret

    @sync_compatible
    async def write_secret(
        self, secret_data: bytes, **put_or_create_secret_kwargs: Dict[str, Any]
    ) -> str:
        """
        Writes the secret to the secret storage service as a SecretBinary;
        if it doesn't exist, it will be created.

        Args:
            secret_data: The secret data to write.
            **put_or_create_secret_kwargs: Additional keyword arguments to pass to
                put_secret_value or create_secret method of the boto3 client.

        Returns:
            The path that the secret was written to.

        Examples:
            Write some secret data.
            ```python
            secrets_manager = SecretsManager.load("MY_BLOCK")
            secrets_manager.write_secret(b"my_secret_data")
            ```
        """
        client = self.aws_credentials.get_secrets_manager_client()
        try:
            response = await run_sync_in_worker_thread(
                client.put_secret_value,
                SecretId=self.secret_name,
                SecretBinary=secret_data,
                **put_or_create_secret_kwargs,
            )
        except client.exceptions.ResourceNotFoundException:
            self.logger.info(
                f"The secret {self.secret_name!r} does not exist yet, creating it now."
            )
            response = await run_sync_in_worker_thread(
                client.create_secret,
                Name=self.secret_name,
                SecretBinary=secret_data,
                **put_or_create_secret_kwargs,
            )
        arn = response["ARN"]
        self.logger.info(f"The secret data was written successfully to {arn!r}.")
        return arn

    @sync_compatible
    async def delete_secret(
        self,
        recovery_window_in_days: int = 30,
        force_delete_without_recovery: bool = False,
        **delete_kwargs: Dict[str, Any],
    ) -> str:
        """
        Deletes the secret from the secret storage service.

        Args:
            recovery_window_in_days: The number of days to wait before permanently
                deleting the secret. Must be between 7 and 30 days.
            force_delete_without_recovery: If True, the secret will be deleted
                immediately without a recovery window.
            **delete_kwargs: Additional keyword arguments to pass to the
                delete_secret method of the boto3 client.

        Returns:
            The path that the secret was deleted from.

        Examples:
            Deletes the secret with a recovery window of 15 days.
            ```python
            secrets_manager = SecretsManager.load("MY_BLOCK")
            secrets_manager.delete_secret(recovery_window_in_days=15)
            ```
        """
        if force_delete_without_recovery and recovery_window_in_days:
            raise ValueError(
                "Cannot specify recovery window and force delete without recovery."
            )
        elif not (7 <= recovery_window_in_days <= 30):
            raise ValueError(
                "Recovery window must be between 7 and 30 days, got "
                f"{recovery_window_in_days}."
            )

        client = self.aws_credentials.get_secrets_manager_client()
        response = await run_sync_in_worker_thread(
            client.delete_secret,
            SecretId=self.secret_name,
            RecoveryWindowInDays=recovery_window_in_days,
            ForceDeleteWithoutRecovery=force_delete_without_recovery,
            **delete_kwargs,
        )
        arn = response["ARN"]
        self.logger.info(f"The secret {arn} was deleted successfully.")
        return arn

delete_secret(recovery_window_in_days=30, force_delete_without_recovery=False, **delete_kwargs) async

Deletes the secret from the secret storage service.

Parameters:

recovery_window_in_days (int): The number of days to wait before permanently deleting the secret. Must be between 7 and 30 days. Default: 30.

force_delete_without_recovery (bool): If True, the secret will be deleted immediately without a recovery window. Default: False.

**delete_kwargs (Dict[str, Any]): Additional keyword arguments to pass to the delete_secret method of the boto3 client.

Returns:

str: The path that the secret was deleted from.

Examples:

Deletes the secret with a recovery window of 15 days.

secrets_manager = SecretsManager.load("MY_BLOCK")
secrets_manager.delete_secret(recovery_window_in_days=15)
Source code in prefect_aws/secrets_manager.py
@sync_compatible
async def delete_secret(
    self,
    recovery_window_in_days: int = 30,
    force_delete_without_recovery: bool = False,
    **delete_kwargs: Dict[str, Any],
) -> str:
    """
    Deletes the secret from the secret storage service.

    Args:
        recovery_window_in_days: The number of days to wait before permanently
            deleting the secret. Must be between 7 and 30 days.
        force_delete_without_recovery: If True, the secret will be deleted
            immediately without a recovery window.
        **delete_kwargs: Additional keyword arguments to pass to the
            delete_secret method of the boto3 client.

    Returns:
        The path that the secret was deleted from.

    Examples:
        Deletes the secret with a recovery window of 15 days.
        ```python
        secrets_manager = SecretsManager.load("MY_BLOCK")
        secrets_manager.delete_secret(recovery_window_in_days=15)
        ```
    """
    if force_delete_without_recovery and recovery_window_in_days:
        raise ValueError(
            "Cannot specify recovery window and force delete without recovery."
        )
    elif not (7 <= recovery_window_in_days <= 30):
        raise ValueError(
            "Recovery window must be between 7 and 30 days, got "
            f"{recovery_window_in_days}."
        )

    client = self.aws_credentials.get_secrets_manager_client()
    response = await run_sync_in_worker_thread(
        client.delete_secret,
        SecretId=self.secret_name,
        RecoveryWindowInDays=recovery_window_in_days,
        ForceDeleteWithoutRecovery=force_delete_without_recovery,
        **delete_kwargs,
    )
    arn = response["ARN"]
    self.logger.info(f"The secret {arn} was deleted successfully.")
    return arn

read_secret(version_id=None, version_stage=None, **read_kwargs) async

Reads the secret from the secret storage service.

Parameters:

version_id (Optional[str]): The version of the secret to read. If not provided, the latest version will be read. Default: None.

version_stage (Optional[str]): The version stage of the secret to read. If not provided, the latest version will be read. Default: None.

read_kwargs (Dict[str, Any]): Additional keyword arguments to pass to the get_secret_value method of the boto3 client.

Returns:

bytes: The secret data.

Examples:

Reads a secret.

secrets_manager = SecretsManager.load("MY_BLOCK")
secrets_manager.read_secret()
Source code in prefect_aws/secrets_manager.py
@sync_compatible
async def read_secret(
    self,
    version_id: Optional[str] = None,
    version_stage: Optional[str] = None,
    **read_kwargs: Dict[str, Any],
) -> bytes:
    """
    Reads the secret from the secret storage service.

    Args:
        version_id: The version of the secret to read. If not provided, the latest
            version will be read.
        version_stage: The version stage of the secret to read. If not provided,
            the latest version will be read.
        read_kwargs: Additional keyword arguments to pass to the
            `get_secret_value` method of the boto3 client.

    Returns:
        The secret data.

    Examples:
        Reads a secret.
        ```python
        secrets_manager = SecretsManager.load("MY_BLOCK")
        secrets_manager.read_secret()
        ```
    """
    client = self.aws_credentials.get_secrets_manager_client()
    if version_id is not None:
        read_kwargs["VersionId"] = version_id
    if version_stage is not None:
        read_kwargs["VersionStage"] = version_stage
    response = await run_sync_in_worker_thread(
        client.get_secret_value, SecretId=self.secret_name, **read_kwargs
    )
    if "SecretBinary" in response:
        secret = response["SecretBinary"]
    elif "SecretString" in response:
        secret = response["SecretString"]
    arn = response["ARN"]
    self.logger.info(f"The secret {arn!r} data was successfully read.")
    return secret

write_secret(secret_data, **put_or_create_secret_kwargs) async

Writes the secret to the secret storage service as a SecretBinary; if it doesn't exist, it will be created.

Parameters:

secret_data (bytes): The secret data to write. Required.

**put_or_create_secret_kwargs (Dict[str, Any]): Additional keyword arguments to pass to the put_secret_value or create_secret method of the boto3 client.

Returns:

str: The path that the secret was written to.

Examples:

Write some secret data.

secrets_manager = SecretsManager.load("MY_BLOCK")
secrets_manager.write_secret(b"my_secret_data")
Source code in prefect_aws/secrets_manager.py
@sync_compatible
async def write_secret(
    self, secret_data: bytes, **put_or_create_secret_kwargs: Dict[str, Any]
) -> str:
    """
    Writes the secret to the secret storage service as a SecretBinary;
    if it doesn't exist, it will be created.

    Args:
        secret_data: The secret data to write.
        **put_or_create_secret_kwargs: Additional keyword arguments to pass to
            put_secret_value or create_secret method of the boto3 client.

    Returns:
        The path that the secret was written to.

    Examples:
        Write some secret data.
        ```python
        secrets_manager = SecretsManager.load("MY_BLOCK")
        secrets_manager.write_secret(b"my_secret_data")
        ```
    """
    client = self.aws_credentials.get_secrets_manager_client()
    try:
        response = await run_sync_in_worker_thread(
            client.put_secret_value,
            SecretId=self.secret_name,
            SecretBinary=secret_data,
            **put_or_create_secret_kwargs,
        )
    except client.exceptions.ResourceNotFoundException:
        self.logger.info(
            f"The secret {self.secret_name!r} does not exist yet, creating it now."
        )
        response = await run_sync_in_worker_thread(
            client.create_secret,
            Name=self.secret_name,
            SecretBinary=secret_data,
            **put_or_create_secret_kwargs,
        )
    arn = response["ARN"]
    self.logger.info(f"The secret data was written successfully to {arn!r}.")
    return arn

ECSWorker

Bases: BaseWorker

A Prefect worker to run flow runs as ECS tasks.

Source code in prefect_aws/workers/ecs_worker.py
class ECSWorker(BaseWorker):
    """
    A Prefect worker to run flow runs as ECS tasks.
    """

    type: str = "ecs"
    job_configuration: Type[ECSJobConfiguration] = ECSJobConfiguration
    job_configuration_variables: Type[ECSVariables] = ECSVariables
    _description: str = (
        "Execute flow runs within containers on AWS ECS. Works with EC2 "
        "and Fargate clusters. Requires an AWS account."
    )
    _display_name = "AWS Elastic Container Service"
    _documentation_url = "https://prefecthq.github.io/prefect-aws/ecs_worker/"
    _logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/d74b16fe84ce626345adf235a47008fea2869a60-225x225.png"  # noqa

    async def run(
        self,
        flow_run: "FlowRun",
        configuration: ECSJobConfiguration,
        task_status: Optional[anyio.abc.TaskStatus] = None,
    ) -> ECSWorkerResult:
        """
        Runs a given flow run on the current worker.
        """
        ecs_client = await run_sync_in_worker_thread(
            self._get_client, configuration, "ecs"
        )

        logger = self.get_flow_run_logger(flow_run)

        (
            task_arn,
            cluster_arn,
            task_definition,
            is_new_task_definition,
        ) = await run_sync_in_worker_thread(
            self._create_task_and_wait_for_start,
            logger,
            ecs_client,
            configuration,
            flow_run,
        )

        # The task identifier is "{cluster}::{task}" where we use the configured cluster
        # if set to preserve matching by name rather than arn
        # Note "::" is used despite the Prefect standard being ":" because ARNs contain
        # single colons.
        identifier = (
            (configuration.cluster if configuration.cluster else cluster_arn)
            + "::"
            + task_arn
        )

        if task_status:
            task_status.started(identifier)

        status_code = await run_sync_in_worker_thread(
            self._watch_task_and_get_exit_code,
            logger,
            configuration,
            task_arn,
            cluster_arn,
            task_definition,
            is_new_task_definition and configuration.auto_deregister_task_definition,
            ecs_client,
        )

        return ECSWorkerResult(
            identifier=identifier,
            # If the container does not start the exit code can be null but we must
            # still report a status code. We use a -1 to indicate a special code.
            status_code=status_code if status_code is not None else -1,
        )

    def _get_client(
        self, configuration: ECSJobConfiguration, client_type: Union[str, ClientType]
    ) -> _ECSClient:
        """
        Get a boto3 client of client_type. Will use a cached client if one exists.
        """
        return configuration.aws_credentials.get_client(client_type)

    def _create_task_and_wait_for_start(
        self,
        logger: logging.Logger,
        ecs_client: _ECSClient,
        configuration: ECSJobConfiguration,
        flow_run: FlowRun,
    ) -> Tuple[str, str, dict, bool]:
        """
        Register the task definition, create the task run, and wait for it to start.

        Returns a tuple of
        - The task ARN
        - The task's cluster ARN
        - The task definition
        - A bool indicating if the task definition is newly registered
        """
        task_definition_arn = configuration.task_run_request.get("taskDefinition")
        new_task_definition_registered = False

        if not task_definition_arn:
            task_definition = self._prepare_task_definition(
                configuration, region=ecs_client.meta.region_name, flow_run=flow_run
            )

            (
                task_definition_arn,
                new_task_definition_registered,
            ) = self._get_or_register_task_definition(
                logger, ecs_client, configuration, flow_run, task_definition
            )
        else:
            task_definition = self._retrieve_task_definition(
                logger, ecs_client, task_definition_arn
            )
            if configuration.task_definition:
                logger.warning(
                    "Ignoring task definition in configuration since task definition"
                    " ARN is provided on the task run request."
                )

        self._validate_task_definition(task_definition, configuration)

        _TASK_DEFINITION_CACHE[flow_run.deployment_id] = task_definition_arn

        logger.info(f"Using ECS task definition {task_definition_arn!r}...")
        logger.debug(
            f"Task definition {json.dumps(task_definition, indent=2, default=str)}"
        )

        task_run_request = self._prepare_task_run_request(
            configuration,
            task_definition,
            task_definition_arn,
        )

        logger.info("Creating ECS task run...")
        logger.debug(
            "Task run request"
            f"{json.dumps(mask_api_key(task_run_request), indent=2, default=str)}"
        )

        try:
            task = self._create_task_run(ecs_client, task_run_request)
            task_arn = task["taskArn"]
            cluster_arn = task["clusterArn"]
        except Exception as exc:
            self._report_task_run_creation_failure(configuration, task_run_request, exc)
            raise

        logger.info("Waiting for ECS task run to start...")
        self._wait_for_task_start(
            logger,
            configuration,
            task_arn,
            cluster_arn,
            ecs_client,
            timeout=configuration.task_start_timeout_seconds,
        )

        return task_arn, cluster_arn, task_definition, new_task_definition_registered

    def _get_or_register_task_definition(
        self,
        logger: logging.Logger,
        ecs_client: _ECSClient,
        configuration: ECSJobConfiguration,
        flow_run: FlowRun,
        task_definition: dict,
    ) -> Tuple[str, bool]:
        """Get or register a task definition for the given flow run.

        Returns a tuple of the task definition ARN and a bool indicating if the task
        definition is newly registered.
        """

        cached_task_definition_arn = _TASK_DEFINITION_CACHE.get(flow_run.deployment_id)
        new_task_definition_registered = False

        if cached_task_definition_arn:
            try:
                cached_task_definition = self._retrieve_task_definition(
                    logger, ecs_client, cached_task_definition_arn
                )
                if not cached_task_definition[
                    "status"
                ] == "ACTIVE" or not self._task_definitions_equal(
                    task_definition, cached_task_definition
                ):
                    cached_task_definition_arn = None
            except Exception:
                cached_task_definition_arn = None

        if (
            not cached_task_definition_arn
            and configuration.match_latest_revision_in_family
        ):
            family_name = task_definition.get("family", ECS_DEFAULT_FAMILY)
            try:
                task_definition_from_family = self._retrieve_task_definition(
                    logger, ecs_client, family_name
                )
                if task_definition_from_family and self._task_definitions_equal(
                    task_definition, task_definition_from_family
                ):
                    cached_task_definition_arn = task_definition_from_family[
                        "taskDefinitionArn"
                    ]
            except Exception:
                cached_task_definition_arn = None

        if not cached_task_definition_arn:
            task_definition_arn = self._register_task_definition(
                logger, ecs_client, task_definition
            )
            new_task_definition_registered = True
        else:
            task_definition_arn = cached_task_definition_arn

        return task_definition_arn, new_task_definition_registered

    def _watch_task_and_get_exit_code(
        self,
        logger: logging.Logger,
        configuration: ECSJobConfiguration,
        task_arn: str,
        cluster_arn: str,
        task_definition: dict,
        deregister_task_definition: bool,
        ecs_client: _ECSClient,
    ) -> Optional[int]:
        """
        Wait for the task run to complete and retrieve the exit code of the Prefect
        container.
        """

        # Wait for completion and stream logs
        task = self._wait_for_task_finish(
            logger,
            configuration,
            task_arn,
            cluster_arn,
            task_definition,
            ecs_client,
        )

        if deregister_task_definition:
            ecs_client.deregister_task_definition(
                taskDefinition=task["taskDefinitionArn"]
            )

        container_name = (
            configuration.container_name
            or _container_name_from_task_definition(task_definition)
            or ECS_DEFAULT_CONTAINER_NAME
        )

        # Check the status code of the Prefect container
        container = _get_container(task["containers"], container_name)
        assert (
            container is not None
        ), f"'{container_name}' container missing from task: {task}"
        status_code = container.get("exitCode")
        self._report_container_status_code(logger, container_name, status_code)

        return status_code

    def _report_container_status_code(
        self, logger: logging.Logger, name: str, status_code: Optional[int]
    ) -> None:
        """
        Display a log for the given container status code.
        """
        if status_code is None:
            logger.error(
                f"Task exited without reporting an exit status for container {name!r}."
            )
        elif status_code == 0:
            logger.info(f"Container {name!r} exited successfully.")
        else:
            logger.warning(
                f"Container {name!r} exited with non-zero exit code {status_code}."
            )

    def _report_task_run_creation_failure(
        self, configuration: ECSJobConfiguration, task_run: dict, exc: Exception
    ) -> None:
        """
        Wrap common AWS task run creation failures with nicer user-facing messages.
        """
        # AWS generates exception types at runtime so they must be captured a bit
        # differently than normal.
        if "ClusterNotFoundException" in str(exc):
            cluster = task_run.get("cluster", "default")
            raise RuntimeError(
                f"Failed to run ECS task, cluster {cluster!r} not found. "
                "Confirm that the cluster is configured in your region."
            ) from exc
        elif (
            "No Container Instances" in str(exc) and task_run.get("launchType") == "EC2"
        ):
            cluster = task_run.get("cluster", "default")
            raise RuntimeError(
                f"Failed to run ECS task, cluster {cluster!r} does not appear to "
                "have any container instances associated with it. Confirm that you "
                "have EC2 container instances available."
            ) from exc
        elif (
            "failed to validate logger args" in str(exc)
            and "AccessDeniedException" in str(exc)
            and configuration.configure_cloudwatch_logs
        ):
            raise RuntimeError(
                "Failed to run ECS task, the attached execution role does not appear"
                " to have sufficient permissions. Ensure that the execution role"
                f" {configuration.execution_role!r} has permissions"
                " logs:CreateLogStream, logs:CreateLogGroup, and logs:PutLogEvents."
            )
        else:
            raise

    def _validate_task_definition(
        self, task_definition: dict, configuration: ECSJobConfiguration
    ) -> None:
        """
        Ensure that the task definition is compatible with the configuration.

        Raises `ValueError` on incompatibility. Returns `None` on success.
        """
        launch_type = configuration.task_run_request.get(
            "launchType", ECS_DEFAULT_LAUNCH_TYPE
        )
        if (
            launch_type != "EC2"
            and "FARGATE" not in task_definition["requiresCompatibilities"]
        ):
            raise ValueError(
                "Task definition does not have 'FARGATE' in 'requiresCompatibilities'"
                f" and cannot be used with launch type {launch_type!r}"
            )

        if launch_type == "FARGATE" or launch_type == "FARGATE_SPOT":
            # Only the 'awsvpc' network mode is supported when using FARGATE
            network_mode = task_definition.get("networkMode")
            if network_mode != "awsvpc":
                raise ValueError(
                    f"Found network mode {network_mode!r} which is not compatible with "
                    f"launch type {launch_type!r}. Use either the 'EC2' launch "
                    "type or the 'awsvpc' network mode."
                )

        if configuration.configure_cloudwatch_logs and not task_definition.get(
            "executionRoleArn"
        ):
            raise ValueError(
                "An execution role arn must be set on the task definition to use "
                "`configure_cloudwatch_logs` or `stream_logs` but no execution role "
                "was found on the task definition."
            )

    def _register_task_definition(
        self,
        logger: logging.Logger,
        ecs_client: _ECSClient,
        task_definition: dict,
    ) -> str:
        """
        Register a new task definition with AWS.

        Returns the ARN.
        """
        logger.info("Registering ECS task definition...")
        logger.debug(
            "Task definition request"
            f"{json.dumps(task_definition, indent=2, default=str)}"
        )

        response = ecs_client.register_task_definition(**task_definition)
        return response["taskDefinition"]["taskDefinitionArn"]

    def _retrieve_task_definition(
        self,
        logger: logging.Logger,
        ecs_client: _ECSClient,
        task_definition: str,
    ):
        """
        Retrieve an existing task definition from AWS.
        """
        if task_definition.startswith("arn:aws:ecs:"):
            logger.info(f"Retrieving ECS task definition {task_definition!r}...")
        else:
            logger.info(
                "Retrieving most recent active revision from "
                f"ECS task family {task_definition!r}..."
            )
        response = ecs_client.describe_task_definition(taskDefinition=task_definition)
        return response["taskDefinition"]

    def _wait_for_task_start(
        self,
        logger: logging.Logger,
        configuration: ECSJobConfiguration,
        task_arn: str,
        cluster_arn: str,
        ecs_client: _ECSClient,
        timeout: int,
    ) -> dict:
        """
        Waits for an ECS task run to reach a RUNNING status.

        If a STOPPED status is reached instead, an exception is raised indicating the
        reason that the task run did not start.
        """
        for task in self._watch_task_run(
            logger,
            configuration,
            task_arn,
            cluster_arn,
            ecs_client,
            until_status="RUNNING",
            timeout=timeout,
        ):
            # TODO: It is possible that the task has passed _through_ a RUNNING
            #       status during the polling interval. In this case, there is not an
            #       exception to raise.
            if task["lastStatus"] == "STOPPED":
                code = task.get("stopCode")
                reason = task.get("stoppedReason")
                # Generate a dynamic exception type from the AWS name
                raise type(code, (RuntimeError,), {})(reason)

        return task

    def _wait_for_task_finish(
        self,
        logger: logging.Logger,
        configuration: ECSJobConfiguration,
        task_arn: str,
        cluster_arn: str,
        task_definition: dict,
        ecs_client: _ECSClient,
    ):
        """
        Watch an ECS task until it reaches a STOPPED status.

        If configured, logs from the Prefect container are streamed to stderr.

        Returns a description of the task on completion.
        """
        can_stream_output = False
        container_name = (
            configuration.container_name
            or _container_name_from_task_definition(task_definition)
            or ECS_DEFAULT_CONTAINER_NAME
        )

        if configuration.stream_output:
            container_def = _get_container(
                task_definition["containerDefinitions"], container_name
            )
            if not container_def:
                logger.warning(
                    "Prefect container definition not found in "
                    "task definition. Output cannot be streamed."
                )
            elif not container_def.get("logConfiguration"):
                logger.warning(
                    "Logging configuration not found on task. "
                    "Output cannot be streamed."
                )
            elif container_def["logConfiguration"].get("logDriver") != "awslogs":
                logger.warning(
                    "Logging configuration uses unsupported driver "
                    f"{container_def['logConfiguration'].get('logDriver')!r}. "
                    "Output cannot be streamed."
                )
            else:
                # Prepare to stream the output
                log_config = container_def["logConfiguration"]["options"]
                logs_client = self._get_client(configuration, "logs")
                can_stream_output = True
                # Track the last log timestamp to prevent double display
                last_log_timestamp: Optional[int] = None
                # Determine the name of the stream as "prefix/container/run-id"
                stream_name = "/".join(
                    [
                        log_config["awslogs-stream-prefix"],
                        container_name,
                        task_arn.rsplit("/")[-1],
                    ]
                )
                self._logger.info(
                    f"Streaming output from container {container_name!r}..."
                )

        for task in self._watch_task_run(
            logger,
            configuration,
            task_arn,
            cluster_arn,
            ecs_client,
            current_status="RUNNING",
        ):
            if configuration.stream_output and can_stream_output:
                # On each poll for task run status, also retrieve available logs
                last_log_timestamp = self._stream_available_logs(
                    logger,
                    logs_client,
                    log_group=log_config["awslogs-group"],
                    log_stream=stream_name,
                    last_log_timestamp=last_log_timestamp,
                )

        return task

    def _stream_available_logs(
        self,
        logger: logging.Logger,
        logs_client: Any,
        log_group: str,
        log_stream: str,
        last_log_timestamp: Optional[int] = None,
    ) -> Optional[int]:
        """
        Stream logs from the given log group and stream since the last log timestamp.

        Will continue on paginated responses until all logs are returned.

        Returns the last log timestamp which can be used to call this method in the
        future.
        """
        last_log_stream_token = "NO-TOKEN"
        next_log_stream_token = None

        # AWS will return the same token that we send once the end of the paginated
        # response is reached
        while last_log_stream_token != next_log_stream_token:
            last_log_stream_token = next_log_stream_token

            request = {
                "logGroupName": log_group,
                "logStreamName": log_stream,
            }

            if last_log_stream_token is not None:
                request["nextToken"] = last_log_stream_token

            if last_log_timestamp is not None:
                # Bump the timestamp by one ms to avoid retrieving the last log again
                request["startTime"] = last_log_timestamp + 1

            try:
                response = logs_client.get_log_events(**request)
            except Exception:
                logger.error(
                    f"Failed to read log events with request {request}",
                    exc_info=True,
                )
                return last_log_timestamp

            log_events = response["events"]
            for log_event in log_events:
                # TODO: This doesn't forward to the local logger, which can be
                #       bad for customizing handling and understanding where the
                #       log is coming from, but it avoids nesting logger information
                #       when the content is output from a Prefect logger on the
                #       running infrastructure
                print(log_event["message"], file=sys.stderr)

                if (
                    last_log_timestamp is None
                    or log_event["timestamp"] > last_log_timestamp
                ):
                    last_log_timestamp = log_event["timestamp"]

            next_log_stream_token = response.get("nextForwardToken")
            if not log_events:
                # Stop reading pages if there was no data
                break

        return last_log_timestamp

    def _watch_task_run(
        self,
        logger: logging.Logger,
        configuration: ECSJobConfiguration,
        task_arn: str,
        cluster_arn: str,
        ecs_client: _ECSClient,
        current_status: str = "UNKNOWN",
        until_status: Optional[str] = None,
        timeout: Optional[int] = None,
    ) -> Generator[dict, None, None]:
        """
        Watches an ECS task run by querying every `poll_interval` seconds. After each
        query, the retrieved task is yielded. This function returns when the task run
        reaches a STOPPED status or the provided `until_status`.

        Emits a log each time the status changes.
        """
        last_status = status = current_status
        t0 = time.time()
        while status != until_status:
            tasks = ecs_client.describe_tasks(
                tasks=[task_arn], cluster=cluster_arn, include=["TAGS"]
            )["tasks"]

            if tasks:
                task = tasks[0]

                status = task["lastStatus"]
                if status != last_status:
                    logger.info(f"ECS task status is {status}.")

                yield task

                # No point in continuing if the status is final
                if status == "STOPPED":
                    break

                last_status = status

            else:
                # Intermittently, the task will not be described. We want to
                # respect the watch timeout, though.
                logger.debug("Task not found.")

            elapsed_time = time.time() - t0
            if timeout is not None and elapsed_time > timeout:
                raise RuntimeError(
                    f"Timed out after {elapsed_time}s while watching task for status "
                    f"{until_status or 'STOPPED'}."
                )
            time.sleep(configuration.task_watch_poll_interval)

    def _get_or_generate_family(self, task_definition: dict, flow_run: FlowRun) -> str:
        """
        Gets or generates a family for the task definition.
        """
        family = task_definition.get("family")
        if not family:
            assert self._work_pool_name and flow_run.deployment_id
            family = (
                f"{ECS_DEFAULT_FAMILY}_{self._work_pool_name}_{flow_run.deployment_id}"
            )
        family = slugify(
            family,
            max_length=255,
            regex_pattern=r"[^a-zA-Z0-9-_]+",
        )
        return family

    def _prepare_task_definition(
        self,
        configuration: ECSJobConfiguration,
        region: str,
        flow_run: FlowRun,
    ) -> dict:
        """
        Prepare a task definition by inferring any defaults and merging overrides.
        """
        task_definition = copy.deepcopy(configuration.task_definition)

        # Configure the Prefect runtime container
        task_definition.setdefault("containerDefinitions", [])

        # Remove empty container definitions
        task_definition["containerDefinitions"] = [
            d for d in task_definition["containerDefinitions"] if d
        ]

        container_name = configuration.container_name
        if not container_name:
            container_name = (
                _container_name_from_task_definition(task_definition)
                or ECS_DEFAULT_CONTAINER_NAME
            )

        container = _get_container(
            task_definition["containerDefinitions"], container_name
        )
        if container is None:
            if container_name != ECS_DEFAULT_CONTAINER_NAME:
                raise ValueError(
                    f"Container {container_name!r} not found in task definition."
                )

            # Look for a container without a name
            for container in task_definition["containerDefinitions"]:
                if "name" not in container:
                    container["name"] = container_name
                    break
            else:
                container = {"name": container_name}
                task_definition["containerDefinitions"].append(container)

        # Image is required so make sure it's present
        container.setdefault("image", get_prefect_image_name())

        # Remove any keys that have been explicitly "unset"
        unset_keys = {key for key, value in configuration.env.items() if value is None}
        for item in tuple(container.get("environment", [])):
            if item["name"] in unset_keys or item["value"] is None:
                container["environment"].remove(item)

        if configuration.configure_cloudwatch_logs:
            prefix = f"prefect-logs_{self._work_pool_name}_{flow_run.deployment_id}"
            container["logConfiguration"] = {
                "logDriver": "awslogs",
                "options": {
                    "awslogs-create-group": "true",
                    "awslogs-group": "prefect",
                    "awslogs-region": region,
                    "awslogs-stream-prefix": (
                        configuration.cloudwatch_logs_prefix or prefix
                    ),
                    **configuration.cloudwatch_logs_options,
                },
            }

        task_definition["family"] = self._get_or_generate_family(
            task_definition, flow_run
        )
        # CPU and memory are required in some cases; retrieve the values to use
        cpu = task_definition.get("cpu") or ECS_DEFAULT_CPU
        memory = task_definition.get("memory") or ECS_DEFAULT_MEMORY

        launch_type = configuration.task_run_request.get(
            "launchType", ECS_DEFAULT_LAUNCH_TYPE
        )

        if launch_type == "FARGATE" or launch_type == "FARGATE_SPOT":
            # Task level memory and cpu are required when using fargate
            task_definition["cpu"] = str(cpu)
            task_definition["memory"] = str(memory)

            # The FARGATE compatibility is required if it will be used as the launch type
            requires_compatibilities = task_definition.setdefault(
                "requiresCompatibilities", []
            )
            if "FARGATE" not in requires_compatibilities:
                task_definition["requiresCompatibilities"].append("FARGATE")

            # Only the 'awsvpc' network mode is supported when using FARGATE
            # However, we will not enforce that here if the user has set it
            task_definition.setdefault("networkMode", "awsvpc")

        elif launch_type == "EC2":
            # Container level memory and cpu are required when using ec2
            container.setdefault("cpu", cpu)
            container.setdefault("memory", memory)

            # Ensure set values are cast to integers
            container["cpu"] = int(container["cpu"])
            container["memory"] = int(container["memory"])

        # Ensure set values are cast to strings
        if task_definition.get("cpu"):
            task_definition["cpu"] = str(task_definition["cpu"])
        if task_definition.get("memory"):
            task_definition["memory"] = str(task_definition["memory"])

        return task_definition

    def _load_network_configuration(
        self, vpc_id: Optional[str], configuration: ECSJobConfiguration
    ) -> dict:
        """
        Load settings from a specific VPC or the default VPC and generate a task
        run request's network configuration.
        """
        ec2_client = self._get_client(configuration, "ec2")
        vpc_message = "the default VPC" if not vpc_id else f"VPC with ID {vpc_id}"

        if not vpc_id:
            # Retrieve the default VPC
            describe = {"Filters": [{"Name": "isDefault", "Values": ["true"]}]}
        else:
            describe = {"VpcIds": [vpc_id]}

        vpcs = ec2_client.describe_vpcs(**describe)["Vpcs"]
        if not vpcs:
            help_message = (
                "Pass an explicit `vpc_id` or configure a default VPC."
                if not vpc_id
                else "Check that the VPC exists in the current region."
            )
            raise ValueError(
                f"Failed to find {vpc_message}. "
                "Network configuration cannot be inferred. " + help_message
            )

        vpc_id = vpcs[0]["VpcId"]
        subnets = ec2_client.describe_subnets(
            Filters=[{"Name": "vpc-id", "Values": [vpc_id]}]
        )["Subnets"]
        if not subnets:
            raise ValueError(
                f"Failed to find subnets for {vpc_message}. "
                "Network configuration cannot be inferred."
            )

        return {
            "awsvpcConfiguration": {
                "subnets": [s["SubnetId"] for s in subnets],
                "assignPublicIp": "ENABLED",
                "securityGroups": [],
            }
        }

    def _custom_network_configuration(
        self,
        vpc_id: str,
        network_configuration: dict,
        configuration: ECSJobConfiguration,
    ) -> dict:
        """
        Validate the given network configuration against a specific VPC and
        generate a task run request's network configuration.
        """
        ec2_client = self._get_client(configuration, "ec2")
        vpc_message = f"VPC with ID {vpc_id}"

        vpcs = ec2_client.describe_vpcs(VpcIds=[vpc_id]).get("Vpcs")

        if not vpcs:
            raise ValueError(
                f"Failed to find {vpc_message}. "
                + "Network configuration cannot be inferred. "
                + "Pass an explicit `vpc_id`."
            )

        vpc_id = vpcs[0]["VpcId"]
        subnets = ec2_client.describe_subnets(
            Filters=[{"Name": "vpc-id", "Values": [vpc_id]}]
        )["Subnets"]

        if not subnets:
            raise ValueError(
                f"Failed to find subnets for {vpc_message}. "
                + "Network configuration cannot be inferred."
            )

        subnet_ids = [subnet["SubnetId"] for subnet in subnets]

        config_subnets = network_configuration.get("subnets", [])
        if not all(conf_sn in subnet_ids for conf_sn in config_subnets):
            raise ValueError(
                f"Subnets {config_subnets} not found within {vpc_message}."
                + " Please check that the VPC is associated with the supplied subnets."
            )

        return {"awsvpcConfiguration": network_configuration}

    def _prepare_task_run_request(
        self,
        configuration: ECSJobConfiguration,
        task_definition: dict,
        task_definition_arn: str,
    ) -> dict:
        """
        Prepare a task run request payload.
        """
        task_run_request = deepcopy(configuration.task_run_request)

        task_run_request.setdefault("taskDefinition", task_definition_arn)

        assert task_run_request["taskDefinition"] == task_definition_arn, (
            f"Task definition ARN mismatch: {task_run_request['taskDefinition']!r} "
            f"!= {task_definition_arn!r}"
        )
        capacityProviderStrategy = task_run_request.get("capacityProviderStrategy")

        if capacityProviderStrategy:
            # Should not be provided at all if capacityProviderStrategy is set, see https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RunTask.html#ECS-RunTask-request-capacityProviderStrategy  # noqa
            self._logger.warning(
                "Found capacityProviderStrategy. "
                "Removing launchType from task run request."
            )
            task_run_request.pop("launchType", None)

        elif task_run_request.get("launchType") == "FARGATE_SPOT":
            # Should not be provided at all for FARGATE SPOT
            task_run_request.pop("launchType", None)

            # A capacity provider strategy is required for FARGATE SPOT
            task_run_request["capacityProviderStrategy"] = [
                {"capacityProvider": "FARGATE_SPOT", "weight": 1}
            ]
        overrides = task_run_request.get("overrides", {})
        container_overrides = overrides.get("containerOverrides", [])

        # Ensure the network configuration is present if using awsvpc for network mode
        if (
            task_definition.get("networkMode") == "awsvpc"
            and not task_run_request.get("networkConfiguration")
            and not configuration.network_configuration
        ):
            task_run_request["networkConfiguration"] = self._load_network_configuration(
                configuration.vpc_id, configuration
            )

        # Use networkConfiguration if supplied by user
        if (
            task_definition.get("networkMode") == "awsvpc"
            and configuration.network_configuration
            and configuration.vpc_id
        ):
            task_run_request[
                "networkConfiguration"
            ] = self._custom_network_configuration(
                configuration.vpc_id,
                configuration.network_configuration,
                configuration,
            )

        # Ensure the container name is set if not provided at template time

        container_name = (
            configuration.container_name
            or _container_name_from_task_definition(task_definition)
            or ECS_DEFAULT_CONTAINER_NAME
        )

        if container_overrides and not container_overrides[0].get("name"):
            container_overrides[0]["name"] = container_name

        # Ensure configuration command is respected post-templating

        orchestration_container = _get_container(container_overrides, container_name)

        if orchestration_container:
            # Override the command if given on the configuration
            if configuration.command:
                orchestration_container["command"] = configuration.command

        # Clean up templated variable formatting

        for container in container_overrides:
            if isinstance(container.get("command"), str):
                container["command"] = shlex.split(container["command"])
            if isinstance(container.get("environment"), dict):
                container["environment"] = [
                    {"name": k, "value": v} for k, v in container["environment"].items()
                ]

            # Remove null values — they're not allowed by AWS
            container["environment"] = [
                item
                for item in container.get("environment", [])
                if item["value"] is not None
            ]

        if isinstance(task_run_request.get("tags"), dict):
            task_run_request["tags"] = [
                {"key": k, "value": v} for k, v in task_run_request["tags"].items()
            ]

        if overrides.get("cpu"):
            overrides["cpu"] = str(overrides["cpu"])

        if overrides.get("memory"):
            overrides["memory"] = str(overrides["memory"])

        # Ensure configuration tags and env are respected post-templating

        tags = [
            item
            for item in task_run_request.get("tags", [])
            if item["key"] not in configuration.labels.keys()
        ] + [
            {"key": k, "value": v}
            for k, v in configuration.labels.items()
            if v is not None
        ]

        # Slugify tag keys and values
        tags = [
            {
                "key": slugify(
                    item["key"],
                    regex_pattern=_TAG_REGEX,
                    allow_unicode=True,
                    lowercase=False,
                ),
                "value": slugify(
                    item["value"],
                    regex_pattern=_TAG_REGEX,
                    allow_unicode=True,
                    lowercase=False,
                ),
            }
            for item in tags
        ]

        if tags:
            task_run_request["tags"] = tags

        if orchestration_container:
            environment = [
                item
                for item in orchestration_container.get("environment", [])
                if item["name"] not in configuration.env.keys()
            ] + [
                {"name": k, "value": v}
                for k, v in configuration.env.items()
                if v is not None
            ]
            if environment:
                orchestration_container["environment"] = environment

        # Remove empty container overrides

        overrides["containerOverrides"] = [v for v in container_overrides if v]

        return task_run_request

    @retry(
        stop=stop_after_attempt(MAX_CREATE_TASK_RUN_ATTEMPTS),
        wait=wait_fixed(CREATE_TASK_RUN_MIN_DELAY_SECONDS)
        + wait_random(
            CREATE_TASK_RUN_MIN_DELAY_JITTER_SECONDS,
            CREATE_TASK_RUN_MAX_DELAY_JITTER_SECONDS,
        ),
        reraise=True,
    )
    def _create_task_run(self, ecs_client: _ECSClient, task_run_request: dict) -> str:
        """
        Create a run of a task definition.

        Returns the task run ARN.
        """
        task = ecs_client.run_task(**task_run_request)
        if task["failures"]:
            raise RuntimeError(
                f"Failed to run ECS task: {task['failures'][0]['reason']}"
            )
        elif not task["tasks"]:
            raise RuntimeError(
                "Failed to run ECS task: no tasks or failures were returned."
            )
        return task["tasks"][0]

    def _task_definitions_equal(self, taskdef_1, taskdef_2) -> bool:
        """
        Compare two task definitions.

        Since one may come from the AWS API and have populated defaults, we do our best
        to homogenize the definitions without changing their meaning.
        """
        if taskdef_1 == taskdef_2:
            return True

        if taskdef_1 is None or taskdef_2 is None:
            return False

        taskdef_1 = copy.deepcopy(taskdef_1)
        taskdef_2 = copy.deepcopy(taskdef_2)

        for taskdef in (taskdef_1, taskdef_2):
            # Set defaults that AWS would set after registration
            container_definitions = taskdef.get("containerDefinitions", [])
            essential = any(
                container.get("essential") for container in container_definitions
            )
            if not essential:
                container_definitions[0].setdefault("essential", True)

            taskdef.setdefault("networkMode", "bridge")

        _drop_empty_keys_from_dict(taskdef_1)
        _drop_empty_keys_from_dict(taskdef_2)

        # Clear fields that change on registration for comparison
        for field in ECS_POST_REGISTRATION_FIELDS:
            taskdef_1.pop(field, None)
            taskdef_2.pop(field, None)

        return taskdef_1 == taskdef_2

run(flow_run, configuration, task_status=None) async

Runs a given flow run on the current worker.

Source code in prefect_aws/workers/ecs_worker.py
async def run(
    self,
    flow_run: "FlowRun",
    configuration: ECSJobConfiguration,
    task_status: Optional[anyio.abc.TaskStatus] = None,
) -> ECSWorkerResult:
    """
    Runs a given flow run on the current worker.
    """
    ecs_client = await run_sync_in_worker_thread(
        self._get_client, configuration, "ecs"
    )

    logger = self.get_flow_run_logger(flow_run)

    (
        task_arn,
        cluster_arn,
        task_definition,
        is_new_task_definition,
    ) = await run_sync_in_worker_thread(
        self._create_task_and_wait_for_start,
        logger,
        ecs_client,
        configuration,
        flow_run,
    )

    # The task identifier is "{cluster}::{task}", where we use the configured cluster,
    # if set, to preserve matching by name rather than by ARN.
    # Note "::" is used despite the Prefect standard being ":" because ARNs contain
    # single colons.
    identifier = (
        (configuration.cluster if configuration.cluster else cluster_arn)
        + "::"
        + task_arn
    )

    if task_status:
        task_status.started(identifier)

    status_code = await run_sync_in_worker_thread(
        self._watch_task_and_get_exit_code,
        logger,
        configuration,
        task_arn,
        cluster_arn,
        task_definition,
        is_new_task_definition and configuration.auto_deregister_task_definition,
        ecs_client,
    )

    return ECSWorkerResult(
        identifier=identifier,
        # If the container does not start, the exit code can be null, but we must
        # still report a status code. We use -1 to indicate this special case.
        status_code=status_code if status_code is not None else -1,
    )

LambdaFunction

Bases: Block

Invoke a Lambda function. This block is part of the prefect-aws collection. Install prefect-aws with pip install prefect-aws to use this block.

Attributes:

Name Type Description
function_name str

The name, ARN, or partial ARN of the Lambda function to run. This must be the name of a function that is already deployed to AWS Lambda.

qualifier Optional[str]

The version or alias of the Lambda function to use when invoked. If not specified, the latest (unqualified) version of the Lambda function will be used.

aws_credentials AwsCredentials

The AWS credentials to use to connect to AWS Lambda with a default factory of AwsCredentials.

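As a quick orientation before the source listing, here is a minimal, hypothetical usage sketch; the block name "invoke-my-function" and function name "my-function" are illustrative placeholders, not values from this reference.

```python
from prefect_aws import AwsCredentials
from prefect_aws.lambda_function import LambdaFunction

# Placeholder names; the function must already be deployed to AWS Lambda
lambda_block = LambdaFunction(
    function_name="my-function",
    aws_credentials=AwsCredentials(),  # falls back to the ambient AWS credential chain
)

# Persist the block so it can be loaded by name later
lambda_block.save("invoke-my-function", overwrite=True)

# Load and invoke synchronously (invoke is sync-compatible)
response = LambdaFunction.load("invoke-my-function").invoke(payload={"foo": "bar"})
print(response["StatusCode"])
```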
Source code in prefect_aws/lambda_function.py
class LambdaFunction(Block):
    """Invoke a Lambda function. This block is part of the prefect-aws
    collection. Install prefect-aws with `pip install prefect-aws` to use this
    block.

    Attributes:
        function_name: The name, ARN, or partial ARN of the Lambda function to
            run. This must be the name of a function that is already deployed
            to AWS Lambda.
        qualifier: The version or alias of the Lambda function to use when
            invoked. If not specified, the latest (unqualified) version of the
            Lambda function will be used.
        aws_credentials: The AWS credentials to use to connect to AWS Lambda
            with a default factory of AwsCredentials.

    """

    _block_type_name = "Lambda Function"
    _logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/d74b16fe84ce626345adf235a47008fea2869a60-225x225.png"  # noqa
    _documentation_url = "https://prefecthq.github.io/prefect-aws/s3/#prefect_aws.lambda_function.LambdaFunction"  # noqa

    function_name: str = Field(
        title="Function Name",
        description=(
            "The name, ARN, or partial ARN of the Lambda function to run. This"
            " must be the name of a function that is already deployed to AWS"
            " Lambda."
        ),
    )
    qualifier: Optional[str] = Field(
        default=None,
        title="Qualifier",
        description=(
            "The version or alias of the Lambda function to use when invoked. "
            "If not specified, the latest (unqualified) version of the Lambda "
            "function will be used."
        ),
    )
    aws_credentials: AwsCredentials = Field(
        title="AWS Credentials",
        default_factory=AwsCredentials,
        description="The AWS credentials to invoke the Lambda with.",
    )

    def _get_lambda_client(self):
        """
        Retrieve a boto3 session and Lambda client
        """
        boto_session = self.aws_credentials.get_boto3_session()
        lambda_client = boto_session.client("lambda")
        return lambda_client

    @sync_compatible
    async def invoke(
        self,
        payload: Optional[dict] = None,
        invocation_type: Literal[
            "RequestResponse", "Event", "DryRun"
        ] = "RequestResponse",
        tail: bool = False,
        client_context: Optional[dict] = None,
    ) -> dict:
        """
        [Invoke](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda/client/invoke.html)
        the Lambda function with the given payload.

        Args:
            payload: The payload to send to the Lambda function.
            invocation_type: The invocation type of the Lambda function. This
                can be one of "RequestResponse", "Event", or "DryRun". Uses
                "RequestResponse" by default.
            tail: If True, the response will include the base64-encoded last 4
                KB of log data produced by the Lambda function.
            client_context: The client context to send to the Lambda function.
                Limited to 3583 bytes.

        Returns:
            The response from the Lambda function.

        Examples:

            ```python
            from prefect_aws.lambda_function import LambdaFunction
            from prefect_aws.credentials import AwsCredentials

            credentials = AwsCredentials()
            lambda_function = LambdaFunction(
                function_name="test_lambda_function",
                aws_credentials=credentials,
            )
            response = lambda_function.invoke(
                payload={"foo": "bar"},
                invocation_type="RequestResponse",
            )
            response["Payload"].read()
            ```
            ```txt
            b'{"foo": "bar"}'
            ```

        """
        # Add invocation arguments
        kwargs = dict(FunctionName=self.function_name)

        if payload:
            kwargs["Payload"] = json.dumps(payload).encode()

        # Let boto handle invalid invocation types
        kwargs["InvocationType"] = invocation_type

        if self.qualifier is not None:
            kwargs["Qualifier"] = self.qualifier

        if tail:
            kwargs["LogType"] = "Tail"

        if client_context is not None:
            # For some reason this is a string, but payload is bytes
            kwargs["ClientContext"] = json.dumps(client_context)

        # Get client and invoke
        lambda_client = await run_sync_in_worker_thread(self._get_lambda_client)
        return await run_sync_in_worker_thread(lambda_client.invoke, **kwargs)

invoke(payload=None, invocation_type='RequestResponse', tail=False, client_context=None) async

Invoke the Lambda function with the given payload.

Parameters:

Name Type Description Default
payload Optional[dict]

The payload to send to the Lambda function.

None
invocation_type Literal['RequestResponse', 'Event', 'DryRun']

The invocation type of the Lambda function. This can be one of "RequestResponse", "Event", or "DryRun". Uses "RequestResponse" by default.

'RequestResponse'
tail bool

If True, the response will include the base64-encoded last 4 KB of log data produced by the Lambda function.

False
client_context Optional[dict]

The client context to send to the Lambda function. Limited to 3583 bytes.

None

Returns:

Type Description
dict

The response from the Lambda function.

```python
from prefect_aws.lambda_function import LambdaFunction
from prefect_aws.credentials import AwsCredentials

credentials = AwsCredentials()
lambda_function = LambdaFunction(
    function_name="test_lambda_function",
    aws_credentials=credentials,
)
response = lambda_function.invoke(
    payload={"foo": "bar"},
    invocation_type="RequestResponse",
)
response["Payload"].read()
```
```txt
b'{"foo": "bar"}'
```
Source code in prefect_aws/lambda_function.py
@sync_compatible
async def invoke(
    self,
    payload: Optional[dict] = None,
    invocation_type: Literal[
        "RequestResponse", "Event", "DryRun"
    ] = "RequestResponse",
    tail: bool = False,
    client_context: Optional[dict] = None,
) -> dict:
    """
    [Invoke](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda/client/invoke.html)
    the Lambda function with the given payload.

    Args:
        payload: The payload to send to the Lambda function.
        invocation_type: The invocation type of the Lambda function. This
            can be one of "RequestResponse", "Event", or "DryRun". Uses
            "RequestResponse" by default.
        tail: If True, the response will include the base64-encoded last 4
            KB of log data produced by the Lambda function.
        client_context: The client context to send to the Lambda function.
            Limited to 3583 bytes.

    Returns:
        The response from the Lambda function.

    Examples:

        ```python
        from prefect_aws.lambda_function import LambdaFunction
        from prefect_aws.credentials import AwsCredentials

        credentials = AwsCredentials()
        lambda_function = LambdaFunction(
            function_name="test_lambda_function",
            aws_credentials=credentials,
        )
        response = lambda_function.invoke(
            payload={"foo": "bar"},
            invocation_type="RequestResponse",
        )
        response["Payload"].read()
        ```
        ```txt
        b'{"foo": "bar"}'
        ```

    """
    # Add invocation arguments
    kwargs = dict(FunctionName=self.function_name)

    if payload:
        kwargs["Payload"] = json.dumps(payload).encode()

    # Let boto handle invalid invocation types
    kwargs["InvocationType"] = invocation_type

    if self.qualifier is not None:
        kwargs["Qualifier"] = self.qualifier

    if tail:
        kwargs["LogType"] = "Tail"

    if client_context is not None:
        # For some reason this is a string, but payload is bytes
        kwargs["ClientContext"] = json.dumps(client_context)

    # Get client and invoke
    lambda_client = await run_sync_in_worker_thread(self._get_lambda_client)
    return await run_sync_in_worker_thread(lambda_client.invoke, **kwargs)

MinIOCredentials

Bases: CredentialsBlock

Block used to manage authentication with MinIO. Refer to the MinIO docs for more info about the possible credential configurations.

Attributes:

Name Type Description
minio_root_user str

Admin or root user.

minio_root_password SecretStr

Admin or root password.

region_name Optional[str]

Location of server, e.g. "us-east-1".

Example

Load stored MinIO credentials:

from prefect_aws import MinIOCredentials

minio_credentials_block = MinIOCredentials.load("BLOCK_NAME")
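
For additional context, a minimal sketch of creating the block and pointing an S3-compatible client at a local MinIO server; the root user, password, and http://localhost:9000 endpoint are assumptions for illustration, mirroring the get_boto3_session example further down.

```python
from prefect_aws import MinIOCredentials

# Hypothetical local MinIO deployment; credentials and endpoint are placeholders
minio_credentials = MinIOCredentials(
    minio_root_user="minioadmin",
    minio_root_password="minioadmin",
)

# Build an S3-compatible client against the MinIO endpoint
s3_client = minio_credentials.get_boto3_session().client(
    service_name="s3",
    endpoint_url="http://localhost:9000",
)
print(s3_client.list_buckets()["Buckets"])
```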
Source code in prefect_aws/credentials.py
class MinIOCredentials(CredentialsBlock):
    """
    Block used to manage authentication with MinIO. Refer to the
    [MinIO docs](https://docs.min.io/docs/minio-server-configuration-guide.html)
    for more info about the possible credential configurations.

    Attributes:
        minio_root_user: Admin or root user.
        minio_root_password: Admin or root password.
        region_name: Location of server, e.g. "us-east-1".

    Example:
        Load stored MinIO credentials:
        ```python
        from prefect_aws import MinIOCredentials

        minio_credentials_block = MinIOCredentials.load("BLOCK_NAME")
        ```
    """  # noqa E501

    model_config = ConfigDict(arbitrary_types_allowed=True)

    _logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/676cb17bcbdff601f97e0a02ff8bcb480e91ff40-250x250.png"  # noqa
    _block_type_name = "MinIO Credentials"
    _description = (
        "Block used to manage authentication with MinIO. Refer to the MinIO "
        "docs: https://docs.min.io/docs/minio-server-configuration-guide.html "
        "for more info about the possible credential configurations."
    )
    _documentation_url = "https://prefecthq.github.io/prefect-aws/credentials/#prefect_aws.credentials.MinIOCredentials"  # noqa

    minio_root_user: str = Field(default=..., description="Admin or root user.")
    minio_root_password: SecretStr = Field(
        default=..., description="Admin or root password."
    )
    region_name: Optional[str] = Field(
        default=None,
        description="The AWS Region where you want to create new connections.",
    )
    aws_client_parameters: AwsClientParameters = Field(
        default_factory=AwsClientParameters,
        description="Extra parameters to initialize the Client.",
    )

    def __hash__(self):
        return hash(
            (
                hash(self.minio_root_user),
                hash(self.minio_root_password),
                hash(self.region_name),
                hash(frozenset(self.aws_client_parameters.model_dump().items())),
            )
        )

    def get_boto3_session(self) -> boto3.Session:
        """
        Returns an authenticated boto3 session that can be used to create clients
        and perform object operations on a MinIO server.

        Example:
            Create an S3 client from an authorized boto3 session

            ```python
            minio_credentials = MinIOCredentials(
                minio_root_user = "minio_root_user",
                minio_root_password = "minio_root_password"
            )
            s3_client = minio_credentials.get_boto3_session().client(
                service_name="s3",
                endpoint_url="http://localhost:9000"
            )
            ```
        """

        minio_root_password = (
            self.minio_root_password.get_secret_value()
            if self.minio_root_password
            else None
        )

        return boto3.Session(
            aws_access_key_id=self.minio_root_user,
            aws_secret_access_key=minio_root_password,
            region_name=self.region_name,
        )

    def get_client(self, client_type: Union[str, ClientType]):
        """
        Helper method to dynamically get a client type.

        Args:
            client_type: The client's service name.

        Returns:
            An authenticated client.

        Raises:
            ValueError: if the client is not supported.
        """
        if isinstance(client_type, ClientType):
            client_type = client_type.value

        return _get_client_cached(ctx=self, client_type=client_type)

    def get_s3_client(self) -> S3Client:
        """
        Gets an authenticated S3 client.

        Returns:
            An authenticated S3 client.
        """
        return self.get_client(client_type=ClientType.S3)

get_boto3_session()

Returns an authenticated boto3 session that can be used to create clients and perform object operations on a MinIO server.

Example

Create an S3 client from an authorized boto3 session

minio_credentials = MinIOCredentials(
    minio_root_user = "minio_root_user",
    minio_root_password = "minio_root_password"
)
s3_client = minio_credentials.get_boto3_session().client(
    service_name="s3",
    endpoint_url="http://localhost:9000"
)
Source code in prefect_aws/credentials.py
def get_boto3_session(self) -> boto3.Session:
    """
    Returns an authenticated boto3 session that can be used to create clients
    and perform object operations on a MinIO server.

    Example:
        Create an S3 client from an authorized boto3 session

        ```python
        minio_credentials = MinIOCredentials(
            minio_root_user = "minio_root_user",
            minio_root_password = "minio_root_password"
        )
        s3_client = minio_credentials.get_boto3_session().client(
            service_name="s3",
            endpoint_url="http://localhost:9000"
        )
        ```
    """

    minio_root_password = (
        self.minio_root_password.get_secret_value()
        if self.minio_root_password
        else None
    )

    return boto3.Session(
        aws_access_key_id=self.minio_root_user,
        aws_secret_access_key=minio_root_password,
        region_name=self.region_name,
    )

get_client(client_type)

Helper method to dynamically get a client type.

Parameters:

Name Type Description Default
client_type Union[str, ClientType]

The client's service name.

required

Returns:

Type Description

An authenticated client.

Raises:

Type Description
ValueError

if the client is not supported.

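A brief usage sketch; the block name "BLOCK_NAME" follows the load example above, and the ClientType import path is an assumption based on the signature shown here.

```python
from prefect_aws import MinIOCredentials
from prefect_aws.credentials import ClientType  # assumed import path

minio_credentials_block = MinIOCredentials.load("BLOCK_NAME")

# A plain service name string works...
s3_client = minio_credentials_block.get_client("s3")

# ...as does a ClientType member, which is converted to its string value
s3_client = minio_credentials_block.get_client(ClientType.S3)
```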
Source code in prefect_aws/credentials.py
def get_client(self, client_type: Union[str, ClientType]):
    """
    Helper method to dynamically get a client type.

    Args:
        client_type: The client's service name.

    Returns:
        An authenticated client.

    Raises:
        ValueError: if the client is not supported.
    """
    if isinstance(client_type, ClientType):
        client_type = client_type.value

    return _get_client_cached(ctx=self, client_type=client_type)

get_s3_client()

Gets an authenticated S3 client.

Returns:

Type Description
S3Client

An authenticated S3 client.

Source code in prefect_aws/credentials.py
def get_s3_client(self) -> S3Client:
    """
    Gets an authenticated S3 client.

    Returns:
        An authenticated S3 client.
    """
    return self.get_client(client_type=ClientType.S3)

S3Bucket

Bases: WritableFileSystem, WritableDeploymentStorage, ObjectStorageBlock

Block used to store data using AWS S3 or S3-compatible object storage like MinIO.

Attributes:

Name Type Description
bucket_name str

Name of your bucket.

credentials Union[MinIOCredentials, AwsCredentials]

A block containing your credentials to AWS or MinIO.

bucket_folder str

A default path to a folder within the S3 bucket to use for reading and writing objects.

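Before the (long) source listing, a minimal usage sketch; the bucket name, folder, and credentials are placeholders, following the read_path and write_path examples in the docstrings below.

```python
from prefect_aws import AwsCredentials
from prefect_aws.s3 import S3Bucket

# Placeholder credentials; in practice these often come from a saved block
aws_creds = AwsCredentials()

s3_bucket_block = S3Bucket(
    bucket_name="bucket",
    credentials=aws_creds,
    bucket_folder="subfolder",  # optional default prefix joined onto every key
)

# write_path/read_path resolve keys against bucket_folder, so this round-trips
# bytes through "subfolder/file1"
s3_bucket_block.write_path(path="file1", content=b"hello, world")
assert s3_bucket_block.read_path(path="file1") == b"hello, world"
```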
Source code in prefect_aws/s3.py
class S3Bucket(WritableFileSystem, WritableDeploymentStorage, ObjectStorageBlock):
    """
    Block used to store data using AWS S3 or S3-compatible object storage like MinIO.

    Attributes:
        bucket_name: Name of your bucket.
        credentials: A block containing your credentials to AWS or MinIO.
        bucket_folder: A default path to a folder within the S3 bucket to use
            for reading and writing objects.
    """

    _logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/d74b16fe84ce626345adf235a47008fea2869a60-225x225.png"  # noqa
    _block_type_name = "S3 Bucket"
    _documentation_url = (
        "https://prefecthq.github.io/prefect-aws/s3/#prefect_aws.s3.S3Bucket"  # noqa
    )

    bucket_name: str = Field(default=..., description="Name of your bucket.")

    credentials: Union[MinIOCredentials, AwsCredentials] = Field(
        default_factory=AwsCredentials,
        description="A block containing your credentials to AWS or MinIO.",
    )

    bucket_folder: str = Field(
        default="",
        description=(
            "A default path to a folder within the S3 bucket to use "
            "for reading and writing objects."
        ),
    )

    @field_validator("credentials", mode="before")
    def validate_credentials(cls, value, field):
        if isinstance(value, dict):
            # There is an issue with pydantic and nested blocks with union
            # types, in this case credentials is affected by it. What happens
            # is that the credentials block appears to be correctly initialized
            # but when it's attached to the parent block it's an
            # _uninitialized_ instance without the field attributes.

            # This validator is a workaround to check for the correct type
            # or fallback to iterating over the possible credential types
            # and trying to initialize them.

            block_type_slug = value.pop("block_type_slug", None)
            if block_type_slug:
                credential_classes = (
                    lookup_type(CredentialsBlock, dispatch_key=block_type_slug),
                )
            else:
                credential_classes = get_args(
                    cls.model_fields["credentials"].annotation
                )

            for credentials_cls in credential_classes:
                try:
                    return credentials_cls(**value)  # type: ignore
                except ValueError:
                    pass

            valid_classes = ", ".join(c.__name__ for c in credential_classes)
            raise ValueError(
                f"Invalid credentials data: does not match any credential type. Valid types: {valid_classes}"
            )

        return value

    # Property to maintain compatibility with storage block based deployments
    @property
    def basepath(self) -> str:
        """
        The base path of the S3 bucket.

        Returns:
            str: The base path of the S3 bucket.
        """
        return self.bucket_folder

    @basepath.setter
    def basepath(self, value: str) -> None:
        self.bucket_folder = value

    def _resolve_path(self, path: str) -> str:
        """
        A helper function used in write_path to join `self.basepath` and `path`.

        Args:

            path: Name of the key, e.g. "file1". Each object in your
                bucket has a unique key (or key name).

        """
        # If bucket_folder is provided, we won't write to the root dir of
        # the bucket, so we need to prepend it to the path.
        #
        # AWS object key naming guidelines require '/' for bucket folders.
        # Get POSIX path to prevent `pathlib` from inferring '\' on Windows OS
        path = (
            (Path(self.bucket_folder) / path).as_posix() if self.bucket_folder else path
        )

        return path

    def _get_s3_client(self) -> boto3.client:
        """
        Authenticate MinIO credentials or AWS credentials and return an S3 client.
        This is a helper function called by read_path() or write_path().
        """
        return self.credentials.get_client("s3")

    def _get_bucket_resource(self) -> boto3.resource:
        """
        Retrieves boto3 resource object for the configured bucket
        """
        params_override = self.credentials.aws_client_parameters.get_params_override()
        bucket = (
            self.credentials.get_boto3_session()
            .resource("s3", **params_override)
            .Bucket(self.bucket_name)
        )
        return bucket

    @sync_compatible
    async def get_directory(
        self, from_path: Optional[str] = None, local_path: Optional[str] = None
    ) -> None:
        """
        Copies a folder from the configured S3 bucket to a local directory.

        Defaults to copying the entire contents of the block's basepath to the current
        working directory.

        Args:
            from_path: Path in S3 bucket to download from. Defaults to the block's
                configured basepath.
            local_path: Local path to download S3 contents to. Defaults to the current
                working directory.
        """
        bucket_folder = self.bucket_folder
        if from_path is None:
            from_path = str(bucket_folder) if bucket_folder else ""

        if local_path is None:
            local_path = str(Path(".").absolute())
        else:
            local_path = str(Path(local_path).expanduser())

        bucket = self._get_bucket_resource()
        for obj in bucket.objects.filter(Prefix=from_path):
            if obj.key[-1] == "/":
                # skip folder placeholder keys; local directories are created
                # when their contained objects are downloaded
                continue
            target = os.path.join(
                local_path,
                os.path.relpath(obj.key, from_path),
            )
            os.makedirs(os.path.dirname(target), exist_ok=True)
            bucket.download_file(obj.key, target)

    @sync_compatible
    async def put_directory(
        self,
        local_path: Optional[str] = None,
        to_path: Optional[str] = None,
        ignore_file: Optional[str] = None,
    ) -> int:
        """
        Uploads a directory from a given local path to the configured S3 bucket in a
        given folder.

        Defaults to uploading the entire contents of the current working directory to the
        block's basepath.

        Args:
            local_path: Path to local directory to upload from.
            to_path: Path in S3 bucket to upload to. Defaults to block's configured
                basepath.
            ignore_file: Path to file containing gitignore style expressions for
                filepaths to ignore.

        """
        to_path = "" if to_path is None else to_path

        if local_path is None:
            local_path = "."

        included_files = None
        if ignore_file:
            with open(ignore_file, "r") as f:
                ignore_patterns = f.readlines()

            included_files = filter_files(local_path, ignore_patterns)

        uploaded_file_count = 0
        for local_file_path in Path(local_path).expanduser().rglob("*"):
            if (
                included_files is not None
                and str(local_file_path.relative_to(local_path)) not in included_files
            ):
                continue
            elif not local_file_path.is_dir():
                remote_file_path = Path(to_path) / local_file_path.relative_to(
                    local_path
                )
                with open(local_file_path, "rb") as local_file:
                    local_file_content = local_file.read()

                await self.write_path(
                    remote_file_path.as_posix(), content=local_file_content
                )
                uploaded_file_count += 1

        return uploaded_file_count

    @sync_compatible
    async def read_path(self, path: str) -> bytes:
        """
        Read specified path from S3 and return contents. Provide the entire
        path to the key in S3.

        Args:
            path: Entire path to (and including) the key.

        Example:
            Read "subfolder/file1" contents from an S3 bucket named "bucket":
            ```python
            from prefect_aws import AwsCredentials
            from prefect_aws.s3 import S3Bucket

            aws_creds = AwsCredentials(
                aws_access_key_id=AWS_ACCESS_KEY_ID,
                aws_secret_access_key=AWS_SECRET_ACCESS_KEY
            )

            s3_bucket_block = S3Bucket(
                bucket_name="bucket",
                credentials=aws_creds,
                bucket_folder="subfolder"
            )

            key_contents = s3_bucket_block.read_path(path="subfolder/file1")
            ```
        """
        path = self._resolve_path(path)

        return await run_sync_in_worker_thread(self._read_sync, path)

    def _read_sync(self, key: str) -> bytes:
        """
        Called by read_path(). Creates an S3 client and retrieves the
        contents from a specified path.
        """

        s3_client = self._get_s3_client()

        with io.BytesIO() as stream:
            s3_client.download_fileobj(Bucket=self.bucket_name, Key=key, Fileobj=stream)
            stream.seek(0)
            output = stream.read()
            return output

    @sync_compatible
    async def write_path(self, path: str, content: bytes) -> str:
        """
        Writes to an S3 bucket.

        Args:

            path: The key name. Each object in your bucket has a unique
                key (or key name).
            content: What you are uploading to S3.

        Example:

            Write data to the path `dogs/small_dogs/havanese` in an S3 Bucket:
            ```python
            from prefect_aws import MinIOCredentials
            from prefect_aws.s3 import S3Bucket

            minio_creds = MinIOCredentials(
                minio_root_user = "minioadmin",
                minio_root_password = "minioadmin",
            )

            s3_bucket_block = S3Bucket(
                bucket_name="bucket",
                credentials=minio_creds,
                bucket_folder="dogs/smalldogs",
                endpoint_url="http://localhost:9000",
            )
            s3_havanese_path = s3_bucket_block.write_path(path="havanese", content=data)
            ```
        """

        path = self._resolve_path(path)

        await run_sync_in_worker_thread(self._write_sync, path, content)

        return path

    def _write_sync(self, key: str, data: bytes) -> None:
        """
        Called by write_path(). Creates an S3 client and uploads a file
        object.
        """

        s3_client = self._get_s3_client()

        with io.BytesIO(data) as stream:
            s3_client.upload_fileobj(Fileobj=stream, Bucket=self.bucket_name, Key=key)

    # NEW BLOCK INTERFACE METHODS BELOW
    @staticmethod
    def _list_objects_sync(page_iterator: PageIterator) -> List[Dict[str, Any]]:
        """
        Synchronous method to collect S3 objects into a list

        Args:
            page_iterator: AWS Paginator for S3 objects

        Returns:
            List[Dict]: List of object information
        """
        return [
            content for page in page_iterator for content in page.get("Contents", [])
        ]

    def _join_bucket_folder(self, bucket_path: str = "") -> str:
        """
        Joins the base bucket folder to the bucket path.
        NOTE: If a method reuses another method in this class, be careful not to
        call this twice, because it will join the bucket folder twice.
        See https://github.com/PrefectHQ/prefect-aws/issues/141 for a past issue.
        """
        if not self.bucket_folder and not bucket_path:
            # there's a difference between "." and "", at least in the tests
            return ""

        bucket_path = str(bucket_path)
        if self.bucket_folder != "" and bucket_path.startswith(self.bucket_folder):
            self.logger.info(
                f"Bucket path {bucket_path!r} is already prefixed with "
                f"bucket folder {self.bucket_folder!r}; is this intentional?"
            )

        return (Path(self.bucket_folder) / bucket_path).as_posix() + (
            "" if not bucket_path.endswith("/") else "/"
        )

    @sync_compatible
    async def list_objects(
        self,
        folder: str = "",
        delimiter: str = "",
        page_size: Optional[int] = None,
        max_items: Optional[int] = None,
        jmespath_query: Optional[str] = None,
    ) -> List[Dict[str, Any]]:
        """
        Args:
            folder: Folder to list objects from.
            delimiter: Character used to group keys of listed objects.
            page_size: Number of objects to return in each request to the AWS API.
            max_items: Maximum number of objects to be returned.
            jmespath_query: Query used to filter objects based on object attributes; refer to
                the [boto3 docs](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/paginators.html#filtering-results-with-jmespath)
                for more information on how to construct queries.

        Returns:
            List of objects and their metadata in the bucket.

        Examples:
            List objects under the `base_folder`.
            ```python
            from prefect_aws.s3 import S3Bucket

            s3_bucket = S3Bucket.load("my-bucket")
            s3_bucket.list_objects("base_folder")
            ```
        """  # noqa: E501
        bucket_path = self._join_bucket_folder(folder)
        client = self.credentials.get_s3_client()
        paginator = client.get_paginator("list_objects_v2")
        page_iterator = paginator.paginate(
            Bucket=self.bucket_name,
            Prefix=bucket_path,
            Delimiter=delimiter,
            PaginationConfig={"PageSize": page_size, "MaxItems": max_items},
        )
        if jmespath_query:
            page_iterator = page_iterator.search(f"{jmespath_query} | {{Contents: @}}")

        self.logger.info(f"Listing objects in bucket {bucket_path}.")
        objects = await run_sync_in_worker_thread(
            self._list_objects_sync, page_iterator
        )
        return objects

    @sync_compatible
    async def download_object_to_path(
        self,
        from_path: str,
        to_path: Optional[Union[str, Path]],
        **download_kwargs: Dict[str, Any],
    ) -> Path:
        """
        Downloads an object from the S3 bucket to a path.

        Args:
            from_path: The path to the object to download; this gets prefixed
                with the bucket_folder.
            to_path: The path to download the object to. If not provided, the
                object's name will be used.
            **download_kwargs: Additional keyword arguments to pass to
                `Client.download_file`.

        Returns:
            The absolute path that the object was downloaded to.

        Examples:
            Download my_folder/notes.txt object to notes.txt.
            ```python
            from prefect_aws.s3 import S3Bucket

            s3_bucket = S3Bucket.load("my-bucket")
            s3_bucket.download_object_to_path("my_folder/notes.txt", "notes.txt")
            ```
        """
        if to_path is None:
            to_path = Path(from_path).name

        # making path absolute, but converting back to str here
        # since !r looks nicer that way and filename arg expects str
        to_path = str(Path(to_path).absolute())
        bucket_path = self._join_bucket_folder(from_path)
        client = self.credentials.get_s3_client()

        self.logger.debug(
            f"Preparing to download object from bucket {self.bucket_name!r} "
            f"path {bucket_path!r} to {to_path!r}."
        )
        await run_sync_in_worker_thread(
            client.download_file,
            Bucket=self.bucket_name,
            Key=bucket_path,
            Filename=to_path,
            **download_kwargs,
        )
        self.logger.info(
            f"Downloaded object from bucket {self.bucket_name!r} path {bucket_path!r} "
            f"to {to_path!r}."
        )
        return Path(to_path)

    @sync_compatible
    async def download_object_to_file_object(
        self,
        from_path: str,
        to_file_object: BinaryIO,
        **download_kwargs: Dict[str, Any],
    ) -> BinaryIO:
        """
        Downloads an object from the object storage service to a file-like object,
        which can be a BytesIO object or a BufferedWriter.

        Args:
            from_path: The path to the object to download from; this gets prefixed
                with the bucket_folder.
            to_file_object: The file-like object to download the object to.
            **download_kwargs: Additional keyword arguments to pass to
                `Client.download_fileobj`.

        Returns:
            The file-like object that the object was downloaded to.

        Examples:
            Download my_folder/notes.txt object to a BytesIO object.
            ```python
            from io import BytesIO

            from prefect_aws.s3 import S3Bucket

            s3_bucket = S3Bucket.load("my-bucket")
            with BytesIO() as buf:
                s3_bucket.download_object_to_file_object("my_folder/notes.txt", buf)
            ```

            Download my_folder/notes.txt object to a BufferedWriter.
            ```python
            from prefect_aws.s3 import S3Bucket

            s3_bucket = S3Bucket.load("my-bucket")
            with open("notes.txt", "wb") as f:
                s3_bucket.download_object_to_file_object("my_folder/notes.txt", f)
            ```
        """
        client = self.credentials.get_s3_client()
        bucket_path = self._join_bucket_folder(from_path)

        self.logger.debug(
            f"Preparing to download object from bucket {self.bucket_name!r} "
            f"path {bucket_path!r} to file object."
        )
        await run_sync_in_worker_thread(
            client.download_fileobj,
            Bucket=self.bucket_name,
            Key=bucket_path,
            Fileobj=to_file_object,
            **download_kwargs,
        )
        self.logger.info(
            f"Downloaded object from bucket {self.bucket_name!r} path {bucket_path!r} "
            "to file object."
        )
        return to_file_object

    @sync_compatible
    async def download_folder_to_path(
        self,
        from_folder: str,
        to_folder: Optional[Union[str, Path]] = None,
        **download_kwargs: Dict[str, Any],
    ) -> Path:
        """
        Downloads objects *within* a folder (excluding the folder itself)
        from the S3 bucket to a folder.

        Args:
            from_folder: The path to the folder to download from.
            to_folder: The path to download the folder to.
            **download_kwargs: Additional keyword arguments to pass to
                `Client.download_file`.

        Returns:
            The absolute path that the folder was downloaded to.

        Examples:
            Download my_folder to a local folder named my_folder.
            ```python
            from prefect_aws.s3 import S3Bucket

            s3_bucket = S3Bucket.load("my-bucket")
            s3_bucket.download_folder_to_path("my_folder", "my_folder")
            ```
        """
        if to_folder is None:
            to_folder = ""
        to_folder = Path(to_folder).absolute()

        client = self.credentials.get_s3_client()
        objects = await self.list_objects(folder=from_folder)

        # list_objects already joins the bucket folder to the prefix, so don't
        # join it again before passing `from_folder` there; we still join it
        # here because relative_to below needs the fully prefixed key
        bucket_folder = self._join_bucket_folder(from_folder)

        async_coros = []
        for object in objects:
            bucket_path = Path(object["Key"]).relative_to(bucket_folder)
            # this skips the actual directory itself, e.g.
            # `my_folder/` will be skipped
            # `my_folder/notes.txt` will be downloaded
            if bucket_path.is_dir():
                continue
            to_path = to_folder / bucket_path
            to_path.parent.mkdir(parents=True, exist_ok=True)
            to_path = str(to_path)  # must be string
            self.logger.info(
                f"Downloading object from bucket {self.bucket_name!r} path "
                f"{bucket_path.as_posix()!r} to {to_path!r}."
            )
            async_coros.append(
                run_sync_in_worker_thread(
                    client.download_file,
                    Bucket=self.bucket_name,
                    Key=object["Key"],
                    Filename=to_path,
                    **download_kwargs,
                )
            )
        await asyncio.gather(*async_coros)

        return Path(to_folder)

    @sync_compatible
    async def stream_from(
        self,
        bucket: "S3Bucket",
        from_path: str,
        to_path: Optional[str] = None,
        **upload_kwargs: Dict[str, Any],
    ) -> str:
        """Streams an object from another bucket to this bucket. Requires the
        object to be downloaded and uploaded in chunks. If `self`'s credentials
        allow for writes to the other bucket, try using `S3Bucket.copy_object`.

        Args:
            bucket: The bucket to stream from.
            from_path: The path of the object to stream.
            to_path: The path to stream the object to. Defaults to the object's name.
            **upload_kwargs: Additional keyword arguments to pass to
                `Client.upload_fileobj`.

        Returns:
            The path that the object was uploaded to.

        Examples:
            Stream notes.txt from your-bucket/notes.txt to my-bucket/landed/notes.txt.

            ```python
            from prefect_aws.s3 import S3Bucket

            your_s3_bucket = S3Bucket.load("your-bucket")
            my_s3_bucket = S3Bucket.load("my-bucket")

            my_s3_bucket.stream_from(
                your_s3_bucket,
                "notes.txt",
                to_path="landed/notes.txt"
            )
            ```

        """
        if to_path is None:
            to_path = Path(from_path).name

        # Get the source object's StreamingBody
        from_path: str = bucket._join_bucket_folder(from_path)
        from_client = bucket.credentials.get_s3_client()
        obj = await run_sync_in_worker_thread(
            from_client.get_object, Bucket=bucket.bucket_name, Key=from_path
        )
        body: StreamingBody = obj["Body"]

        # Upload the StreamingBody to this bucket
        bucket_path = str(self._join_bucket_folder(to_path))
        to_client = self.credentials.get_s3_client()
        await run_sync_in_worker_thread(
            to_client.upload_fileobj,
            Fileobj=body,
            Bucket=self.bucket_name,
            Key=bucket_path,
            **upload_kwargs,
        )
        self.logger.info(
            f"Streamed s3://{bucket.bucket_name}/{from_path} to the bucket "
            f"{self.bucket_name!r} path {bucket_path!r}."
        )
        return bucket_path

    @sync_compatible
    async def upload_from_path(
        self,
        from_path: Union[str, Path],
        to_path: Optional[str] = None,
        **upload_kwargs: Dict[str, Any],
    ) -> str:
        """
        Uploads an object from a path to the S3 bucket.

        Args:
            from_path: The path to the file to upload from.
            to_path: The path to upload the file to. Defaults to the file's name.
            **upload_kwargs: Additional keyword arguments to pass to `Client.upload_file`.

        Returns:
            The path that the object was uploaded to.

        Examples:
            Upload notes.txt to my_folder/notes.txt.
            ```python
            from prefect_aws.s3 import S3Bucket

            s3_bucket = S3Bucket.load("my-bucket")
            s3_bucket.upload_from_path("notes.txt", "my_folder/notes.txt")
            ```
        """
        from_path = str(Path(from_path).absolute())
        if to_path is None:
            to_path = Path(from_path).name

        bucket_path = str(self._join_bucket_folder(to_path))
        client = self.credentials.get_s3_client()

        await run_sync_in_worker_thread(
            client.upload_file,
            Filename=from_path,
            Bucket=self.bucket_name,
            Key=bucket_path,
            **upload_kwargs,
        )
        self.logger.info(
            f"Uploaded from {from_path!r} to the bucket "
            f"{self.bucket_name!r} path {bucket_path!r}."
        )
        return bucket_path

    @sync_compatible
    async def upload_from_file_object(
        self, from_file_object: BinaryIO, to_path: str, **upload_kwargs: Dict[str, Any]
    ) -> str:
        """
        Uploads an object to the S3 bucket from a file-like object,
        which can be a BytesIO object or a BufferedReader.

        Args:
            from_file_object: The file-like object to upload from.
            to_path: The path to upload the object to.
            **upload_kwargs: Additional keyword arguments to pass to
                `Client.upload_fileobj`.

        Returns:
            The path that the object was uploaded to.

        Examples:
            Upload BytesIO object to my_folder/notes.txt.
            ```python
            from io import BytesIO

            from prefect_aws.s3 import S3Bucket

            s3_bucket = S3Bucket.load("my-bucket")
            with open("notes.txt", "rb") as f:
                s3_bucket.upload_from_file_object(f, "my_folder/notes.txt")
            ```

            Upload BufferedReader object to my_folder/notes.txt.
            ```python
            from prefect_aws.s3 import S3Bucket

            s3_bucket = S3Bucket.load("my-bucket")
            with open("notes.txt", "rb") as f:
                s3_bucket.upload_from_file_object(
                    f, "my_folder/notes.txt"
                )
            ```
        """
        bucket_path = str(self._join_bucket_folder(to_path))
        client = self.credentials.get_s3_client()
        await run_sync_in_worker_thread(
            client.upload_fileobj,
            Fileobj=from_file_object,
            Bucket=self.bucket_name,
            Key=bucket_path,
            **upload_kwargs,
        )
        self.logger.info(
            "Uploaded from file object to the bucket "
            f"{self.bucket_name!r} path {bucket_path!r}."
        )
        return bucket_path

    @sync_compatible
    async def upload_from_folder(
        self,
        from_folder: Union[str, Path],
        to_folder: Optional[str] = None,
        **upload_kwargs: Dict[str, Any],
    ) -> str:
        """
        Uploads files *within* a folder (excluding the folder itself)
        to the object storage service folder.

        Args:
            from_folder: The path to the folder to upload from.
            to_folder: The path to upload the folder to.
            **upload_kwargs: Additional keyword arguments to pass to
                `Client.upload_fileobj`.

        Returns:
            The path that the folder was uploaded to.

        Examples:
            Upload contents from my_folder to new_folder.
            ```python
            from prefect_aws.s3 import S3Bucket

            s3_bucket = S3Bucket.load("my-bucket")
            s3_bucket.upload_from_folder("my_folder", "new_folder")
            ```
        """
        from_folder = Path(from_folder)
        bucket_folder = self._join_bucket_folder(to_folder or "")

        num_uploaded = 0
        client = self.credentials.get_s3_client()

        async_coros = []
        for from_path in from_folder.rglob("**/*"):
            # this skips the actual directory itself, e.g.
            # `my_folder/` will be skipped
            # `my_folder/notes.txt` will be uploaded
            if from_path.is_dir():
                continue
            bucket_path = (
                Path(bucket_folder) / from_path.relative_to(from_folder)
            ).as_posix()
            self.logger.info(
                f"Uploading from {str(from_path)!r} to the bucket "
                f"{self.bucket_name!r} path {bucket_path!r}."
            )
            async_coros.append(
                run_sync_in_worker_thread(
                    client.upload_file,
                    Filename=str(from_path),
                    Bucket=self.bucket_name,
                    Key=bucket_path,
                    **upload_kwargs,
                )
            )
            num_uploaded += 1
        await asyncio.gather(*async_coros)

        if num_uploaded == 0:
            self.logger.warning(f"No files were uploaded from {str(from_folder)!r}.")
        else:
            self.logger.info(
                f"Uploaded {num_uploaded} files from {str(from_folder)!r} to "
                f"the bucket {self.bucket_name!r} path {bucket_path!r}"
            )

        return to_folder

    @sync_compatible
    async def copy_object(
        self,
        from_path: Union[str, Path],
        to_path: Union[str, Path],
        to_bucket: Optional[Union["S3Bucket", str]] = None,
        **copy_kwargs,
    ) -> str:
        """Uses S3's internal
        [CopyObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
        to copy objects within or between buckets. To copy objects between buckets,
        `self`'s credentials must have permission to read the source object and write
        to the target object. If the credentials do not have those permissions, try
        using `S3Bucket.stream_from`.

        Args:
            from_path: The path of the object to copy.
            to_path: The path to copy the object to.
            to_bucket: The bucket to copy to. Defaults to the current bucket.
            **copy_kwargs: Additional keyword arguments to pass to
                `S3Client.copy_object`.

        Returns:
            The path that the object was copied to. Excludes the bucket name.

        Examples:

            Copy notes.txt from my_folder/notes.txt to my_folder/notes_copy.txt.

            ```python
            from prefect_aws.s3 import S3Bucket

            s3_bucket = S3Bucket.load("my-bucket")
            s3_bucket.copy_object("my_folder/notes.txt", "my_folder/notes_copy.txt")
            ```

            Copy notes.txt from my_folder/notes.txt to my_folder/notes_copy.txt in
            another bucket.

            ```python
            from prefect_aws.s3 import S3Bucket

            s3_bucket = S3Bucket.load("my-bucket")
            s3_bucket.copy_object(
                "my_folder/notes.txt",
                "my_folder/notes_copy.txt",
                to_bucket="other-bucket"
            )
            ```
        """
        s3_client = self.credentials.get_s3_client()

        source_bucket_name = self.bucket_name
        source_path = self._resolve_path(Path(from_path).as_posix())

        # Default to copying within the same bucket
        to_bucket = to_bucket or self

        target_bucket_name: str
        target_path: str
        if isinstance(to_bucket, S3Bucket):
            target_bucket_name = to_bucket.bucket_name
            target_path = to_bucket._resolve_path(Path(to_path).as_posix())
        elif isinstance(to_bucket, str):
            target_bucket_name = to_bucket
            target_path = Path(to_path).as_posix()
        else:
            raise TypeError(
                f"to_bucket must be a string or S3Bucket, not {type(to_bucket)}"
            )

        self.logger.info(
            "Copying object from bucket %s with key %s to bucket %s with key %s",
            source_bucket_name,
            source_path,
            target_bucket_name,
            target_path,
        )

        s3_client.copy_object(
            CopySource={"Bucket": source_bucket_name, "Key": source_path},
            Bucket=target_bucket_name,
            Key=target_path,
            **copy_kwargs,
        )

        return target_path

    @sync_compatible
    async def move_object(
        self,
        from_path: Union[str, Path],
        to_path: Union[str, Path],
        to_bucket: Optional[Union["S3Bucket", str]] = None,
    ) -> str:
        """Uses S3's internal CopyObject and DeleteObject to move objects within or
        between buckets. To move objects between buckets, `self`'s credentials must
        have permission to read and delete the source object and write to the target
        object. If the credentials do not have those permissions, this method will
        raise an error. If the credentials have permission to read the source object
        but not delete it, the object will be copied but not deleted.

        Args:
            from_path: The path of the object to move.
            to_path: The path to move the object to.
            to_bucket: The bucket to move to. Defaults to the current bucket.

        Returns:
            The path that the object was moved to. Excludes the bucket name.

        Examples:

            Move notes.txt from my_folder/notes.txt to my_folder/notes_copy.txt.

            ```python
            from prefect_aws.s3 import S3Bucket

            s3_bucket = S3Bucket.load("my-bucket")
            s3_bucket.move_object("my_folder/notes.txt", "my_folder/notes_copy.txt")
            ```

            Move notes.txt from my_folder/notes.txt to my_folder/notes_copy.txt in
            another bucket.

            ```python
            from prefect_aws.s3 import S3Bucket

            s3_bucket = S3Bucket.load("my-bucket")
            s3_bucket.move_object(
                "my_folder/notes.txt",
                "my_folder/notes_copy.txt",
                to_bucket="other-bucket"
            )
            ```
        """
        s3_client = self.credentials.get_s3_client()

        source_bucket_name = self.bucket_name
        source_path = self._resolve_path(Path(from_path).as_posix())

        # Default to moving within the same bucket
        to_bucket = to_bucket or self

        target_bucket_name: str
        target_path: str
        if isinstance(to_bucket, S3Bucket):
            target_bucket_name = to_bucket.bucket_name
            target_path = to_bucket._resolve_path(Path(to_path).as_posix())
        elif isinstance(to_bucket, str):
            target_bucket_name = to_bucket
            target_path = Path(to_path).as_posix()
        else:
            raise TypeError(
                f"to_bucket must be a string or S3Bucket, not {type(to_bucket)}"
            )

        self.logger.info(
            "Moving object from s3://%s/%s to s3://%s/%s",
            source_bucket_name,
            source_path,
            target_bucket_name,
            target_path,
        )

        # If invalid, should error and prevent next operation
        s3_client.copy(
            CopySource={"Bucket": source_bucket_name, "Key": source_path},
            Bucket=target_bucket_name,
            Key=target_path,
        )
        s3_client.delete_object(Bucket=source_bucket_name, Key=source_path)
        return target_path

basepath: str property writable

The base path of the S3 bucket.

Returns:

| Name | Type | Description |
|------|------|-------------|
| `str` | `str` | The base path of the S3 bucket. |

copy_object(from_path, to_path, to_bucket=None, **copy_kwargs) async

Uses S3's internal CopyObject to copy objects within or between buckets. To copy objects between buckets, self's credentials must have permission to read the source object and write to the target object. If the credentials do not have those permissions, try using S3Bucket.stream_from.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `from_path` | `Union[str, Path]` | The path of the object to copy. | required |
| `to_path` | `Union[str, Path]` | The path to copy the object to. | required |
| `to_bucket` | `Optional[Union[S3Bucket, str]]` | The bucket to copy to. Defaults to the current bucket. | `None` |
| `**copy_kwargs` | | Additional keyword arguments to pass to `S3Client.copy_object`. | `{}` |

Returns:

| Type | Description |
|------|-------------|
| `str` | The path that the object was copied to. Excludes the bucket name. |

Examples:

Copy notes.txt from my_folder/notes.txt to my_folder/notes_copy.txt.

```python
from prefect_aws.s3 import S3Bucket

s3_bucket = S3Bucket.load("my-bucket")
s3_bucket.copy_object("my_folder/notes.txt", "my_folder/notes_copy.txt")
```

Copy notes.txt from my_folder/notes.txt to my_folder/notes_copy.txt in
another bucket.

```python
from prefect_aws.s3 import S3Bucket

s3_bucket = S3Bucket.load("my-bucket")
s3_bucket.copy_object(
    "my_folder/notes.txt",
    "my_folder/notes_copy.txt",
    to_bucket="other-bucket"
)
```
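
Because `**copy_kwargs` is forwarded to `S3Client.copy_object`, standard CopyObject parameters can be passed through as well. A minimal sketch (the block name, paths, and content type are assumptions):

```python
from prefect_aws.s3 import S3Bucket

s3_bucket = S3Bucket.load("my-bucket")

# MetadataDirective and ContentType are standard CopyObject parameters;
# they are forwarded untouched to S3Client.copy_object.
s3_bucket.copy_object(
    "my_folder/notes.txt",
    "my_folder/notes_copy.txt",
    MetadataDirective="REPLACE",
    ContentType="text/plain",
)
```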
Source code in prefect_aws/s3.py
@sync_compatible
async def copy_object(
    self,
    from_path: Union[str, Path],
    to_path: Union[str, Path],
    to_bucket: Optional[Union["S3Bucket", str]] = None,
    **copy_kwargs,
) -> str:
    """Uses S3's internal
    [CopyObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
    to copy objects within or between buckets. To copy objects between buckets,
    `self`'s credentials must have permission to read the source object and write
    to the target object. If the credentials do not have those permissions, try
    using `S3Bucket.stream_from`.

    Args:
        from_path: The path of the object to copy.
        to_path: The path to copy the object to.
        to_bucket: The bucket to copy to. Defaults to the current bucket.
        **copy_kwargs: Additional keyword arguments to pass to
            `S3Client.copy_object`.

    Returns:
        The path that the object was copied to. Excludes the bucket name.

    Examples:

        Copy notes.txt from my_folder/notes.txt to my_folder/notes_copy.txt.

        ```python
        from prefect_aws.s3 import S3Bucket

        s3_bucket = S3Bucket.load("my-bucket")
        s3_bucket.copy_object("my_folder/notes.txt", "my_folder/notes_copy.txt")
        ```

        Copy notes.txt from my_folder/notes.txt to my_folder/notes_copy.txt in
        another bucket.

        ```python
        from prefect_aws.s3 import S3Bucket

        s3_bucket = S3Bucket.load("my-bucket")
        s3_bucket.copy_object(
            "my_folder/notes.txt",
            "my_folder/notes_copy.txt",
            to_bucket="other-bucket"
        )
        ```
    """
    s3_client = self.credentials.get_s3_client()

    source_bucket_name = self.bucket_name
    source_path = self._resolve_path(Path(from_path).as_posix())

    # Default to copying within the same bucket
    to_bucket = to_bucket or self

    target_bucket_name: str
    target_path: str
    if isinstance(to_bucket, S3Bucket):
        target_bucket_name = to_bucket.bucket_name
        target_path = to_bucket._resolve_path(Path(to_path).as_posix())
    elif isinstance(to_bucket, str):
        target_bucket_name = to_bucket
        target_path = Path(to_path).as_posix()
    else:
        raise TypeError(
            f"to_bucket must be a string or S3Bucket, not {type(to_bucket)}"
        )

    self.logger.info(
        "Copying object from bucket %s with key %s to bucket %s with key %s",
        source_bucket_name,
        source_path,
        target_bucket_name,
        target_path,
    )

    s3_client.copy_object(
        CopySource={"Bucket": source_bucket_name, "Key": source_path},
        Bucket=target_bucket_name,
        Key=target_path,
        **copy_kwargs,
    )

    return target_path

download_folder_to_path(from_folder, to_folder=None, **download_kwargs) async

Downloads objects within a folder (excluding the folder itself) from the S3 bucket to a folder.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `from_folder` | `str` | The path to the folder to download from. | required |
| `to_folder` | `Optional[Union[str, Path]]` | The path to download the folder to. | `None` |
| `**download_kwargs` | `Dict[str, Any]` | Additional keyword arguments to pass to `Client.download_file`. | `{}` |

Returns:

| Type | Description |
|------|-------------|
| `Path` | The absolute path that the folder was downloaded to. |

Examples:

Download my_folder to a local folder named my_folder.

```python
from prefect_aws.s3 import S3Bucket

s3_bucket = S3Bucket.load("my-bucket")
s3_bucket.download_folder_to_path("my_folder", "my_folder")
```
Source code in prefect_aws/s3.py
@sync_compatible
async def download_folder_to_path(
    self,
    from_folder: str,
    to_folder: Optional[Union[str, Path]] = None,
    **download_kwargs: Dict[str, Any],
) -> Path:
    """
    Downloads objects *within* a folder (excluding the folder itself)
    from the S3 bucket to a folder.

    Args:
        from_folder: The path to the folder to download from.
        to_folder: The path to download the folder to.
        **download_kwargs: Additional keyword arguments to pass to
            `Client.download_file`.

    Returns:
        The absolute path that the folder was downloaded to.

    Examples:
        Download my_folder to a local folder named my_folder.
        ```python
        from prefect_aws.s3 import S3Bucket

        s3_bucket = S3Bucket.load("my-bucket")
        s3_bucket.download_folder_to_path("my_folder", "my_folder")
        ```
    """
    if to_folder is None:
        to_folder = ""
    to_folder = Path(to_folder).absolute()

    client = self.credentials.get_s3_client()
    objects = await self.list_objects(folder=from_folder)

    # list_objects already joins the bucket folder to the prefix, so don't
    # join it again before passing `from_folder` there; we still join it
    # here because relative_to below needs the fully prefixed key
    bucket_folder = self._join_bucket_folder(from_folder)

    async_coros = []
    for object in objects:
        bucket_path = Path(object["Key"]).relative_to(bucket_folder)
        # this skips the actual directory itself, e.g.
        # `my_folder/` will be skipped
        # `my_folder/notes.txt` will be downloaded
        if bucket_path.is_dir():
            continue
        to_path = to_folder / bucket_path
        to_path.parent.mkdir(parents=True, exist_ok=True)
        to_path = str(to_path)  # must be string
        self.logger.info(
            f"Downloading object from bucket {self.bucket_name!r} path "
            f"{bucket_path.as_posix()!r} to {to_path!r}."
        )
        async_coros.append(
            run_sync_in_worker_thread(
                client.download_file,
                Bucket=self.bucket_name,
                Key=object["Key"],
                Filename=to_path,
                **download_kwargs,
            )
        )
    await asyncio.gather(*async_coros)

    return Path(to_folder)

download_object_to_file_object(from_path, to_file_object, **download_kwargs) async

Downloads an object from the object storage service to a file-like object, which can be a BytesIO object or a BufferedWriter.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `from_path` | `str` | The path to the object to download from; this gets prefixed with the bucket_folder. | required |
| `to_file_object` | `BinaryIO` | The file-like object to download the object to. | required |
| `**download_kwargs` | `Dict[str, Any]` | Additional keyword arguments to pass to `Client.download_fileobj`. | `{}` |

Returns:

| Type | Description |
|------|-------------|
| `BinaryIO` | The file-like object that the object was downloaded to. |

Examples:

Download my_folder/notes.txt object to a BytesIO object.

```python
from io import BytesIO

from prefect_aws.s3 import S3Bucket

s3_bucket = S3Bucket.load("my-bucket")
with BytesIO() as buf:
    s3_bucket.download_object_to_file_object("my_folder/notes.txt", buf)
```

Download my_folder/notes.txt object to a BufferedWriter.

```python
from prefect_aws.s3 import S3Bucket

s3_bucket = S3Bucket.load("my-bucket")
with open("notes.txt", "wb") as f:
    s3_bucket.download_object_to_file_object("my_folder/notes.txt", f)
```
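
Because `**download_kwargs` is handed to `Client.download_fileobj`, boto3 transfer options such as `ExtraArgs` can also be supplied. A sketch that assumes the bucket has versioning enabled; the version id is a placeholder:

```python
from io import BytesIO

from prefect_aws.s3 import S3Bucket

s3_bucket = S3Bucket.load("my-bucket")

buf = BytesIO()
s3_bucket.download_object_to_file_object(
    "my_folder/notes.txt",
    buf,
    ExtraArgs={"VersionId": "EXAMPLE_VERSION_ID"},  # placeholder version id
)
contents = buf.getvalue()
```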
Source code in prefect_aws/s3.py
@sync_compatible
async def download_object_to_file_object(
    self,
    from_path: str,
    to_file_object: BinaryIO,
    **download_kwargs: Dict[str, Any],
) -> BinaryIO:
    """
    Downloads an object from the object storage service to a file-like object,
    which can be a BytesIO object or a BufferedWriter.

    Args:
        from_path: The path to the object to download from; this gets prefixed
            with the bucket_folder.
        to_file_object: The file-like object to download the object to.
        **download_kwargs: Additional keyword arguments to pass to
            `Client.download_fileobj`.

    Returns:
        The file-like object that the object was downloaded to.

    Examples:
        Download my_folder/notes.txt object to a BytesIO object.
        ```python
        from io import BytesIO

        from prefect_aws.s3 import S3Bucket

        s3_bucket = S3Bucket.load("my-bucket")
        with BytesIO() as buf:
            s3_bucket.download_object_to_file_object("my_folder/notes.txt", buf)
        ```

        Download my_folder/notes.txt object to a BufferedWriter.
        ```python
        from prefect_aws.s3 import S3Bucket

        s3_bucket = S3Bucket.load("my-bucket")
        with open("notes.txt", "wb") as f:
            s3_bucket.download_object_to_file_object("my_folder/notes.txt", f)
        ```
    """
    client = self.credentials.get_s3_client()
    bucket_path = self._join_bucket_folder(from_path)

    self.logger.debug(
        f"Preparing to download object from bucket {self.bucket_name!r} "
        f"path {bucket_path!r} to file object."
    )
    await run_sync_in_worker_thread(
        client.download_fileobj,
        Bucket=self.bucket_name,
        Key=bucket_path,
        Fileobj=to_file_object,
        **download_kwargs,
    )
    self.logger.info(
        f"Downloaded object from bucket {self.bucket_name!r} path {bucket_path!r} "
        "to file object."
    )
    return to_file_object

download_object_to_path(from_path, to_path, **download_kwargs) async

Downloads an object from the S3 bucket to a path.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `from_path` | `str` | The path to the object to download; this gets prefixed with the bucket_folder. | required |
| `to_path` | `Optional[Union[str, Path]]` | The path to download the object to. If not provided, the object's name will be used. | required |
| `**download_kwargs` | `Dict[str, Any]` | Additional keyword arguments to pass to `Client.download_file`. | `{}` |

Returns:

| Type | Description |
|------|-------------|
| `Path` | The absolute path that the object was downloaded to. |

Examples:

Download my_folder/notes.txt object to notes.txt.

```python
from prefect_aws.s3 import S3Bucket

s3_bucket = S3Bucket.load("my-bucket")
s3_bucket.download_object_to_path("my_folder/notes.txt", "notes.txt")
```
Source code in prefect_aws/s3.py
@sync_compatible
async def download_object_to_path(
    self,
    from_path: str,
    to_path: Optional[Union[str, Path]],
    **download_kwargs: Dict[str, Any],
) -> Path:
    """
    Downloads an object from the S3 bucket to a path.

    Args:
        from_path: The path to the object to download; this gets prefixed
            with the bucket_folder.
        to_path: The path to download the object to. If not provided, the
            object's name will be used.
        **download_kwargs: Additional keyword arguments to pass to
            `Client.download_file`.

    Returns:
        The absolute path that the object was downloaded to.

    Examples:
        Download my_folder/notes.txt object to notes.txt.
        ```python
        from prefect_aws.s3 import S3Bucket

        s3_bucket = S3Bucket.load("my-bucket")
        s3_bucket.download_object_to_path("my_folder/notes.txt", "notes.txt")
        ```
    """
    if to_path is None:
        to_path = Path(from_path).name

    # making path absolute, but converting back to str here
    # since !r looks nicer that way and filename arg expects str
    to_path = str(Path(to_path).absolute())
    bucket_path = self._join_bucket_folder(from_path)
    client = self.credentials.get_s3_client()

    self.logger.debug(
        f"Preparing to download object from bucket {self.bucket_name!r} "
        f"path {bucket_path!r} to {to_path!r}."
    )
    await run_sync_in_worker_thread(
        client.download_file,
        Bucket=self.bucket_name,
        Key=bucket_path,
        Filename=to_path,
        **download_kwargs,
    )
    self.logger.info(
        f"Downloaded object from bucket {self.bucket_name!r} path {bucket_path!r} "
        f"to {to_path!r}."
    )
    return Path(to_path)

get_directory(from_path=None, local_path=None) async

Copies a folder from the configured S3 bucket to a local directory.

Defaults to copying the entire contents of the block's basepath to the current working directory.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `from_path` | `Optional[str]` | Path in S3 bucket to download from. Defaults to the block's configured basepath. | `None` |
| `local_path` | `Optional[str]` | Local path to download S3 contents to. Defaults to the current working directory. | `None` |
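
Examples:

Pull the block's contents into a local directory (a minimal sketch; the block name and paths are assumptions):

```python
from prefect_aws.s3 import S3Bucket

s3_bucket = S3Bucket.load("my-bucket")

# Copy everything under the block's configured basepath into ./flow_files
s3_bucket.get_directory(local_path="flow_files")

# Or copy only a specific prefix
s3_bucket.get_directory(from_path="my_folder", local_path="flow_files")
```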
Source code in prefect_aws/s3.py
@sync_compatible
async def get_directory(
    self, from_path: Optional[str] = None, local_path: Optional[str] = None
) -> None:
    """
    Copies a folder from the configured S3 bucket to a local directory.

    Defaults to copying the entire contents of the block's basepath to the current
    working directory.

    Args:
        from_path: Path in S3 bucket to download from. Defaults to the block's
            configured basepath.
        local_path: Local path to download S3 contents to. Defaults to the current
            working directory.
    """
    bucket_folder = self.bucket_folder
    if from_path is None:
        from_path = str(bucket_folder) if bucket_folder else ""

    if local_path is None:
        local_path = str(Path(".").absolute())
    else:
        local_path = str(Path(local_path).expanduser())

    bucket = self._get_bucket_resource()
    for obj in bucket.objects.filter(Prefix=from_path):
        if obj.key[-1] == "/":
            # object is a folder and will be created if it contains any objects
            continue
        target = os.path.join(
            local_path,
            os.path.relpath(obj.key, from_path),
        )
        os.makedirs(os.path.dirname(target), exist_ok=True)
        bucket.download_file(obj.key, target)

list_objects(folder='', delimiter='', page_size=None, max_items=None, jmespath_query=None) async

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `folder` | `str` | Folder to list objects from. | `''` |
| `delimiter` | `str` | Character used to group keys of listed objects. | `''` |
| `page_size` | `Optional[int]` | Number of objects to return in each request to the AWS API. | `None` |
| `max_items` | `Optional[int]` | Maximum number of objects to be returned. | `None` |
| `jmespath_query` | `Optional[str]` | Query used to filter objects based on object attributes; refer to the boto3 docs for more information on how to construct queries. | `None` |

Returns:

| Type | Description |
|------|-------------|
| `List[Dict[str, Any]]` | List of objects and their metadata in the bucket. |

Examples:

List objects under the base_folder.

```python
from prefect_aws.s3 import S3Bucket

s3_bucket = S3Bucket.load("my-bucket")
s3_bucket.list_objects("base_folder")
```
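
The pagination and filtering options can be combined. A minimal sketch of restricting results with a JMESPath expression (the folder name and size threshold are assumptions):

```python
from prefect_aws.s3 import S3Bucket

s3_bucket = S3Bucket.load("my-bucket")

# Return at most 100 objects larger than 1 KB, fetching 50 keys per request.
objects = s3_bucket.list_objects(
    folder="base_folder",
    jmespath_query="Contents[?Size > `1024`]",
    max_items=100,
    page_size=50,
)
```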
Source code in prefect_aws/s3.py
@sync_compatible
async def list_objects(
    self,
    folder: str = "",
    delimiter: str = "",
    page_size: Optional[int] = None,
    max_items: Optional[int] = None,
    jmespath_query: Optional[str] = None,
) -> List[Dict[str, Any]]:
    """
    Args:
        folder: Folder to list objects from.
        delimiter: Character used to group keys of listed objects.
        page_size: Number of objects to return in each request to the AWS API.
        max_items: Maximum number of objects to be returned.
        jmespath_query: Query used to filter objects based on object attributes; refer to
            the [boto3 docs](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/paginators.html#filtering-results-with-jmespath)
            for more information on how to construct queries.

    Returns:
        List of objects and their metadata in the bucket.

    Examples:
        List objects under the `base_folder`.
        ```python
        from prefect_aws.s3 import S3Bucket

        s3_bucket = S3Bucket.load("my-bucket")
        s3_bucket.list_objects("base_folder")
        ```
    """  # noqa: E501
    bucket_path = self._join_bucket_folder(folder)
    client = self.credentials.get_s3_client()
    paginator = client.get_paginator("list_objects_v2")
    page_iterator = paginator.paginate(
        Bucket=self.bucket_name,
        Prefix=bucket_path,
        Delimiter=delimiter,
        PaginationConfig={"PageSize": page_size, "MaxItems": max_items},
    )
    if jmespath_query:
        page_iterator = page_iterator.search(f"{jmespath_query} | {{Contents: @}}")

    self.logger.info(f"Listing objects in bucket {bucket_path}.")
    objects = await run_sync_in_worker_thread(
        self._list_objects_sync, page_iterator
    )
    return objects

move_object(from_path, to_path, to_bucket=None) async

Uses S3's internal CopyObject and DeleteObject to move objects within or between buckets. To move objects between buckets, self's credentials must have permission to read and delete the source object and write to the target object. If the credentials do not have those permissions, this method will raise an error. If the credentials have permission to read the source object but not delete it, the object will be copied but not deleted.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `from_path` | `Union[str, Path]` | The path of the object to move. | required |
| `to_path` | `Union[str, Path]` | The path to move the object to. | required |
| `to_bucket` | `Optional[Union[S3Bucket, str]]` | The bucket to move to. Defaults to the current bucket. | `None` |

Returns:

| Type | Description |
|------|-------------|
| `str` | The path that the object was moved to. Excludes the bucket name. |

Examples:

Move notes.txt from my_folder/notes.txt to my_folder/notes_copy.txt.

```python
from prefect_aws.s3 import S3Bucket

s3_bucket = S3Bucket.load("my-bucket")
s3_bucket.move_object("my_folder/notes.txt", "my_folder/notes_copy.txt")
```

Move notes.txt from my_folder/notes.txt to my_folder/notes_copy.txt in
another bucket.

```python
from prefect_aws.s3 import S3Bucket

s3_bucket = S3Bucket.load("my-bucket")
s3_bucket.move_object(
    "my_folder/notes.txt",
    "my_folder/notes_copy.txt",
    to_bucket="other-bucket"
)
```
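
A common pattern is to pair `list_objects` with `move_object` to archive files after processing. A rough sketch (folder names are assumptions, and nested subfolders are flattened here):

```python
from prefect_aws.s3 import S3Bucket

s3_bucket = S3Bucket.load("my-bucket")

# Move everything under incoming/ to processed/, keeping the file names.
for obj in s3_bucket.list_objects("incoming"):
    name = obj["Key"].split("/")[-1]
    s3_bucket.move_object(f"incoming/{name}", f"processed/{name}")
```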
Source code in prefect_aws/s3.py
@sync_compatible
async def move_object(
    self,
    from_path: Union[str, Path],
    to_path: Union[str, Path],
    to_bucket: Optional[Union["S3Bucket", str]] = None,
) -> str:
    """Uses S3's internal CopyObject and DeleteObject to move objects within or
    between buckets. To move objects between buckets, `self`'s credentials must
    have permission to read and delete the source object and write to the target
    object. If the credentials do not have those permissions, this method will
    raise an error. If the credentials have permission to read the source object
    but not delete it, the object will be copied but not deleted.

    Args:
        from_path: The path of the object to move.
        to_path: The path to move the object to.
        to_bucket: The bucket to move to. Defaults to the current bucket.

    Returns:
        The path that the object was moved to. Excludes the bucket name.

    Examples:

        Move notes.txt from my_folder/notes.txt to my_folder/notes_copy.txt.

        ```python
        from prefect_aws.s3 import S3Bucket

        s3_bucket = S3Bucket.load("my-bucket")
        s3_bucket.move_object("my_folder/notes.txt", "my_folder/notes_copy.txt")
        ```

        Move notes.txt from my_folder/notes.txt to my_folder/notes_copy.txt in
        another bucket.

        ```python
        from prefect_aws.s3 import S3Bucket

        s3_bucket = S3Bucket.load("my-bucket")
        s3_bucket.move_object(
            "my_folder/notes.txt",
            "my_folder/notes_copy.txt",
            to_bucket="other-bucket"
        )
        ```
    """
    s3_client = self.credentials.get_s3_client()

    source_bucket_name = self.bucket_name
    source_path = self._resolve_path(Path(from_path).as_posix())

    # Default to moving within the same bucket
    to_bucket = to_bucket or self

    target_bucket_name: str
    target_path: str
    if isinstance(to_bucket, S3Bucket):
        target_bucket_name = to_bucket.bucket_name
        target_path = to_bucket._resolve_path(Path(to_path).as_posix())
    elif isinstance(to_bucket, str):
        target_bucket_name = to_bucket
        target_path = Path(to_path).as_posix()
    else:
        raise TypeError(
            f"to_bucket must be a string or S3Bucket, not {type(to_bucket)}"
        )

    self.logger.info(
        "Moving object from s3://%s/%s to s3://%s/%s",
        source_bucket_name,
        source_path,
        target_bucket_name,
        target_path,
    )

    # If invalid, should error and prevent next operation
    s3_client.copy(
        CopySource={"Bucket": source_bucket_name, "Key": source_path},
        Bucket=target_bucket_name,
        Key=target_path,
    )
    s3_client.delete_object(Bucket=source_bucket_name, Key=source_path)
    return target_path

put_directory(local_path=None, to_path=None, ignore_file=None) async

Uploads a directory from a given local path to the configured S3 bucket in a given folder.

Defaults to uploading the entire contents of the current working directory to the block's basepath.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `local_path` | `Optional[str]` | Path to local directory to upload from. | `None` |
| `to_path` | `Optional[str]` | Path in S3 bucket to upload to. Defaults to block's configured basepath. | `None` |
| `ignore_file` | `Optional[str]` | Path to file containing gitignore style expressions for filepaths to ignore. | `None` |
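
Examples:

Upload a project directory while honouring a gitignore-style ignore file (a minimal sketch; the paths and the `.prefectignore` file name are assumptions):

```python
from prefect_aws.s3 import S3Bucket

s3_bucket = S3Bucket.load("my-bucket")

# Upload ./my_project under "deployments" in the block's basepath,
# skipping anything matched by the ignore patterns.
uploaded = s3_bucket.put_directory(
    local_path="my_project",
    to_path="deployments",
    ignore_file="my_project/.prefectignore",
)
print(f"Uploaded {uploaded} files")
```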
Source code in prefect_aws/s3.py
@sync_compatible
async def put_directory(
    self,
    local_path: Optional[str] = None,
    to_path: Optional[str] = None,
    ignore_file: Optional[str] = None,
) -> int:
    """
    Uploads a directory from a given local path to the configured S3 bucket in a
    given folder.

    Defaults to uploading the entire contents of the current working directory to the
    block's basepath.

    Args:
        local_path: Path to local directory to upload from.
        to_path: Path in S3 bucket to upload to. Defaults to block's configured
            basepath.
        ignore_file: Path to file containing gitignore style expressions for
            filepaths to ignore.

    """
    to_path = "" if to_path is None else to_path

    if local_path is None:
        local_path = "."

    included_files = None
    if ignore_file:
        with open(ignore_file, "r") as f:
            ignore_patterns = f.readlines()

        included_files = filter_files(local_path, ignore_patterns)

    uploaded_file_count = 0
    for local_file_path in Path(local_path).expanduser().rglob("*"):
        if (
            included_files is not None
            and str(local_file_path.relative_to(local_path)) not in included_files
        ):
            continue
        elif not local_file_path.is_dir():
            remote_file_path = Path(to_path) / local_file_path.relative_to(
                local_path
            )
            with open(local_file_path, "rb") as local_file:
                local_file_content = local_file.read()

            await self.write_path(
                remote_file_path.as_posix(), content=local_file_content
            )
            uploaded_file_count += 1

    return uploaded_file_count

read_path(path) async

Read specified path from S3 and return contents. Provide the entire path to the key in S3.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `path` | `str` | Entire path to (and including) the key. | required |
Example

Read "subfolder/file1" contents from an S3 bucket named "bucket":

```python
from prefect_aws import AwsCredentials
from prefect_aws.s3 import S3Bucket

aws_creds = AwsCredentials(
    aws_access_key_id=AWS_ACCESS_KEY_ID,
    aws_secret_access_key=AWS_SECRET_ACCESS_KEY
)

s3_bucket_block = S3Bucket(
    bucket_name="bucket",
    credentials=aws_creds,
    bucket_folder="subfolder"
)

key_contents = s3_bucket_block.read_path(path="subfolder/file1")
```
Source code in prefect_aws/s3.py
@sync_compatible
async def read_path(self, path: str) -> bytes:
    """
    Read specified path from S3 and return contents. Provide the entire
    path to the key in S3.

    Args:
        path: Entire path to (and including) the key.

    Example:
        Read "subfolder/file1" contents from an S3 bucket named "bucket":
        ```python
        from prefect_aws import AwsCredentials
        from prefect_aws.s3 import S3Bucket

        aws_creds = AwsCredentials(
            aws_access_key_id=AWS_ACCESS_KEY_ID,
            aws_secret_access_key=AWS_SECRET_ACCESS_KEY
        )

        s3_bucket_block = S3Bucket(
            bucket_name="bucket",
            credentials=aws_creds,
            bucket_folder="subfolder"
        )

        key_contents = s3_bucket_block.read_path(path="subfolder/file1")
        ```
    """
    path = self._resolve_path(path)

    return await run_sync_in_worker_thread(self._read_sync, path)

stream_from(bucket, from_path, to_path=None, **upload_kwargs) async

Streams an object from another bucket to this bucket. Requires the object to be downloaded and uploaded in chunks. If self's credentials allow for writes to the other bucket, try using S3Bucket.copy_object.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `bucket` | `S3Bucket` | The bucket to stream from. | required |
| `from_path` | `str` | The path of the object to stream. | required |
| `to_path` | `Optional[str]` | The path to stream the object to. Defaults to the object's name. | `None` |
| `**upload_kwargs` | `Dict[str, Any]` | Additional keyword arguments to pass to `Client.upload_fileobj`. | `{}` |

Returns:

| Type | Description |
|------|-------------|
| `str` | The path that the object was uploaded to. |

Examples:

Stream notes.txt from your-bucket/notes.txt to my-bucket/landed/notes.txt.

```python
from prefect_aws.s3 import S3Bucket

your_s3_bucket = S3Bucket.load("your-bucket")
my_s3_bucket = S3Bucket.load("my-bucket")

my_s3_bucket.stream_from(
    your_s3_bucket,
    "notes.txt",
    to_path="landed/notes.txt"
)
```
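
Because `**upload_kwargs` is passed to `Client.upload_fileobj` on the destination side, transfer options such as `ExtraArgs` can be set on the write. A sketch (block names and content type are assumptions):

```python
from prefect_aws.s3 import S3Bucket

your_s3_bucket = S3Bucket.load("your-bucket")
my_s3_bucket = S3Bucket.load("my-bucket")

# ExtraArgs is forwarded to upload_fileobj for the destination object.
my_s3_bucket.stream_from(
    your_s3_bucket,
    "notes.txt",
    to_path="landed/notes.txt",
    ExtraArgs={"ContentType": "text/plain"},
)
```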
Source code in prefect_aws/s3.py
@sync_compatible
async def stream_from(
    self,
    bucket: "S3Bucket",
    from_path: str,
    to_path: Optional[str] = None,
    **upload_kwargs: Dict[str, Any],
) -> str:
    """Streams an object from another bucket to this bucket. Requires the
    object to be downloaded and uploaded in chunks. If `self`'s credentials
    allow for writes to the other bucket, try using `S3Bucket.copy_object`.

    Args:
        bucket: The bucket to stream from.
        from_path: The path of the object to stream.
        to_path: The path to stream the object to. Defaults to the object's name.
        **upload_kwargs: Additional keyword arguments to pass to
            `Client.upload_fileobj`.

    Returns:
        The path that the object was uploaded to.

    Examples:
        Stream notes.txt from your-bucket/notes.txt to my-bucket/landed/notes.txt.

        ```python
        from prefect_aws.s3 import S3Bucket

        your_s3_bucket = S3Bucket.load("your-bucket")
        my_s3_bucket = S3Bucket.load("my-bucket")

        my_s3_bucket.stream_from(
            your_s3_bucket,
            "notes.txt",
            to_path="landed/notes.txt"
        )
        ```

    """
    if to_path is None:
        to_path = Path(from_path).name

    # Get the source object's StreamingBody
    from_path: str = bucket._join_bucket_folder(from_path)
    from_client = bucket.credentials.get_s3_client()
    obj = await run_sync_in_worker_thread(
        from_client.get_object, Bucket=bucket.bucket_name, Key=from_path
    )
    body: StreamingBody = obj["Body"]

    # Upload the StreamingBody to this bucket
    bucket_path = str(self._join_bucket_folder(to_path))
    to_client = self.credentials.get_s3_client()
    await run_sync_in_worker_thread(
        to_client.upload_fileobj,
        Fileobj=body,
        Bucket=self.bucket_name,
        Key=bucket_path,
        **upload_kwargs,
    )
    self.logger.info(
        f"Streamed s3://{bucket.bucket_name}/{from_path} to the bucket "
        f"{self.bucket_name!r} path {bucket_path!r}."
    )
    return bucket_path
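
The docstring above points to `S3Bucket.copy_object` as an alternative when a server-side copy is possible. The following is a hedged sketch, not taken from the library docs: it assumes this version's `copy_object(from_path, to_path, to_bucket=...)` signature and saved blocks named "your-bucket" and "my-bucket".

```python
from prefect_aws.s3 import S3Bucket

your_s3_bucket = S3Bucket.load("your-bucket")  # source block
my_s3_bucket = S3Bucket.load("my-bucket")      # destination block

# Server-side copy: S3 copies the object itself, so no bytes are
# downloaded to or uploaded from this machine. Assumes the source
# block's credentials can read the source key and write to the
# destination bucket.
your_s3_bucket.copy_object(
    "notes.txt",
    "landed/notes.txt",
    to_bucket=my_s3_bucket,
)
```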

upload_from_file_object(from_file_object, to_path, **upload_kwargs) async

Uploads an object to the S3 bucket from a file-like object, which can be a BytesIO object or a BufferedReader.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `from_file_object` | `BinaryIO` | The file-like object to upload from. | *required* |
| `to_path` | `str` | The path to upload the object to. | *required* |
| `**upload_kwargs` | `Dict[str, Any]` | Additional keyword arguments to pass to `Client.upload_fileobj`. | `{}` |

Returns:

| Type | Description |
| --- | --- |
| `str` | The path that the object was uploaded to. |

Examples:

Upload BytesIO object to my_folder/notes.txt.

from io import BytesIO

from prefect_aws.s3 import S3Bucket

s3_bucket = S3Bucket.load("my-bucket")
with open("notes.txt", "rb") as f:
    s3_bucket.upload_from_file_object(BytesIO(f.read()), "my_folder/notes.txt")

Upload BufferedReader object to my_folder/notes.txt.

from prefect_aws.s3 import S3Bucket

s3_bucket = S3Bucket.load("my-bucket")
with open("notes.txt", "rb") as f:
    s3_bucket.upload_from_file_object(
        f, "my_folder/notes.txt"
    )
Source code in prefect_aws/s3.py
@sync_compatible
async def upload_from_file_object(
    self, from_file_object: BinaryIO, to_path: str, **upload_kwargs: Dict[str, Any]
) -> str:
    """
    Uploads an object to the S3 bucket from a file-like object,
    which can be a BytesIO object or a BufferedReader.

    Args:
        from_file_object: The file-like object to upload from.
        to_path: The path to upload the object to.
        **upload_kwargs: Additional keyword arguments to pass to
            `Client.upload_fileobj`.

    Returns:
        The path that the object was uploaded to.

    Examples:
        Upload BytesIO object to my_folder/notes.txt.
        ```python
        from io import BytesIO

        from prefect_aws.s3 import S3Bucket

        s3_bucket = S3Bucket.load("my-bucket")
        with open("notes.txt", "rb") as f:
            s3_bucket.upload_from_file_object(BytesIO(f.read()), "my_folder/notes.txt")
        ```

        Upload BufferedReader object to my_folder/notes.txt.
        ```python
        from prefect_aws.s3 import S3Bucket

        s3_bucket = S3Bucket.load("my-bucket")
        with open("notes.txt", "rb") as f:
            s3_bucket.upload_from_file_object(
                f, "my_folder/notes.txt"
            )
        ```
    """
    bucket_path = str(self._join_bucket_folder(to_path))
    client = self.credentials.get_s3_client()
    await run_sync_in_worker_thread(
        client.upload_fileobj,
        Fileobj=from_file_object,
        Bucket=self.bucket_name,
        Key=bucket_path,
        **upload_kwargs,
    )
    self.logger.info(
        "Uploaded from file object to the bucket "
        f"{self.bucket_name!r} path {bucket_path!r}."
    )
    return bucket_path
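
A further sketch (not from the docstring): since `**upload_kwargs` is forwarded to boto3's `Client.upload_fileobj`, arguments such as `ExtraArgs` can set object metadata. The block name and payload below are hypothetical.

```python
from io import BytesIO

from prefect_aws.s3 import S3Bucket

s3_bucket = S3Bucket.load("my-bucket")  # hypothetical saved block

# Upload an in-memory payload; ExtraArgs is passed through to
# boto3's Client.upload_fileobj.
payload = BytesIO(b'{"status": "ok"}')
key = s3_bucket.upload_from_file_object(
    payload,
    "my_folder/status.json",
    ExtraArgs={"ContentType": "application/json"},
)
```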

upload_from_folder(from_folder, to_folder=None, **upload_kwargs) async

Uploads files within a folder (excluding the folder itself) to the object storage service folder.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `from_folder` | `Union[str, Path]` | The path to the folder to upload from. | *required* |
| `to_folder` | `Optional[str]` | The path to upload the folder to. | `None` |
| `**upload_kwargs` | `Dict[str, Any]` | Additional keyword arguments to pass to `Client.upload_file`. | `{}` |

Returns:

| Type | Description |
| --- | --- |
| `str` | The path that the folder was uploaded to. |

Examples:

Upload contents from my_folder to new_folder.

from prefect_aws.s3 import S3Bucket

s3_bucket = S3Bucket.load("my-bucket")
s3_bucket.upload_from_folder("my_folder", "new_folder")
Source code in prefect_aws/s3.py
@sync_compatible
async def upload_from_folder(
    self,
    from_folder: Union[str, Path],
    to_folder: Optional[str] = None,
    **upload_kwargs: Dict[str, Any],
) -> str:
    """
    Uploads files *within* a folder (excluding the folder itself)
    to the object storage service folder.

    Args:
        from_folder: The path to the folder to upload from.
        to_folder: The path to upload the folder to.
        **upload_kwargs: Additional keyword arguments to pass to
            `Client.upload_file`.

    Returns:
        The path that the folder was uploaded to.

    Examples:
        Upload contents from my_folder to new_folder.
        ```python
        from prefect_aws.s3 import S3Bucket

        s3_bucket = S3Bucket.load("my-bucket")
        s3_bucket.upload_from_folder("my_folder", "new_folder")
        ```
    """
    from_folder = Path(from_folder)
    bucket_folder = self._join_bucket_folder(to_folder or "")

    num_uploaded = 0
    client = self.credentials.get_s3_client()

    async_coros = []
    for from_path in from_folder.rglob("**/*"):
        # this skips the actual directory itself, e.g.
        # `my_folder/` will be skipped
        # `my_folder/notes.txt` will be uploaded
        if from_path.is_dir():
            continue
        bucket_path = (
            Path(bucket_folder) / from_path.relative_to(from_folder)
        ).as_posix()
        self.logger.info(
            f"Uploading from {str(from_path)!r} to the bucket "
            f"{self.bucket_name!r} path {bucket_path!r}."
        )
        async_coros.append(
            run_sync_in_worker_thread(
                client.upload_file,
                Filename=str(from_path),
                Bucket=self.bucket_name,
                Key=bucket_path,
                **upload_kwargs,
            )
        )
        num_uploaded += 1
    await asyncio.gather(*async_coros)

    if num_uploaded == 0:
        self.logger.warning(f"No files were uploaded from {str(from_folder)!r}.")
    else:
        self.logger.info(
            f"Uploaded {num_uploaded} files from {str(from_folder)!r} to "
            f"the bucket {self.bucket_name!r} path {bucket_path!r}"
        )

    return to_folder
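
A hedged sketch (not part of the docstring) of how keys are laid out: files keep their paths relative to `from_folder`, prefixed by `to_folder` and by the block's `bucket_folder` if one is set, and `**upload_kwargs` is forwarded to boto3's `Client.upload_file`. The folder layout and block name are hypothetical.

```python
from prefect_aws.s3 import S3Bucket

s3_bucket = S3Bucket.load("my-bucket")  # hypothetical saved block

# Given a local tree like:
#   my_folder/notes.txt
#   my_folder/reports/jan.csv
# the resulting keys are new_folder/notes.txt and new_folder/reports/jan.csv
# (prefixed by bucket_folder if the block defines one).
s3_bucket.upload_from_folder(
    "my_folder",
    to_folder="new_folder",
    ExtraArgs={"ServerSideEncryption": "AES256"},  # forwarded to Client.upload_file
)
```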

upload_from_path(from_path, to_path=None, **upload_kwargs) async

Uploads an object from a path to the S3 bucket.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `from_path` | `Union[str, Path]` | The path to the file to upload from. | *required* |
| `to_path` | `Optional[str]` | The path to upload the file to. | `None` |
| `**upload_kwargs` | `Dict[str, Any]` | Additional keyword arguments to pass to `Client.upload_file`. | `{}` |

Returns:

| Type | Description |
| --- | --- |
| `str` | The path that the object was uploaded to. |

Examples:

Upload notes.txt to my_folder/notes.txt.

from prefect_aws.s3 import S3Bucket

s3_bucket = S3Bucket.load("my-bucket")
s3_bucket.upload_from_path("notes.txt", "my_folder/notes.txt")
Source code in prefect_aws/s3.py
@sync_compatible
async def upload_from_path(
    self,
    from_path: Union[str, Path],
    to_path: Optional[str] = None,
    **upload_kwargs: Dict[str, Any],
) -> str:
    """
    Uploads an object from a path to the S3 bucket.

    Args:
        from_path: The path to the file to upload from.
        to_path: The path to upload the file to.
        **upload_kwargs: Additional keyword arguments to pass to
            `Client.upload_file`.

    Returns:
        The path that the object was uploaded to.

    Examples:
        Upload notes.txt to my_folder/notes.txt.
        ```python
        from prefect_aws.s3 import S3Bucket

        s3_bucket = S3Bucket.load("my-bucket")
        s3_bucket.upload_from_path("notes.txt", "my_folder/notes.txt")
        ```
    """
    from_path = str(Path(from_path).absolute())
    if to_path is None:
        to_path = Path(from_path).name

    bucket_path = str(self._join_bucket_folder(to_path))
    client = self.credentials.get_s3_client()

    await run_sync_in_worker_thread(
        client.upload_file,
        Filename=from_path,
        Bucket=self.bucket_name,
        Key=bucket_path,
        **upload_kwargs,
    )
    self.logger.info(
        f"Uploaded from {from_path!r} to the bucket "
        f"{self.bucket_name!r} path {bucket_path!r}."
    )
    return bucket_path
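
An additional sketch (hypothetical block and file names): when `to_path` is omitted the key defaults to the file's name, and `**upload_kwargs` is forwarded to boto3's `Client.upload_file`.

```python
from prefect_aws.s3 import S3Bucket

s3_bucket = S3Bucket.load("my-bucket")  # hypothetical saved block

# With to_path omitted, the key defaults to the file's name ("notes.txt"),
# prefixed by the block's bucket_folder if one is set.
uploaded_key = s3_bucket.upload_from_path("notes.txt")

# upload_kwargs are forwarded to boto3's Client.upload_file.
s3_bucket.upload_from_path(
    "notes.txt",
    "my_folder/notes.txt",
    ExtraArgs={"ContentType": "text/plain"},
)
```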

write_path(path, content) async

Writes to an S3 bucket.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `path` | `str` | The key name. Each object in your bucket has a unique key (or key name). | *required* |
| `content` | `bytes` | What you are uploading to S3. | *required* |

Example

Write data to the path `dogs/small_dogs/havanese` in an S3 Bucket:

from prefect_aws import MinIOCredentials
from prefect_aws.client_parameters import AwsClientParameters
from prefect_aws.s3 import S3Bucket

minio_creds = MinIOCredentials(
    minio_root_user="minioadmin",
    minio_root_password="minioadmin",
    aws_client_parameters=AwsClientParameters(
        endpoint_url="http://localhost:9000"
    ),
)

s3_bucket_block = S3Bucket(
    bucket_name="bucket",
    credentials=minio_creds,
    bucket_folder="dogs/small_dogs",
)

data = b"a very good dog"
s3_havanese_path = s3_bucket_block.write_path(path="havanese", content=data)
Source code in prefect_aws/s3.py
@sync_compatible
async def write_path(self, path: str, content: bytes) -> str:
    """
    Writes to an S3 bucket.

    Args:
        path: The key name. Each object in your bucket has a unique
            key (or key name).
        content: What you are uploading to S3.

    Example:
        Write data to the path `dogs/small_dogs/havanese` in an S3 Bucket:
        ```python
        from prefect_aws import MinIOCredentials
        from prefect_aws.client_parameters import AwsClientParameters
        from prefect_aws.s3 import S3Bucket

        minio_creds = MinIOCredentials(
            minio_root_user="minioadmin",
            minio_root_password="minioadmin",
            aws_client_parameters=AwsClientParameters(
                endpoint_url="http://localhost:9000"
            ),
        )

        s3_bucket_block = S3Bucket(
            bucket_name="bucket",
            credentials=minio_creds,
            bucket_folder="dogs/small_dogs",
        )

        data = b"a very good dog"
        s3_havanese_path = s3_bucket_block.write_path(path="havanese", content=data)
        ```
    """

    path = self._resolve_path(path)

    await run_sync_in_worker_thread(self._write_sync, path, content)

    return path
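
A closing sketch (illustrative only; the credentials and key are placeholders): `write_path` expects `bytes` and returns the resolved key, which can be read back with `read_path`.

```python
from prefect_aws import AwsCredentials
from prefect_aws.s3 import S3Bucket

# Placeholder credentials and bucket, for illustration only.
aws_creds = AwsCredentials(
    aws_access_key_id="YOUR_ACCESS_KEY_ID",
    aws_secret_access_key="YOUR_SECRET_ACCESS_KEY",
)
s3_bucket_block = S3Bucket(bucket_name="bucket", credentials=aws_creds)

# content must be bytes; the resolved key is returned.
written_key = s3_bucket_block.write_path(path="reports/summary.txt", content=b"hello")
assert s3_bucket_block.read_path(path="reports/summary.txt") == b"hello"
```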