diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
new file mode 100644
index 0000000..3612c14
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -0,0 +1,91 @@
+name: 🐛 Bug Report
+description: Report a bug or unexpected behavior
+title: "[Bug]: "
+labels: ["bug"]
+body:
+ - type: markdown
+ attributes:
+ value: |
+ Thanks for reporting a bug! Please fill out the information below.
+
+ - type: textarea
+ id: expected
+ attributes:
+ label: Expected Behavior
+ description: What did you expect to happen?
+ placeholder: I expected...
+ validations:
+ required: true
+
+ - type: textarea
+ id: actual
+ attributes:
+ label: Actual Behavior
+ description: What actually happened?
+ placeholder: Instead, what happened was...
+ validations:
+ required: true
+
+ - type: textarea
+ id: reproduce
+ attributes:
+ label: Steps to Reproduce
+ description: Provide steps to reproduce the issue
+ placeholder: |
+ 1.
+ 2.
+ 3.
+ validations:
+ required: true
+
+ - type: input
+ id: sdk-version
+ attributes:
+ label: SDK Version
+ description: What version of the SDK are you using?
+ placeholder: e.g., 1.0.0
+ validations:
+ required: true
+
+ - type: dropdown
+ id: python-version
+ attributes:
+ label: Python Version
+ description: What version of Python are you using?
+ options:
+ - "3.14"
+ - "3.13"
+ - "3.12"
+ - "3.11"
+ - Other (specify in additional context)
+ validations:
+ required: true
+
+ - type: dropdown
+ id: regression
+ attributes:
+ label: Is this a regression?
+ description: Did this work in a previous version?
+ options:
+ - "No"
+ - "Yes"
+ validations:
+ required: true
+
+ - type: input
+ id: worked-version
+ attributes:
+ label: Last Working Version
+ description: If this is a regression, what version did this work in?
+ placeholder: e.g., 0.9.0
+ validations:
+ required: false
+
+ - type: textarea
+ id: context
+ attributes:
+ label: Additional Context
+ description: Add any other context, logs, or screenshots
+ placeholder: Additional information...
+ validations:
+ required: false
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 0000000..aa6cbb7
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,5 @@
+blank_issues_enabled: false
+contact_links:
+ - name: Ask a question
+ url: https://github.com/aws/aws-durable-execution-sdk-python/discussions/new
+ about: Ask a general question about the AWS Durable Execution SDK for Python
diff --git a/.github/ISSUE_TEMPLATE/documentation.yml b/.github/ISSUE_TEMPLATE/documentation.yml
new file mode 100644
index 0000000..cdd8e3e
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/documentation.yml
@@ -0,0 +1,36 @@
+name: 📚 Documentation Issue
+description: Report an issue with documentation
+title: "[Docs]: "
+labels: ["documentation"]
+body:
+ - type: markdown
+ attributes:
+ value: |
+ Thanks for helping improve our documentation!
+
+ - type: textarea
+ id: issue
+ attributes:
+ label: Issue
+ description: Describe the documentation issue
+ placeholder: The documentation says... but it should say...
+ validations:
+ required: true
+
+ - type: input
+ id: page
+ attributes:
+ label: Page/Location
+ description: Link to the page or specify where in the docs this occurs
+ placeholder: https://... or README.md section "..."
+ validations:
+ required: true
+
+ - type: textarea
+ id: fix
+ attributes:
+ label: Suggested Fix
+ description: How should this be corrected?
+ placeholder: This could be fixed by...
+ validations:
+ required: false
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml
new file mode 100644
index 0000000..f4b648b
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.yml
@@ -0,0 +1,57 @@
+name: ✨ Feature Request
+description: Suggest a new feature or enhancement
+title: "[Feature]: "
+labels: ["enhancement"]
+body:
+ - type: markdown
+ attributes:
+ value: |
+ Thanks for taking the time to suggest a new feature!
+
+ - type: textarea
+ id: what
+ attributes:
+ label: What would you like?
+ description: Describe the feature you'd like to see
+ placeholder: I would like to...
+ validations:
+ required: true
+
+ - type: textarea
+ id: implementation
+ attributes:
+ label: Possible Implementation
+ description: Suggest how this could be implemented
+ placeholder: This could be implemented by...
+ validations:
+ required: false
+
+ - type: dropdown
+ id: breaking-change
+ attributes:
+ label: Is this a breaking change?
+ options:
+ - "No"
+ - "Yes"
+ validations:
+ required: true
+
+ - type: dropdown
+ id: rfc
+ attributes:
+ label: Does this require an RFC?
+ description: RFC is required when changing existing behavior or for new features that require research
+ options:
+ - "No"
+ - "Yes"
+ validations:
+ required: true
+
+ - type: textarea
+ id: context
+ attributes:
+ label: Additional Context
+ description: Add any other context, examples, or screenshots
+ placeholder: Additional information...
+ validations:
+ required: false
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000..33209d2
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,14 @@
+# Reference: https://docs.github.com/en/github/administering-a-repository/configuration-options-for-dependency-updates
+version: 2
+updates:
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ schedule:
+ interval: "weekly"
+ groups:
+ # Group updates together, so that they are all applied in a single PR.
+ # Grouped updates are currently in beta and are subject to change.
+ # xref: https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#groups
+ actions-deps:
+ patterns:
+ - "*"
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 1af4b28..376613a 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -16,9 +16,9 @@ jobs:
# Note: To re-run `lint-commits` after fixing the PR title, close-and-reopen the PR.
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v5
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Use Node.js
- uses: actions/setup-node@v4
+ uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
with:
node-version: 22.x
- name: Check PR title
@@ -32,17 +32,17 @@ jobs:
strategy:
fail-fast: false
matrix:
- python-version: ["3.13"]
+ python-version: ["3.11","3.12","3.13","3.14"]
steps:
- - uses: actions/checkout@v5
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v6
+ uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
with:
python-version: ${{ matrix.python-version }}
- name: Install Hatch
run: |
- python -m pip install --upgrade hatch
+ python -m pip install hatch==1.15.0
- name: static analysis
run: hatch fmt --check
- name: type checking
diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml
index 8f47f48..ca66bb1 100644
--- a/.github/workflows/integration-tests.yml
+++ b/.github/workflows/integration-tests.yml
@@ -17,22 +17,19 @@ jobs:
python-version: ["3.13"]
steps:
- - name: Parse testing SDK branch from PR body
- id: parse
- run: |
- # Look for a line like: TESTING_SDK_BRANCH: feature/foo
- REF=$(printf '%s\n' '${{ github.event.pull_request.body }}' | sed -n 's/^TESTING_SDK_BRANCH:[[:space:]]*//p' | head -n1)
- if [ -z "$REF" ]; then REF="main"; fi
- echo "testing_ref=$REF" >> "$GITHUB_OUTPUT"
- echo "Using testing SDK branch: $REF"
-
- name: Checkout Language SDK (this PR)
- uses: actions/checkout@v5
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
path: language-sdk
+ - name: Parse testing SDK branch from PR body
+ id: parse
+ run: python language-sdk/ops/parse_sdk_branch.py
+ env:
+ PR_BODY: ${{ github.event.pull_request.body }}
+
- name: Checkout Testing SDK
- uses: actions/checkout@v5
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
repository: aws/aws-durable-execution-sdk-python-testing
ref: ${{ steps.parse.outputs.testing_ref }}
@@ -40,12 +37,12 @@ jobs:
path: testing-sdk
- name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v6
+ uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
with:
python-version: ${{ matrix.python-version }}
- name: Install Hatch
- run: python -m pip install --upgrade hatch
+ run: python -m pip install hatch==1.15.0
- name: Setup and run Testing SDK
working-directory: testing-sdk
@@ -67,24 +64,21 @@ jobs:
if: github.event_name == 'pull_request'
env:
AWS_REGION: us-west-2
-
- steps:
- - name: Parse testing SDK branch from PR body
- id: parse
- run: |
- # Look for a line like: TESTING_SDK_BRANCH: feature/foo
- REF=$(printf '%s\n' '${{ github.event.pull_request.body }}' | sed -n 's/^TESTING_SDK_BRANCH:[[:space:]]*//p' | head -n1)
- if [ -z "$REF" ]; then REF="main"; fi
- echo "testing_ref=$REF" >> "$GITHUB_OUTPUT"
- echo "Using testing SDK branch: $REF"
+ steps:
- name: Checkout Language SDK (this PR)
- uses: actions/checkout@v5
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
path: language-sdk
+ - name: Parse testing SDK branch from PR body
+ id: parse
+ run: python language-sdk/ops/parse_sdk_branch.py
+ env:
+ PR_BODY: ${{ github.event.pull_request.body }}
+
- name: Checkout Testing SDK
- uses: actions/checkout@v5
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
repository: aws/aws-durable-execution-sdk-python-testing
ref: ${{ steps.parse.outputs.testing_ref }}
@@ -92,28 +86,23 @@ jobs:
path: testing-sdk
- name: Set up Python 3.13
- uses: actions/setup-python@v6
+ uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
with:
python-version: '3.13'
- name: Configure AWS credentials
- uses: aws-actions/configure-aws-credentials@v4
+ uses: aws-actions/configure-aws-credentials@61815dcd50bd041e203e49132bacad1fd04d2708 # v5.1.1
with:
role-to-assume: "${{ secrets.ACTIONS_INTEGRATION_ROLE_NAME }}"
role-session-name: languageSDKIntegrationTest
aws-region: ${{ env.AWS_REGION }}
- - name: Install custom Lambda model
- working-directory: testing-sdk
- run: |
- aws configure add-model --service-model file://.github/model/lambda.json --service-name lambda
-
- name: Install Hatch and setup Testing SDK
working-directory: testing-sdk
env:
AWS_DURABLE_SDK_URL: file://${{ github.workspace }}/language-sdk
run: |
- pip install hatch
+ pip install hatch==1.15.0
python -m pip install -e .
- name: Get integration examples
@@ -122,6 +111,13 @@ jobs:
run: |
echo "examples=$(jq -c '.examples | map(select(.integration == true)) | .[0:2]' examples-catalog.json)" >> $GITHUB_OUTPUT
+ - name: Install AWS CLI v2
+ run: |
+ curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "/tmp/awscliv2.zip"
+ unzip -q /tmp/awscliv2.zip -d /tmp
+ rm /tmp/awscliv2.zip
+ sudo /tmp/aws/install --update
+ rm -rf /tmp/aws/
- name: Deploy and test examples
working-directory: testing-sdk
env:
@@ -133,20 +129,20 @@ jobs:
run: |
echo "Building examples..."
hatch run examples:build
-
+
# Get first integration example for testing
EXAMPLE_NAME=$(echo '${{ steps.get-examples.outputs.examples }}' | jq -r '.[0].name')
EXAMPLE_NAME_CLEAN=$(echo "$EXAMPLE_NAME" | sed 's/ //g')
FUNCTION_NAME="${EXAMPLE_NAME_CLEAN}-LanguageSDK-PR-${{ github.event.number }}"
-
+
echo "Deploying example: $EXAMPLE_NAME as $FUNCTION_NAME"
hatch run examples:deploy "$EXAMPLE_NAME" --function-name "$FUNCTION_NAME"
-
+
QUALIFIED_FUNCTION_NAME="$FUNCTION_NAME:\$LATEST"
-
+
echo "Waiting for function to be ready..."
aws lambda wait function-active --function-name "$FUNCTION_NAME" --endpoint-url "$LAMBDA_ENDPOINT" --region "${{ env.AWS_REGION }}"
-
+
echo "Invoking Lambda function: $QUALIFIED_FUNCTION_NAME"
aws lambda invoke \
--function-name "$QUALIFIED_FUNCTION_NAME" \
@@ -156,10 +152,10 @@ jobs:
--endpoint-url "$LAMBDA_ENDPOINT" \
/tmp/response.json \
> /tmp/invoke_response.json
-
+
echo "Response:"
cat /tmp/response.json
-
+
# Check for function errors
FUNCTION_ERROR=$(jq -r '.FunctionError // empty' /tmp/invoke_response.json)
if [ -n "$FUNCTION_ERROR" ]; then
@@ -167,7 +163,7 @@ jobs:
cat /tmp/response.json
exit 1
fi
-
+
echo "Getting durable executions..."
aws lambda list-durable-executions-by-function \
--function-name "$QUALIFIED_FUNCTION_NAME" \
@@ -176,15 +172,13 @@ jobs:
--endpoint-url "$LAMBDA_ENDPOINT" \
--cli-binary-format raw-in-base64-out \
> /tmp/executions.json
-
+
echo "Durable Executions:"
cat /tmp/executions.json
-
+
# Cleanup
echo "Cleaning up function: $FUNCTION_NAME"
aws lambda delete-function \
--function-name "$FUNCTION_NAME" \
--endpoint-url "$LAMBDA_ENDPOINT" \
--region "${{ env.AWS_REGION }}" || echo "Function cleanup failed or already deleted"
-
-
diff --git a/.github/workflows/pypi-publish.yml b/.github/workflows/pypi-publish.yml
new file mode 100644
index 0000000..95278fa
--- /dev/null
+++ b/.github/workflows/pypi-publish.yml
@@ -0,0 +1,71 @@
+# This workflow will upload a Python Package to PyPI when a release is created
+# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries
+
+# This workflow uses actions that are not certified by GitHub.
+# They are provided by a third-party and are governed by
+# separate terms of service, privacy policy, and support
+# documentation.
+
+name: Upload PyPI Package
+
+on:
+ release:
+ types: [published]
+
+permissions:
+ contents: read
+
+jobs:
+ release-build:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+
+ - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+ with:
+ python-version: "3.11"
+ - name: Install Hatch
+ run: |
+ python -m pip install hatch==1.15.0
+ - name: Build release distributions
+ run: |
+ hatch build
+
+ - name: Upload distributions
+ uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+ with:
+ name: release-dists
+ path: dist/
+
+ pypi-publish:
+ runs-on: ubuntu-latest
+ needs:
+ - release-build
+ permissions:
+ # IMPORTANT: this permission is mandatory for trusted publishing
+ id-token: write
+
+ # Dedicated environments with protections for publishing are strongly recommended.
+ # For more information, see: https://docs.github.com/en/actions/deployment/targeting-different-environments/using-environments-for-deployment#deployment-protection-rules
+ environment:
+ name: pypi
+ # The GitHub Release name is used as the PyPI project version string
+ # in the deployment status URL:
+ url: https://pypi.org/project/aws-durable-execution-sdk-python/${{ github.event.release.name }}
+
+ steps:
+ - name: Retrieve release distributions
+ uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
+ with:
+ name: release-dists
+ path: dist/
+
+ - name: Publish release distributions to PyPI
+ uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # v1.13.0
+ with:
+ packages-dir: dist/
diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml
new file mode 100644
index 0000000..9235ec3
--- /dev/null
+++ b/.github/workflows/scorecard.yml
@@ -0,0 +1,80 @@
+# This workflow uses actions that are not certified by GitHub. They are provided
+# by a third-party and are governed by separate terms of service, privacy
+# policy, and support documentation.
+
+name: Scorecard supply-chain security
+on:
+ # For Branch-Protection check. Only the default branch is supported. See
+ # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection
+ branch_protection_rule:
+ # To guarantee Maintained check is occasionally updated. See
+ # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained
+ schedule:
+ - cron: '21 16 * * 4'
+ push:
+ branches: [ "main" ]
+ workflow_dispatch:
+
+# Declare default permissions as read only.
+permissions:
+ contents: read
+
+jobs:
+ analysis:
+ name: Scorecard analysis
+ runs-on: ubuntu-latest
+ # `publish_results: true` only works when run from the default branch; the conditional can be removed if publishing is disabled.
+ if: github.event.repository.default_branch == github.ref_name || github.event_name == 'pull_request'
+ permissions:
+ # Needed to upload the results to code-scanning dashboard.
+ security-events: write
+ # Needed to publish results and get a badge (see publish_results below).
+ id-token: write
+ # Uncomment the permissions below if installing in a private repository.
+ # contents: read
+ # actions: read
+
+ steps:
+ - name: "Checkout code"
+ uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+ with:
+ persist-credentials: false
+
+ - name: "Run analysis"
+ uses: ossf/scorecard-action@4eaacf0543bb3f2c246792bd56e8cdeffafb205a # v2.4.3
+ with:
+ results_file: results.sarif
+ results_format: sarif
+ # (Optional) "write" PAT token. Uncomment the `repo_token` line below if:
+ # - you want to enable the Branch-Protection check on a *public* repository, or
+ # - you are installing Scorecard on a *private* repository
+ # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action?tab=readme-ov-file#authentication-with-fine-grained-pat-optional.
+ # repo_token: ${{ secrets.SCORECARD_TOKEN }}
+
+ # Public repositories:
+ # - Publish results to OpenSSF REST API for easy access by consumers
+ # - Allows the repository to include the Scorecard badge.
+ # - See https://github.com/ossf/scorecard-action#publishing-results.
+ # For private repositories:
+ # - `publish_results` will always be set to `false`, regardless
+ # of the value entered here.
+ publish_results: true
+
+ # (Optional) Uncomment file_mode if you have a .gitattributes with files marked export-ignore
+ # file_mode: git
+
+ # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
+ # format to the repository Actions tab.
+ - name: "Upload artifact"
+ uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+ with:
+ name: SARIF file
+ path: results.sarif
+ retention-days: 5
+
+ # Upload the results to GitHub's code scanning dashboard (optional).
+ # Commenting out will disable upload of results to your repo's Code Scanning dashboard
+ - name: "Upload to code-scanning"
+ uses: github/codeql-action/upload-sarif@cf1bb45a277cb3c205638b2cd5c984db1c46a412 # v4.31.7
+ with:
+ sarif_file: results.sarif
diff --git a/.github/workflows/sync-package.yml b/.github/workflows/sync-package.yml
index 6fd9c6b..7123109 100644
--- a/.github/workflows/sync-package.yml
+++ b/.github/workflows/sync-package.yml
@@ -20,18 +20,18 @@ jobs:
python-version: ["3.13"]
steps:
- - uses: actions/checkout@v5
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v6
+ uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
with:
python-version: ${{ matrix.python-version }}
- name: Install Hatch
run: |
- python -m pip install --upgrade hatch
+ python -m pip install hatch==1.15.0
- name: Build distribution
run: hatch build
- name: configure aws credentials
- uses: aws-actions/configure-aws-credentials@v4
+ uses: aws-actions/configure-aws-credentials@61815dcd50bd041e203e49132bacad1fd04d2708 # v5.1.1
with:
role-to-assume: "${{ secrets.ACTIONS_SYNC_ROLE_NAME }}"
role-session-name: gh-python
diff --git a/.github/workflows/test-parser.yml b/.github/workflows/test-parser.yml
new file mode 100644
index 0000000..da142d1
--- /dev/null
+++ b/.github/workflows/test-parser.yml
@@ -0,0 +1,24 @@
+name: Test Parser
+
+on:
+ pull_request:
+ paths:
+ - 'ops/parse_sdk_branch.py'
+ - 'ops/__tests__/**'
+ push:
+ branches: [ main ]
+ paths:
+ - 'ops/parse_sdk_branch.py'
+ - 'ops/__tests__/**'
+
+permissions:
+ contents: read
+
+jobs:
+ test-parser:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+
+ - name: Run parser tests
+ run: python ops/__tests__/test_parse_sdk_branch.py
diff --git a/README.md b/README.md
index c11017f..3ec5798 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,9 @@
-# aws-durable-functions-sdk-python
+# AWS Durable Execution SDK for Python
-[](https://pypi.org/project/aws-durable-functions-sdk-python)
-[](https://pypi.org/project/aws-durable-functions-sdk-python)
+[](https://pypi.org/project/aws-durable-execution-sdk-python)
+[](https://pypi.org/project/aws-durable-execution-sdk-python)
+
+[](https://scorecard.dev/viewer/?uri=github.com/aws/aws-durable-execution-sdk-python)
-----
diff --git a/docs/advanced/error-handling.md b/docs/advanced/error-handling.md
new file mode 100644
index 0000000..1123ad7
--- /dev/null
+++ b/docs/advanced/error-handling.md
@@ -0,0 +1,955 @@
+# Error Handling
+
+## Table of Contents
+
+- [Overview](#overview)
+- [Terminology](#terminology)
+- [Getting started](#getting-started)
+- [Exception types](#exception-types)
+- [Retry strategies](#retry-strategies)
+- [Error response formats](#error-response-formats)
+- [Common error scenarios](#common-error-scenarios)
+- [Troubleshooting](#troubleshooting)
+- [Best practices](#best-practices)
+- [FAQ](#faq)
+- [Testing](#testing)
+- [See also](#see-also)
+
+[← Back to main index](../index.md)
+
+## Overview
+
+Error handling in durable functions determines how your code responds to failures. The SDK provides typed exceptions, automatic retry with exponential backoff, and AWS-compliant error responses that help you build resilient workflows.
+
+When errors occur, the SDK can:
+- Retry transient failures automatically with configurable backoff
+- Checkpoint failures with detailed error information
+- Distinguish between recoverable and unrecoverable errors
+- Provide clear termination reasons and stack traces for debugging
+
+[↑ Back to top](#table-of-contents)
+
+## Terminology
+
+**Exception** - A Python error that interrupts normal execution flow. The SDK provides specific exception types for different failure scenarios.
+
+**Retry strategy** - A function that determines whether to retry an operation after an exception and how long to wait before retrying.
+
+**Termination reason** - A code indicating why a durable execution terminated, such as `UNHANDLED_ERROR` or `INVOCATION_ERROR`.
+
+**Recoverable error** - An error that can be retried, such as transient network failures or rate limiting.
+
+**Unrecoverable error** - An error that terminates execution immediately without retry, such as validation errors or non-deterministic execution.
+
+**Backoff** - The delay between retry attempts, typically increasing exponentially to avoid overwhelming failing services.
+
+[↑ Back to top](#table-of-contents)
+
+## Getting started
+
+Here's a simple example of handling errors in a durable function:
+
+```python
+from aws_durable_execution_sdk_python import (
+ DurableContext,
+ durable_execution,
+ durable_step,
+ StepContext,
+)
+
+@durable_step
+def process_order(step_context: StepContext, order_id: str) -> dict:
+ """Process an order with validation."""
+ if not order_id:
+ raise ValueError("Order ID is required")
+
+ # Process the order
+ return {"order_id": order_id, "status": "processed"}
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+ """Handle order processing with error handling."""
+ try:
+ order_id = event.get("order_id")
+ result = context.step(process_order(order_id))
+ return result
+ except ValueError as e:
+ # Handle validation errors from your code
+ return {"error": "InvalidInput", "message": str(e)}
+```
+
+When this function runs:
+1. If `order_id` is missing, `ValueError` is raised from your code
+2. The exception is caught and handled gracefully
+3. A structured error response is returned to the caller
+
+[↑ Back to top](#table-of-contents)
+
+## Exception types
+
+The SDK provides several exception types for different failure scenarios.
+
+### Exception summary
+
+| Exception | Retryable | Behavior | Use case |
+|-----------|-----------|----------|----------|
+| `ValidationError` | No | Fails immediately | SDK detects invalid arguments |
+| `ExecutionError` | No | Returns FAILED status | Permanent business logic failures |
+| `InvocationError` | Yes (by Lambda) | Lambda retries invocation | Transient infrastructure issues |
+| `CallbackError` | No | Returns FAILED status | Callback handling failures |
+| `StepInterruptedError` | Yes (automatic) | Retries on next invocation | Step interrupted before checkpoint |
+| `CheckpointError` | Depends | Retries if 4xx (except invalid token) | Failed to save execution state |
+| `SerDesError` | No | Returns FAILED status | Serialization failures |
+
+### Base exceptions
+
+**DurableExecutionsError** - Base class for all SDK exceptions.
+
+```python
+from aws_durable_execution_sdk_python import DurableExecutionsError
+
+try:
+ # Your code here
+ pass
+except DurableExecutionsError as e:
+ # Handle any SDK exception
+ print(f"SDK error: {e}")
+```
+
+**UnrecoverableError** - Base class for errors that terminate execution. These errors include a `termination_reason` attribute.
+
+```python
+from aws_durable_execution_sdk_python import (
+ ExecutionError,
+ InvocationError,
+)
+
+try:
+ # Your code here
+ pass
+except (ExecutionError, InvocationError) as e:
+ # Access termination reason from unrecoverable errors
+ print(f"Execution terminated: {e.termination_reason}")
+```
+
+### Validation errors
+
+**ValidationError** - Raised by the SDK when you pass invalid arguments to SDK operations.
+
+```python
+from aws_durable_execution_sdk_python import (
+ DurableContext,
+ durable_execution,
+ ValidationError,
+)
+from aws_durable_execution_sdk_python.config import CallbackConfig
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+ """Handle SDK validation errors."""
+ try:
+ # SDK raises ValidationError if timeout is invalid
+ callback = context.create_callback(
+ config=CallbackConfig(timeout_seconds=-1), # Invalid!
+ name="approval"
+ )
+ return {"callback_id": callback}
+ except ValidationError as e:
+ # SDK caught invalid configuration
+ return {"error": "InvalidConfiguration", "message": str(e)}
+```
+
+The SDK raises `ValidationError` when:
+- Operation arguments are invalid (negative timeouts, empty names)
+- Required parameters are missing
+- Configuration values are out of range
+
+### Execution errors
+
+**ExecutionError** - Raised when execution fails in a way that shouldn't be retried. Returns `FAILED` status without retry.
+
+```python
+from aws_durable_execution_sdk_python import ExecutionError
+
+@durable_step
+def process_data(step_context: StepContext, data: dict) -> dict:
+ """Process data with business logic validation."""
+ if not data.get("required_field"):
+ raise ExecutionError("Required field missing")
+ return {"processed": True}
+```
+
+Use `ExecutionError` for:
+- Business logic failures
+- Invalid data that won't be fixed by retry
+- Permanent failures that should fail fast
+
+### Invocation errors
+
+**InvocationError** - Raised when Lambda should retry the entire invocation. Causes Lambda to retry by throwing from the handler.
+
+```python
+from aws_durable_execution_sdk_python import InvocationError
+
+@durable_step
+def call_external_service(step_context: StepContext) -> dict:
+ """Call external service with retry."""
+ try:
+ # Call external service
+ response = make_api_call()
+ return response
+ except ConnectionError:
+ # Trigger Lambda retry
+ raise InvocationError("Service unavailable")
+```
+
+Use `InvocationError` for:
+- Service unavailability
+- Network failures
+- Transient infrastructure issues
+
+### Callback errors
+
+**CallbackError** - Raised when callback handling fails.
+
+```python
+from aws_durable_execution_sdk_python import CallbackError
+from aws_durable_execution_sdk_python.config import CallbackConfig
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+ """Handle callback with error handling."""
+ try:
+ callback = context.create_callback(
+ config=CallbackConfig(timeout_seconds=3600),
+ name="approval"
+ )
+ context.wait_for_callback(callback)
+ return {"status": "approved"}
+ except CallbackError as e:
+ return {"error": "CallbackError", "callback_id": e.callback_id}
+```
+
+### Step interrupted errors
+
+**StepInterruptedError** - Raised when a step is interrupted before checkpointing.
+
+```python
+from aws_durable_execution_sdk_python import StepInterruptedError
+
+# This can happen if Lambda times out during step execution
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+ try:
+ result = context.step(long_running_operation())
+ return result
+ except StepInterruptedError as e:
+ # Step was interrupted, will retry on next invocation
+ context.logger.warning(f"Step interrupted: {e.step_id}")
+ raise
+```
+
+### Serialization errors
+
+**SerDesError** - Raised when serialization or deserialization fails.
+
+```python
+from aws_durable_execution_sdk_python import SerDesError
+
+@durable_step
+def process_complex_data(step_context: StepContext, data: object) -> dict:
+ """Process data that might not be serializable."""
+ try:
+ # Process data
+ return {"result": data}
+ except SerDesError as e:
+ # Handle serialization failure
+ return {"error": "Cannot serialize result"}
+```
+
+[↑ Back to top](#table-of-contents)
+
+## Retry strategies
+
+Configure retry behavior for steps using retry strategies.
+
+### Creating retry strategies
+
+Use `RetryStrategyConfig` to define retry behavior:
+
+```python
+from aws_durable_execution_sdk_python import (
+ DurableContext,
+ durable_execution,
+ durable_step,
+ StepContext,
+)
+from aws_durable_execution_sdk_python.config import StepConfig
+from aws_durable_execution_sdk_python.retries import (
+ RetryStrategyConfig,
+ create_retry_strategy,
+)
+
+@durable_step
+def unreliable_operation(step_context: StepContext) -> str:
+ """Operation that might fail."""
+ # Your code here
+ return "success"
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> str:
+ # Configure retry strategy
+ retry_config = RetryStrategyConfig(
+ max_attempts=3,
+ initial_delay_seconds=1,
+ max_delay_seconds=10,
+ backoff_rate=2.0,
+ retryable_error_types=[RuntimeError, ConnectionError],
+ )
+
+ # Create step config with retry
+ step_config = StepConfig(
+ retry_strategy=create_retry_strategy(retry_config)
+ )
+
+ # Execute with retry
+ result = context.step(unreliable_operation(), config=step_config)
+ return result
+```
+
+### RetryStrategyConfig parameters
+
+**max_attempts** - Maximum number of attempts (including the initial attempt). Default: 3.
+
+**initial_delay_seconds** - Initial delay before first retry in seconds. Default: 5.
+
+**max_delay_seconds** - Maximum delay between retries in seconds. Default: 300 (5 minutes).
+
+**backoff_rate** - Multiplier for exponential backoff. Default: 2.0.
+
+**jitter_strategy** - Jitter strategy to add randomness to delays. Default: `JitterStrategy.FULL`.
+
+**retryable_errors** - List of error message patterns to retry (strings or regex patterns). Default: matches all errors.
+
+**retryable_error_types** - List of exception types to retry. Default: empty, meaning no type filter is applied and all exception types are eligible.
+
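+For example, a minimal sketch that retries based on the error message rather than the exception type (assuming, per the `retryable_errors` description above, that patterns are matched against the error message):
+
+```python
+retry_config = RetryStrategyConfig(
+    max_attempts=4,
+    # Retry when the error message matches either pattern
+    # (plain substring or regex, per retryable_errors above).
+    retryable_errors=["ThrottlingException", r"5\d\d Server Error"],
+)
+```
+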
+### Retry presets
+
+The SDK provides preset retry strategies for common scenarios:
+
+```python
+from aws_durable_execution_sdk_python.retries import RetryPresets
+from aws_durable_execution_sdk_python.config import StepConfig
+
+# No retries
+step_config = StepConfig(retry_strategy=RetryPresets.none())
+
+# Default retries (6 attempts, 5s initial delay)
+step_config = StepConfig(retry_strategy=RetryPresets.default())
+
+# Quick retries for transient errors (3 attempts)
+step_config = StepConfig(retry_strategy=RetryPresets.transient())
+
+# Longer retries for resource availability (5 attempts, up to 5 minutes)
+step_config = StepConfig(retry_strategy=RetryPresets.resource_availability())
+
+# Aggressive retries for critical operations (10 attempts)
+step_config = StepConfig(retry_strategy=RetryPresets.critical())
+```
+
+### Retrying specific exceptions
+
+Only retry certain exception types:
+
+```python
+from random import random
+from aws_durable_execution_sdk_python import (
+ DurableContext,
+ durable_execution,
+ durable_step,
+ StepContext,
+)
+from aws_durable_execution_sdk_python.config import StepConfig
+from aws_durable_execution_sdk_python.retries import (
+ RetryStrategyConfig,
+ create_retry_strategy,
+)
+
+@durable_step
+def call_api(step_context: StepContext) -> dict:
+ """Call external API that might fail."""
+ if random() > 0.5:
+ raise ConnectionError("Network timeout")
+ return {"status": "success"}
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+ # Only retry ConnectionError, not other exceptions
+ retry_config = RetryStrategyConfig(
+ max_attempts=3,
+ retryable_error_types=[ConnectionError],
+ )
+
+ result = context.step(
+ call_api(),
+ config=StepConfig(create_retry_strategy(retry_config)),
+ )
+
+ return result
+```
+
+### Exponential backoff
+
+Configure exponential backoff to avoid overwhelming failing services:
+
+```python
+retry_config = RetryStrategyConfig(
+ max_attempts=5,
+ initial_delay_seconds=1, # First retry after 1 second
+ max_delay_seconds=60, # Cap at 60 seconds
+ backoff_rate=2.0, # Double delay each time: 1s, 2s, 4s, 8s, 16s...
+)
+```
+
+With this configuration:
+- Attempt 1: Immediate
+- Attempt 2: After 1 second
+- Attempt 3: After 2 seconds
+- Attempt 4: After 4 seconds
+- Attempt 5: After 8 seconds
+
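+The default `JitterStrategy.FULL` additionally randomizes each delay so that many executions retrying at once don't synchronize. As an intuition-building sketch only (this assumes the SDK follows the standard full-jitter formula; it is not a statement about its internals):
+
+```python
+import random
+
+def full_jitter(computed_delay_seconds: float) -> float:
+    """Full jitter: sleep for a uniformly random duration in [0, computed delay]."""
+    return random.uniform(0.0, computed_delay_seconds)
+```
+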
+[↑ Back to top](#table-of-contents)
+
+## Error response formats
+
+The SDK follows AWS service conventions for error responses.
+
+### Error response structure
+
+When a durable function fails, the response includes:
+
+```json
+{
+ "errorType": "ExecutionError",
+ "errorMessage": "Order validation failed",
+ "termination_reason": "EXECUTION_ERROR",
+ "stackTrace": [
+ " File \"/var/task/handler.py\", line 42, in process_order",
+ " raise ExecutionError(\"Order validation failed\")"
+ ]
+}
+```
+
+### Termination reasons
+
+**UNHANDLED_ERROR** - An unhandled exception occurred in user code.
+
+**INVOCATION_ERROR** - Lambda should retry the invocation.
+
+**EXECUTION_ERROR** - Execution failed and shouldn't be retried.
+
+**CHECKPOINT_FAILED** - Failed to checkpoint execution state.
+
+**NON_DETERMINISTIC_EXECUTION** - Execution produced different results on replay.
+
+**STEP_INTERRUPTED** - A step was interrupted before completing.
+
+**CALLBACK_ERROR** - Callback handling failed.
+
+**SERIALIZATION_ERROR** - Failed to serialize or deserialize data.
+
+### HTTP status codes
+
+When calling durable functions via API Gateway or Lambda URLs:
+
+- **200 OK** - Execution succeeded
+- **400 Bad Request** - Validation error or invalid input
+- **500 Internal Server Error** - Execution error or unhandled exception
+- **503 Service Unavailable** - Invocation error (Lambda will retry)
+
+[↑ Back to top](#table-of-contents)
+
+## Common error scenarios
+
+### Handling input validation
+
+Validate input early and return clear error messages:
+
+```python
+from aws_durable_execution_sdk_python import (
+ DurableContext,
+ durable_execution,
+)
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+ """Validate input and handle errors."""
+ # Validate required fields
+ if not event.get("user_id"):
+ return {"error": "InvalidInput", "message": "user_id is required"}
+
+ if not event.get("action"):
+ return {"error": "InvalidInput", "message": "action is required"}
+
+ # Process valid input
+ user_id = event["user_id"]
+ action = event["action"]
+
+ result = context.step(
+ lambda _: {"user_id": user_id, "action": action, "status": "completed"},
+ name="process_action"
+ )
+
+ return result
+```
+
+### Handling transient failures
+
+Retry transient failures automatically:
+
+```python
+from aws_durable_execution_sdk_python import (
+ DurableContext,
+ durable_execution,
+ durable_step,
+ StepContext,
+)
+from aws_durable_execution_sdk_python.config import StepConfig
+from aws_durable_execution_sdk_python.retries import RetryPresets
+
+@durable_step
+def call_external_api(step_context: StepContext, endpoint: str) -> dict:
+ """Call external API with retry."""
+ # API call that might fail transiently
+ response = make_http_request(endpoint)
+ return response
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+ """Handle API calls with automatic retry."""
+ # Use transient preset for quick retries
+ step_config = StepConfig(retry_strategy=RetryPresets.transient())
+
+ try:
+ result = context.step(
+ call_external_api(event["endpoint"]),
+ config=step_config,
+ )
+ return {"status": "success", "data": result}
+ except Exception as e:
+ # All retries exhausted
+ return {"status": "failed", "error": str(e)}
+```
+
+### Handling permanent failures
+
+Fail fast for permanent errors:
+
+```python
+from aws_durable_execution_sdk_python import (
+ DurableContext,
+ durable_execution,
+ durable_step,
+ ExecutionError,
+ StepContext,
+)
+
+@durable_step
+def process_payment(step_context: StepContext, amount: float, card: str) -> dict:
+ """Process payment with validation."""
+ # Validate card
+ if not is_valid_card(card):
+ # Don't retry invalid cards
+ raise ExecutionError("Invalid card number")
+
+ # Process payment
+ return {"transaction_id": "txn_123", "amount": amount}
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+ """Handle payment with error handling."""
+ try:
+ result = context.step(
+ process_payment(event["amount"], event["card"])
+ )
+ return {"status": "success", "transaction": result}
+ except ExecutionError as e:
+ # Permanent failure, don't retry
+ return {"status": "failed", "error": str(e)}
+```
+
+### Handling multiple error types
+
+Handle different error types appropriately:
+
+```python
+from aws_durable_execution_sdk_python import (
+ DurableContext,
+ durable_execution,
+ durable_step,
+ ExecutionError,
+ InvocationError,
+ ValidationError,
+ StepContext,
+)
+
+@durable_step
+def complex_operation(step_context: StepContext, data: dict) -> dict:
+ """Operation with multiple failure modes."""
+ # Validate input
+ if not data:
+ raise ValueError("Data is required")
+
+ # Check business rules
+ if data.get("amount", 0) < 0:
+ raise ExecutionError("Amount must be positive")
+
+ # Call external service
+ try:
+ result = call_external_service(data)
+ return result
+ except ConnectionError:
+ # Transient failure
+ raise InvocationError("Service unavailable")
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+ """Handle multiple error types."""
+ try:
+ result = context.step(complex_operation(event))
+ return {"status": "success", "result": result}
+ except ValueError as e:
+ return {"status": "invalid", "error": str(e)}
+ except ExecutionError as e:
+ return {"status": "failed", "error": str(e)}
+ except InvocationError:
+ # Let Lambda retry
+ raise
+```
+
+[↑ Back to top](#table-of-contents)
+
+## Troubleshooting
+
+### Step retries exhausted
+
+**Problem:** Your step fails after exhausting all retry attempts.
+
+**Cause:** The operation continues to fail, or the error isn't retryable.
+
+**Solution:** Check your retry configuration and error types:
+
+```python
+# Ensure you're retrying the right errors
+retry_config = RetryStrategyConfig(
+ max_attempts=5, # Increase attempts
+ retryable_error_types=[ConnectionError, TimeoutError], # Add error types
+)
+```
+
+### Checkpoint failed errors
+
+**Problem:** Execution fails with `CheckpointError`.
+
+**Cause:** Failed to save execution state, possibly due to payload size limits or service issues.
+
+**Solution:** Reduce checkpoint payload size or check service health:
+
+```python
+# Reduce payload size by returning only necessary data
+@durable_step
+def large_operation(step_context: StepContext) -> dict:
+ # Process large data
+ large_result = process_data()
+
+ # Return only summary, not full data
+ return {"summary": large_result["summary"], "count": len(large_result["items"])}
+```
+
+### Callback timeout
+
+**Problem:** Callback times out before receiving a response.
+
+**Cause:** External system didn't respond within the timeout period.
+
+**Solution:** Increase callback timeout or implement retry logic:
+
+```python
+from aws_durable_execution_sdk_python.config import CallbackConfig
+
+# Increase timeout
+callback = context.create_callback(
+ config=CallbackConfig(
+ timeout_seconds=7200, # 2 hours
+ heartbeat_timeout_seconds=300, # 5 minutes
+ ),
+ name="long_running_approval"
+)
+```
+
+### Step interrupted errors
+
+**Problem:** Steps are interrupted before completing.
+
+**Cause:** Lambda timeout or memory limit reached during step execution.
+
+**Solution:** Increase Lambda timeout or break large steps into smaller ones:
+
+```python
+# Break large operation into smaller steps
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+ # Process in chunks instead of all at once
+ items = event["items"]
+ chunk_size = 100
+
+ results = []
+ for i in range(0, len(items), chunk_size):
+ chunk = items[i:i + chunk_size]
+ result = context.step(
+ lambda _, c=chunk: process_chunk(c),
+ name=f"process_chunk_{i}"
+ )
+ results.extend(result)
+
+ return {"processed": len(results)}
+```
+
+[↑ Back to top](#table-of-contents)
+
+## Best practices
+
+**Validate input early** - Check for invalid input at the start of your function and return clear error responses or raise appropriate exceptions like `ValueError`.
+
+**Use appropriate exception types** - Choose the right exception type for each failure scenario. Use `ExecutionError` for permanent failures and `InvocationError` for transient issues.
+
+**Configure retry for transient failures** - Use retry strategies for operations that might fail temporarily, such as network calls or rate limits.
+
+**Fail fast for permanent errors** - Don't retry errors that won't be fixed by retrying, such as validation failures or business logic errors.
+
+**Wrap non-deterministic code in steps** - All code that produces different results on replay must be wrapped in steps, including random values, timestamps, and external API calls (see the sketch after this list).
+
+**Handle errors explicitly** - Catch and handle exceptions in your code. Provide meaningful error messages to callers.
+
+**Log errors with context** - Use `context.logger` to log errors with execution context for debugging.
+
+**Keep error messages clear** - Write error messages that help users understand what went wrong and how to fix it.
+
+**Test error scenarios** - Write tests for both success and failure cases to ensure your error handling works correctly.
+
+**Monitor error rates** - Track error rates and termination reasons to identify issues in production.
+
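+As a minimal sketch of the non-determinism guideline above (the helper names here are illustrative, not part of the SDK):
+
+```python
+from datetime import datetime, timezone
+from uuid import uuid4
+
+from aws_durable_execution_sdk_python import (
+    DurableContext,
+    durable_execution,
+    durable_step,
+    StepContext,
+)
+
+@durable_step
+def make_request_id(step_context: StepContext) -> dict:
+    """Checkpoint non-deterministic values so replay sees identical results."""
+    return {
+        "request_id": str(uuid4()),
+        "requested_at": datetime.now(timezone.utc).isoformat(),
+    }
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+    # The UUID and timestamp are generated once, checkpointed, and replayed
+    # unchanged on every subsequent invocation.
+    return context.step(make_request_id())
+```
+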
+[↑ Back to top](#table-of-contents)
+
+## FAQ
+
+**Q: What's the difference between ExecutionError and InvocationError?**
+
+A: `ExecutionError` fails the execution without retry (returns FAILED status). `InvocationError` triggers Lambda to retry the entire invocation. Use `ExecutionError` for permanent failures and `InvocationError` for transient issues.
+
+**Q: How do I retry only specific exceptions?**
+
+A: Use `retryable_error_types` in `RetryStrategyConfig`:
+
+```python
+retry_config = RetryStrategyConfig(
+ max_attempts=3,
+ retryable_error_types=[ConnectionError, TimeoutError],
+)
+```
+
+**Q: Can I customize the backoff strategy?**
+
+A: Yes, configure `initial_delay_seconds`, `max_delay_seconds`, `backoff_rate`, and `jitter_strategy` in `RetryStrategyConfig`.
+
+**Q: What happens when retries are exhausted?**
+
+A: The step checkpoints the error and the exception propagates to your handler. You can catch and handle it there.
+
+**Q: How do I prevent duplicate operations on retry?**
+
+A: Use at-most-once semantics for operations with side effects:
+
+```python
+from aws_durable_execution_sdk_python.config import StepConfig, StepSemantics
+
+step_config = StepConfig(
+ step_semantics=StepSemantics.AT_MOST_ONCE_PER_RETRY
+)
+```
+
+**Q: Can I access error details in my code?**
+
+A: Yes, catch the exception and access its attributes:
+
+```python
+try:
+ result = context.step(operation())
+except CallbackError as e:
+ print(f"Callback failed: {e.callback_id}")
+except NonDeterministicExecutionError as e:
+ print(f"Non-deterministic step: {e.step_id}")
+```
+
+**Q: How do I handle errors in parallel operations?**
+
+A: Wrap each parallel operation in a try-except block or let errors propagate to fail the entire execution:
+
+```python
+results = []
+for item in items:
+ try:
+ result = context.step(lambda _, i=item: process(i), name=f"process_{item}")
+ results.append(result)
+ except Exception as e:
+ results.append({"error": str(e)})
+```
+
+**Q: What's the maximum number of retry attempts?**
+
+A: You can configure any number of attempts, but consider Lambda timeout limits. `RetryStrategyConfig` defaults to 3 attempts, and the `RetryPresets.default()` preset uses 6.
+
+[↑ Back to top](#table-of-contents)
+
+## Testing
+
+You can test error handling using the testing SDK. The test runner executes your function and lets you inspect errors.
+
+### Testing successful execution
+
+```python
+import pytest
+from aws_durable_execution_sdk_python_testing import InvocationStatus
+from my_function import handler
+
+@pytest.mark.durable_execution(
+ handler=handler,
+ lambda_function_name="my_function",
+)
+def test_success(durable_runner):
+ """Test successful execution."""
+ with durable_runner:
+ result = durable_runner.run(input={"data": "test"}, timeout=10)
+
+ assert result.status is InvocationStatus.SUCCEEDED
+```
+
+### Testing error conditions
+
+Test that your function handles errors correctly:
+
+```python
+@pytest.mark.durable_execution(
+ handler=handler_with_validation,
+ lambda_function_name="validation_function",
+)
+def test_input_validation(durable_runner):
+ """Test input validation handling."""
+ with durable_runner:
+ result = durable_runner.run(input={}, timeout=10)
+
+ # Function should return error response for invalid input
+ assert result.status is InvocationStatus.SUCCEEDED
+ assert "error" in result.result
+ assert result.result["error"] == "InvalidInput"
+```
+
+### Testing SDK validation errors
+
+Test that the SDK catches invalid configuration:
+
+```python
+@pytest.mark.durable_execution(
+ handler=handler_with_invalid_config,
+ lambda_function_name="sdk_validation_function",
+)
+def test_sdk_validation_error(durable_runner):
+ """Test SDK validation error handling."""
+ with durable_runner:
+ result = durable_runner.run(input={}, timeout=10)
+
+ # SDK should catch invalid configuration
+ assert result.status is InvocationStatus.FAILED
+ assert "ValidationError" in str(result.error)
+```
+
+### Testing retry behavior
+
+Test that steps retry correctly:
+
+```python
+@pytest.mark.durable_execution(
+ handler=handler_with_retry,
+ lambda_function_name="retry_function",
+)
+def test_retry_success(durable_runner):
+ """Test that retries eventually succeed."""
+ with durable_runner:
+ result = durable_runner.run(input={}, timeout=30)
+
+ # Should succeed after retries
+ assert result.status is InvocationStatus.SUCCEEDED
+```
+
+### Testing retry exhaustion
+
+Test that execution fails when retries are exhausted:
+
+```python
+@pytest.mark.durable_execution(
+ handler=handler_always_fails,
+ lambda_function_name="failing_function",
+)
+def test_retry_exhausted(durable_runner):
+ """Test that execution fails after exhausting retries."""
+ with durable_runner:
+ result = durable_runner.run(input={}, timeout=30)
+
+ # Should fail after all retries
+ assert result.status is InvocationStatus.FAILED
+ assert "RuntimeError" in str(result.error)
+```
+
+### Inspecting error details
+
+Inspect error details in test results:
+
+```python
+@pytest.mark.durable_execution(
+ handler=handler_with_error,
+ lambda_function_name="error_function",
+)
+def test_error_details(durable_runner):
+ """Test error details are captured."""
+ with durable_runner:
+ result = durable_runner.run(input={}, timeout=10)
+
+ # Check error details
+ assert result.status is InvocationStatus.FAILED
+ assert result.error is not None
+ assert "error_type" in result.error
+ assert "message" in result.error
+```
+
+For more testing patterns, see:
+- [Basic tests](../testing-patterns/basic-tests.md) - Simple test examples
+- [Complex workflows](../testing-patterns/complex-workflows.md) - Multi-step workflow testing
+- [Best practices](../testing-patterns/best-practices.md) - Testing recommendations
+
+[↑ Back to top](#table-of-contents)
+
+## See also
+
+- [Steps](../core/steps.md) - Configure retry for steps
+- [Callbacks](../core/callbacks.md) - Handle callback errors
+- [Child contexts](../core/child-contexts.md) - Error handling in nested contexts
+- [Retry strategies](../api-reference/config.md) - Retry configuration reference
+- [Examples](https://github.com/aws/aws-durable-execution-sdk-python/tree/main/examples/src/step) - Error handling examples
+
+[↑ Back to top](#table-of-contents)
+
+## License
+
+See the [LICENSE](../../LICENSE) file for our project's licensing.
+
+[↑ Back to top](#table-of-contents)
diff --git a/docs/advanced/serialization.md b/docs/advanced/serialization.md
new file mode 100644
index 0000000..112131a
--- /dev/null
+++ b/docs/advanced/serialization.md
@@ -0,0 +1,771 @@
+# Serialization
+
+Learn how the SDK serializes and deserializes data for durable execution checkpoints.
+
+## Table of Contents
+
+- [Terminology](#terminology)
+- [What is serialization?](#what-is-serialization)
+- [Key features](#key-features)
+- [Default serialization behavior](#default-serialization-behavior)
+- [Supported types](#supported-types)
+- [Converting non-serializable types](#converting-non-serializable-types)
+- [Custom serialization](#custom-serialization)
+- [Serialization in configurations](#serialization-in-configurations)
+- [Best practices](#best-practices)
+- [Troubleshooting](#troubleshooting)
+- [FAQ](#faq)
+
+[← Back to main index](../index.md)
+
+## Terminology
+
+**Serialization** - Converting Python objects to strings for storage in checkpoints.
+
+**Deserialization** - Converting checkpoint strings back to Python objects.
+
+**SerDes** - Short for Serializer/Deserializer, a custom class that handles both serialization and deserialization.
+
+**Checkpoint** - A saved state of execution that includes serialized operation results.
+
+**Extended types** - Types beyond basic JSON (datetime, Decimal, UUID, bytes) that the SDK serializes automatically.
+
+**Envelope format** - The SDK's internal format that wraps complex types with type tags for accurate deserialization.
+
+[↑ Back to top](#table-of-contents)
+
+## What is serialization?
+
+Serialization converts Python objects into strings that can be stored in checkpoints. When your durable function resumes, deserialization converts those strings back into Python objects. The SDK handles this automatically for most types.
+
+[↑ Back to top](#table-of-contents)
+
+## Key features
+
+- Automatic serialization for common Python types
+- Extended type support (datetime, Decimal, UUID, bytes)
+- Custom serialization for complex objects
+- Type preservation during round-trip serialization
+- Efficient plain JSON for primitives
+
+[↑ Back to top](#table-of-contents)
+
+## Default serialization behavior
+
+The SDK handles most Python types automatically:
+
+```python
+from aws_durable_execution_sdk_python import DurableContext, durable_execution
+from datetime import datetime
+from decimal import Decimal
+from uuid import uuid4
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+ # All these types serialize automatically
+ result = context.step(
+ process_order,
+ order_id=uuid4(),
+ amount=Decimal("99.99"),
+ timestamp=datetime.now()
+ )
+ return result
+```
+
+The SDK serializes data automatically when:
+- Checkpointing step results
+- Storing callback payloads
+- Passing data to child contexts
+- Returning results from your handler
+
+[↑ Back to top](#table-of-contents)
+
+## Supported types
+
+### Primitive types
+
+These types serialize as plain JSON for performance:
+
+```python
+from aws_durable_execution_sdk_python import DurableContext, durable_execution
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+ # Primitives - plain JSON
+ none_value = None
+ text = "hello"
+ number = 42
+ decimal_num = 3.14
+ flag = True
+
+ # Simple lists of primitives - plain JSON
+ numbers = [1, 2, 3, 4, 5]
+
+ return {
+ "none": none_value,
+ "text": text,
+ "number": number,
+ "decimal": decimal_num,
+ "flag": flag,
+ "numbers": numbers
+ }
+```
+
+**Supported primitive types:**
+- `None`
+- `str`
+- `int`
+- `float`
+- `bool`
+- Lists containing only primitives
+
+[↑ Back to top](#table-of-contents)
+
+### Extended types
+
+The SDK automatically handles these types using envelope format:
+
+```python
+from aws_durable_execution_sdk_python import DurableContext, durable_execution
+from datetime import datetime, date
+from decimal import Decimal
+from uuid import UUID, uuid4
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+ # Extended types - automatic serialization
+ order_data = {
+ "order_id": uuid4(), # UUID
+ "amount": Decimal("99.99"), # Decimal
+ "created_at": datetime.now(), # datetime
+ "delivery_date": date.today(), # date
+ "signature": b"binary_signature_data", # bytes
+ "coordinates": (40.7128, -74.0060), # tuple
+ }
+
+ result = context.step(process_order, order_data)
+ return result
+```
+
+**Supported extended types:**
+- `datetime` - ISO format with timezone
+- `date` - ISO date format
+- `Decimal` - Precise decimal numbers
+- `UUID` - Universally unique identifiers
+- `bytes`, `bytearray`, `memoryview` - Binary data (base64 encoded)
+- `tuple` - Immutable sequences
+- `list` - Mutable sequences (including nested)
+- `dict` - Dictionaries (including nested)
+
+[↑ Back to top](#table-of-contents)
+
+### Container types
+
+Containers can hold any supported type, including nested containers:
+
+```python
+from aws_durable_execution_sdk_python import DurableContext, durable_execution
+from datetime import datetime
+from decimal import Decimal
+from uuid import uuid4
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+ # Nested structures serialize automatically
+ complex_data = {
+ "user": {
+ "id": uuid4(),
+ "created": datetime.now(),
+ "balance": Decimal("1234.56"),
+ "metadata": b"binary_data",
+ "coordinates": (40.7128, -74.0060),
+ "tags": ["premium", "verified"],
+ "settings": {
+ "notifications": True,
+ "theme": "dark",
+ "limits": {
+ "daily": Decimal("500.00"),
+ "monthly": Decimal("10000.00"),
+ },
+ },
+ }
+ }
+
+ result = context.step(process_user, complex_data)
+ return result
+```
+
+[↑ Back to top](#table-of-contents)
+
+## Converting non-serializable types
+
+Some Python types aren't serializable by default. Convert them before passing to durable operations.
+
+### Dataclasses
+
+Convert dataclasses to dictionaries:
+
+```python
+from dataclasses import dataclass, asdict
+from aws_durable_execution_sdk_python import DurableContext, durable_execution
+
+@dataclass
+class Order:
+ order_id: str
+ amount: float
+ customer: str
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+ order = Order(
+ order_id="ORD-123",
+ amount=99.99,
+ customer="Jane Doe"
+ )
+
+ # Convert to dict before passing to step
+ result = context.step(process_order, asdict(order))
+ return result
+```
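+
+If a later step needs the object back, the checkpointed dict can be rebuilt into the dataclass (a usage note, assuming the dict keys still match the dataclass fields):
+
+```python
+restored = Order(**result)  # rebuild the dataclass from the checkpointed dict
+```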
+
+### Pydantic models
+
+Use Pydantic's built-in serialization:
+
+```python
+from pydantic import BaseModel
+from aws_durable_execution_sdk_python import DurableContext, durable_execution
+
+class Order(BaseModel):
+ order_id: str
+ amount: float
+ customer: str
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+ order = Order(
+ order_id="ORD-123",
+ amount=99.99,
+ customer="Jane Doe"
+ )
+
+ # Use model_dump() to convert to dict
+ result = context.step(process_order, order.model_dump())
+ return result
+```
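+
+To turn the checkpointed dict back into a model, use Pydantic's validation entry point (Pydantic v2, matching `model_dump()` above):
+
+```python
+restored = Order.model_validate(result)  # validate and rebuild the model
+```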
+
+### Custom objects
+
+Implement `to_dict()` and `from_dict()` methods:
+
+```python
+from aws_durable_execution_sdk_python import DurableContext, durable_execution
+
+class Order:
+ def __init__(self, order_id: str, amount: float, customer: str):
+ self.order_id = order_id
+ self.amount = amount
+ self.customer = customer
+
+ def to_dict(self) -> dict:
+ return {
+ "order_id": self.order_id,
+ "amount": self.amount,
+ "customer": self.customer
+ }
+
+ @classmethod
+ def from_dict(cls, data: dict) -> "Order":
+ return cls(
+ order_id=data["order_id"],
+ amount=data["amount"],
+ customer=data["customer"]
+ )
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+ order = Order("ORD-123", 99.99, "Jane Doe")
+
+ # Convert to dict before passing to step
+ result = context.step(process_order, order.to_dict())
+ return result
+```
+
+[↑ Back to top](#table-of-contents)
+
+## Custom serialization
+
+Implement custom serialization for specialized needs like encryption or compression.
+
+### Creating a custom SerDes
+
+Extend the `SerDes` base class:
+
+```python
+from aws_durable_execution_sdk_python.serdes import SerDes, SerDesContext
+import json
+
+class UpperCaseSerDes(SerDes[str]):
+ """Example: Convert strings to uppercase during serialization."""
+
+ def serialize(self, value: str, serdes_context: SerDesContext) -> str:
+ return value.upper()
+
+ def deserialize(self, data: str, serdes_context: SerDesContext) -> str:
+ return data.lower()
+```
+
+### Using custom SerDes with steps
+
+Pass your custom SerDes in `StepConfig`:
+
+```python
+from aws_durable_execution_sdk_python import DurableContext, durable_execution, durable_step, StepContext
+from aws_durable_execution_sdk_python.config import StepConfig
+from aws_durable_execution_sdk_python.serdes import SerDes, SerDesContext
+import json
+
+class CompressedSerDes(SerDes[dict]):
+ """Example: Compress large dictionaries."""
+
+ def serialize(self, value: dict, serdes_context: SerDesContext) -> str:
+ # In production, use actual compression like gzip
+ return json.dumps(value, separators=(',', ':'))
+
+ def deserialize(self, data: str, serdes_context: SerDesContext) -> dict:
+ return json.loads(data)
+
+@durable_step
+def process_large_data(step_context: StepContext, data: dict) -> dict:
+ # Process the data
+ return {"processed": True, "items": len(data)}
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+ large_data = {"items": [f"item_{i}" for i in range(1000)]}
+
+ # Use custom SerDes for this step
+ config = StepConfig(serdes=CompressedSerDes())
+ result = context.step(process_large_data(large_data), config=config)
+
+ return result
+```
+
+### Encryption example
+
+Encrypt sensitive data in checkpoints:
+
+```python
+from aws_durable_execution_sdk_python import DurableContext, durable_execution, durable_step, StepContext
+from aws_durable_execution_sdk_python.config import StepConfig
+from aws_durable_execution_sdk_python.serdes import SerDes, SerDesContext
+import json
+import base64
+
+class EncryptedSerDes(SerDes[dict]):
+ """Example: Encrypt sensitive data (simplified for demonstration)."""
+
+ def __init__(self, encryption_key: str):
+ self.encryption_key = encryption_key
+
+ def serialize(self, value: dict, serdes_context: SerDesContext) -> str:
+ json_str = json.dumps(value)
+ # In production, use proper encryption like AWS KMS
+ encrypted = base64.b64encode(json_str.encode()).decode()
+ return encrypted
+
+ def deserialize(self, data: str, serdes_context: SerDesContext) -> dict:
+ # In production, use proper decryption
+ decrypted = base64.b64decode(data.encode()).decode()
+ return json.loads(decrypted)
+
+@durable_step
+def process_sensitive_data(step_context: StepContext, data: dict) -> dict:
+ return {"processed": True}
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+ sensitive_data = {
+ "ssn": "123-45-6789",
+ "credit_card": "4111-1111-1111-1111"
+ }
+
+ # Encrypt data in checkpoints
+ config = StepConfig(serdes=EncryptedSerDes("my-key"))
+ result = context.step(process_sensitive_data(sensitive_data), config=config)
+
+ return result
+```
+
+[↑ Back to top](#table-of-contents)
+
+## Serialization in configurations
+
+Different operations support custom serialization through their configuration objects.
+
+### StepConfig
+
+Control serialization for step results:
+
+```python
+from aws_durable_execution_sdk_python import DurableContext, durable_execution
+from aws_durable_execution_sdk_python.config import StepConfig
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+ config = StepConfig(serdes=CustomSerDes())
+ result = context.step(my_function(), config=config)
+ return result
+```
+
+### CallbackConfig
+
+Control serialization for callback payloads:
+
+```python
+from aws_durable_execution_sdk_python import DurableContext, durable_execution
+from aws_durable_execution_sdk_python.config import CallbackConfig, Duration
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+ config = CallbackConfig(
+ timeout=Duration.from_hours(2),
+ serdes=CustomSerDes()
+ )
+ callback = context.create_callback(config=config)
+
+ # Send callback.callback_id to external system
+ return {"callback_id": callback.callback_id}
+```
+
+### MapConfig and ParallelConfig
+
+Control serialization for batch results:
+
+```python
+from aws_durable_execution_sdk_python import DurableContext, durable_execution
+from aws_durable_execution_sdk_python.config import MapConfig
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+ items = [1, 2, 3, 4, 5]
+
+ # Custom serialization for BatchResult
+ config = MapConfig(
+ serdes=CustomSerDes(), # For the entire BatchResult
+ item_serdes=ItemSerDes() # For individual item results
+ )
+
+ result = context.map(process_item, items, config=config)
+ return {"processed": len(result.succeeded)}
+```
+
+**Note:** When both `serdes` and `item_serdes` are provided:
+- `item_serdes` serializes individual item results in child contexts
+- `serdes` serializes the entire `BatchResult` at the handler level
+
+For backward compatibility, if only `serdes` is provided, it's used for both individual items and the `BatchResult`.
+
+[↑ Back to top](#table-of-contents)
+
+## Best practices
+
+### Use default serialization when possible
+
+The SDK handles most cases efficiently without custom serialization:
+
+```python
+# Good - uses default serialization
+from datetime import datetime
+from decimal import Decimal
+
+result = context.step(
+ process_order,
+ order_id="ORD-123",
+ amount=Decimal("99.99"),
+ timestamp=datetime.now()
+)
+```
+
+### Convert complex objects to dicts
+
+Convert custom objects to dictionaries before passing to durable operations:
+
+```python
+# Good - convert to dict first
+order_dict = order.to_dict()
+result = context.step(process_order, order_dict)
+
+# Avoid - custom objects aren't serializable
+result = context.step(process_order, order) # Will fail
+```
+
+### Keep serialized data small
+
+Large checkpoints might slow down execution. Keep data compact:
+
+```python
+# Good - only checkpoint what you need
+result = context.step(
+ process_data,
+ {"id": order.id, "amount": order.amount}
+)
+
+# Avoid - large objects in checkpoints
+result = context.step(
+ process_data,
+ entire_database_dump # Too large
+)
+```
+
+### Use appropriate types
+
+Choose types that serialize efficiently:
+
+```python
+# Good - Decimal for precise amounts
+amount = Decimal("99.99")
+
+# Avoid - float for money (precision issues)
+amount = 99.99
+```
+
+### Test serialization round-trips
+
+Verify your data survives serialization:
+
+```python
+from aws_durable_execution_sdk_python.serdes import serialize, deserialize
+
+def test_serialization():
+ original = {"amount": Decimal("99.99")}
+ serialized = serialize(None, original, "test-op", "test-arn")
+ deserialized = deserialize(None, serialized, "test-op", "test-arn")
+
+ assert deserialized == original
+```
+
+### Handle serialization errors gracefully
+
+Catch and handle serialization errors:
+
+```python
+from aws_durable_execution_sdk_python.exceptions import ExecutionError
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+ try:
+ result = context.step(process_data, complex_object)
+ except ExecutionError as e:
+ if "Serialization failed" in str(e):
+ # Convert to serializable format
+ simple_data = convert_to_dict(complex_object)
+ result = context.step(process_data, simple_data)
+ else:
+ raise
+
+ return result
+```
+
+[↑ Back to top](#table-of-contents)
+
+## Troubleshooting
+
+### Unsupported type error
+
+**Problem:** `SerDesError: Unsupported type: <class '...'>`
+
+**Solution:** Convert the object to a supported type before passing it to a durable operation. See [Converting non-serializable types](#converting-non-serializable-types).
+
+[↑ Back to top](#table-of-contents)
diff --git a/docs/getting-started.md b/docs/getting-started.md
new file mode 100644
--- /dev/null
+++ b/docs/getting-started.md
+# Getting Started
+
+```mermaid
+flowchart TB
+ subgraph dev["Development (Local)"]
+ direction LR
+ A["1. Write Code
+ aws-durable-execution-sdk-python"]
+ B["2. Write Tests
aws-durable-execution-sdk-python-testing"]
+ C["3. Run Tests
pytest"]
+ end
+
+ subgraph prod["Production (AWS)"]
+ direction LR
+ D["4. Deploy
SAM/CDK/Terraform"]
+ E["5. Test in Cloud
pytest --runner-mode=cloud"]
+ end
+
+ A --> B --> C --> D --> E
+
+ style dev fill:#e3f2fd
+ style prod fill:#fff3e0
+```
+
+Here's how you build and test durable functions:
+
+### 1. Write your function (execution SDK)
+
+Install the execution SDK and write your Lambda handler:
+
+```console
+pip install aws-durable-execution-sdk-python
+```
+
+```python
+from aws_durable_execution_sdk_python import (
+ DurableContext,
+ durable_execution,
+ durable_step,
+)
+
+@durable_step
+def my_step(step_context, data):
+    # Your business logic goes here
+    return {"processed": data}
+
+@durable_execution
+def handler(event, context: DurableContext):
+ result = context.step(my_step(event["data"]))
+ return result
+```
+
+### 2. Test locally (testing SDK)
+
+Install the testing SDK and write tests:
+
+```console
+pip install aws-durable-execution-sdk-python-testing
+```
+
+```python
+import pytest
+from aws_durable_execution_sdk_python.execution import InvocationStatus
+from my_function import handler
+
+@pytest.mark.durable_execution(handler=handler, lambda_function_name="my_function")
+def test_my_function(durable_runner):
+    with durable_runner:
+        result = durable_runner.run(input={"data": "test"}, timeout=10)
+
+    assert result.status is InvocationStatus.SUCCEEDED
+```
+
+Run tests without AWS credentials:
+
+```console
+pytest test_my_function.py
+```
+
+### 3. Deploy to Lambda
+
+Package your function with the execution SDK (not the testing SDK) and deploy using your preferred tool (SAM, CDK, Terraform, etc.).
+
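+For example, a minimal `requirements.txt` for the deployment package lists only the execution SDK:
+
+```
+aws-durable-execution-sdk-python
+```
+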
+### 4. Test in the cloud (optional)
+
+Run the same tests against your deployed function:
+
+```console
+export AWS_REGION=us-west-2
+export QUALIFIED_FUNCTION_NAME='MyFunction:$LATEST'
+export LAMBDA_FUNCTION_TEST_NAME="my_function"
+
+pytest --runner-mode=cloud test_my_function.py
+```
+
+[↑ Back to top](#table-of-contents)
+
+## Quick start
+
+Ready to build your first durable function? Here's a minimal example:
+
+```python
+from aws_durable_execution_sdk_python import (
+ DurableContext,
+ durable_execution,
+ durable_step,
+ StepContext,
+)
+
+@durable_step
+def greet_user(step_context: StepContext, name: str) -> str:
+ """Generate a greeting."""
+ return f"Hello {name}!"
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> str:
+ """Simple durable function."""
+ name = event.get("name", "World")
+ greeting = context.step(greet_user(name))
+ return greeting
+```
+
+Deploy this to Lambda and you have a durable function. The `greet_user` step is checkpointed automatically.
+
+### Using a custom boto3 Lambda client
+
+If you need to customize the boto3 Lambda client that the SDK uses for durable execution operations (for example, to configure custom endpoints, retry settings, or credentials), pass it to the decorator through the `boto3_client` parameter:
+
+```python
+import boto3
+from botocore.config import Config
+from aws_durable_execution_sdk_python import durable_execution, DurableContext
+
+# Create a custom boto3 Lambda client with specific configuration
+custom_lambda_client = boto3.client(
+ 'lambda',
+ config=Config(
+ retries={'max_attempts': 5, 'mode': 'adaptive'},
+ connect_timeout=10,
+ read_timeout=60,
+ )
+)
+
+@durable_execution(boto3_client=custom_lambda_client)
+def handler(event: dict, context: DurableContext) -> dict:
+ # Your durable function logic
+ return {"status": "success"}
+```
+
+The custom Lambda client is used for all checkpoint and state management operations. If you don't provide a `boto3_client`, the SDK initializes a default Lambda client from your environment.
+
+[↑ Back to top](#table-of-contents)
+
+## Next steps
+
+Now that you've built your first durable function, explore the core features:
+
+**Learn the operations:**
+- [Steps](core/steps.md) - Execute code with retry strategies and checkpointing
+- [Wait operations](core/wait.md) - Pause execution for seconds, minutes, or hours
+- [Callbacks](core/callbacks.md) - Wait for external systems to respond
+- [Child contexts](core/child-contexts.md) - Organize complex workflows
+- [Parallel operations](core/parallel.md) - Run multiple operations concurrently
+- [Map operations](core/map.md) - Process collections in parallel
+
+**Dive deeper:**
+- [Error handling](advanced/error-handling.md) - Handle failures and implement retry strategies
+- [Testing patterns](testing-patterns/basic-tests.md) - Write effective tests for your workflows
+- [Best practices](best-practices.md) - Avoid common pitfalls
+
+[↑ Back to top](#table-of-contents)
+
+## See also
+
+- [DurableContext API](api-reference/context.md) - Complete reference for the context object
+- [Decorators](api-reference/decorators.md) - All available decorators
+- [Examples directory](https://github.com/awslabs/aws-durable-execution-sdk-python/tree/main/examples) - More working examples
+
+[↑ Back to top](#table-of-contents)
+
+## License
+
+See the [LICENSE](../LICENSE) file for our project's licensing.
+
+[↑ Back to top](#table-of-contents)
diff --git a/docs/index.md b/docs/index.md
new file mode 100644
index 0000000..443f988
--- /dev/null
+++ b/docs/index.md
@@ -0,0 +1,234 @@
+# AWS Durable Execution SDK for Python
+
+> **Using JavaScript or TypeScript?** Check out the [AWS Durable Execution SDK for JavaScript](https://github.com/aws/aws-durable-execution-sdk-js) instead.
+
+## Table of Contents
+
+- [What is the Durable Execution SDK?](#what-is-the-durable-execution-sdk)
+- [Key features](#key-features)
+- [Quick navigation](#quick-navigation)
+- [Installation](#installation)
+- [Quick example](#quick-example)
+- [Core concepts](#core-concepts)
+- [Architecture](#architecture)
+- [Use cases](#use-cases)
+- [Getting help](#getting-help)
+- [License](#license)
+
+## What is the Durable Execution SDK?
+
+The AWS Durable Execution SDK for Python lets you build reliable, long-running workflows in AWS Lambda. Your functions can pause execution, wait for external events, retry failed operations, and resume exactly where they left off—even if Lambda recycles your execution environment.
+
+The SDK provides a `DurableContext` that gives you operations like steps, waits, callbacks, and parallel execution. Each operation is checkpointed automatically, so your workflow state is preserved across interruptions.
+
+[↑ Back to top](#table-of-contents)
+
+## Key features
+
+- **Automatic checkpointing** - Your workflow state is saved automatically after each operation
+- **Durable steps** - Execute code with configurable retry strategies and at-most-once or at-least-once semantics
+- **Wait operations** - Pause execution for seconds, minutes, or hours without blocking Lambda resources
+- **Callbacks** - Wait for external systems to respond with results or approvals
+- **Parallel execution** - Run multiple operations concurrently with configurable completion criteria
+- **Map operations** - Process collections in parallel with batching and failure tolerance
+- **Child contexts** - Isolate nested workflows for better organization and error handling
+- **Structured logging** - Integrate with your logger to track execution flow and debug issues
+
+[↑ Back to top](#table-of-contents)
+
+## Quick navigation
+
+**New to durable functions?**
+- [Getting started guide](getting-started.md) - Build your first durable function
+
+**Core operations:**
+- [Steps](core/steps.md) - Execute code with automatic checkpointing and retry support
+- [Wait operations](core/wait.md) - Pause execution without blocking Lambda resources
+- [Callbacks](core/callbacks.md) - Wait for external systems to respond
+- [Invoke operations](core/invoke.md) - Call other durable functions and compose workflows
+- [Child contexts](core/child-contexts.md) - Organize complex workflows into isolated units
+- [Parallel operations](core/parallel.md) - Run multiple operations concurrently
+- [Map operations](core/map.md) - Process collections in parallel with batching
+- [Logger integration](core/logger.md) - Add structured logging to track execution
+
+**Advanced topics:**
+- [Error handling](advanced/error-handling.md) - Handle failures and implement retry strategies
+- [Testing modes](advanced/testing-modes.md) - Run tests locally or against deployed Lambda functions
+- [Serialization](advanced/serialization.md) - Customize how data is serialized in checkpoints
+- [Configuration](advanced/configuration.md) - Fine-tune operation behavior
+- [Performance optimization](advanced/performance.md) - Best practices for efficient workflows
+
+**API reference:**
+- [DurableContext](api-reference/context.md) - Main context class and methods
+- [Configuration classes](api-reference/config.md) - StepConfig, CallbackConfig, and more
+- [Decorators](api-reference/decorators.md) - @durable_execution, @durable_step, etc.
+- [Types and protocols](api-reference/types.md) - Type definitions and interfaces
+- [Exceptions](api-reference/exceptions.md) - DurableExecutionsError, InvocationError, and more
+
+[↑ Back to top](#table-of-contents)
+
+## Installation
+
+Install the SDK using pip:
+
+```console
+pip install aws-durable-execution-sdk-python
+```
+
+[↑ Back to top](#table-of-contents)
+
+## Quick example
+
+Here's a simple durable function that processes an order:
+
+```python
+from aws_durable_execution_sdk_python import (
+ DurableContext,
+ durable_execution,
+ durable_step,
+ StepContext,
+)
+
+@durable_step
+def validate_order(step_context: StepContext, order_id: str) -> dict:
+ # Validation logic here
+ return {"order_id": order_id, "valid": True}
+
+@durable_step
+def charge_payment(step_context: StepContext, order_id: str, amount: float) -> dict:
+ # Payment processing logic here
+ return {"transaction_id": "txn_123", "status": "completed"}
+
+@durable_step
+def fulfill_order(step_context: StepContext, order_id: str) -> dict:
+ # Fulfillment logic here
+ return {"tracking_number": "TRK123456"}
+
+@durable_execution
+def process_order(event: dict, context: DurableContext) -> dict:
+ order_id = event["order_id"]
+ amount = event["amount"]
+
+ # Step 1: Validate the order
+ validation = context.step(validate_order(order_id))
+
+ if not validation["valid"]:
+ return {"status": "failed", "reason": "invalid_order"}
+
+ # Step 2: Charge payment
+ payment = context.step(charge_payment(order_id, amount))
+
+ # Step 3: Wait for payment confirmation (simulated)
+ context.wait(seconds=5)
+
+ # Step 4: Fulfill the order
+ fulfillment = context.step(fulfill_order(order_id))
+
+ return {
+ "status": "completed",
+ "order_id": order_id,
+ "transaction_id": payment["transaction_id"],
+ "tracking_number": fulfillment["tracking_number"]
+ }
+```
+
+Each `context.step()` call is checkpointed automatically. If Lambda recycles your execution environment, the function resumes from the last completed step.
+
+[↑ Back to top](#table-of-contents)
+
+## Core concepts
+
+### Durable functions
+
+A durable function is a Lambda function decorated with `@durable_execution` that can be checkpointed and resumed. The function receives a `DurableContext` that provides methods for durable operations.
+
+### Operations
+
+Operations are units of work in a durable execution. Each operation type serves a specific purpose:
+
+- **Steps** - Execute code and checkpoint the result with retry support
+- **Waits** - Pause execution for a specified duration without blocking Lambda
+- **Callbacks** - Wait for external systems to respond with results
+- **Invoke** - Call other durable functions to compose complex workflows
+- **Child contexts** - Isolate nested workflows for better organization
+- **Parallel** - Execute multiple operations concurrently with completion criteria
+- **Map** - Process collections in parallel with batching and failure tolerance
+
+### Checkpoints
+
+Checkpoints are saved states of execution that allow resumption. When your function calls `context.step()` or other operations, the SDK creates a checkpoint and sends it to AWS. If Lambda recycles your environment or your function waits for an external event, execution can resume from the last checkpoint.
+
+### Replay
+
+When your function resumes, completed operations don't re-execute. Instead, they return their checkpointed results instantly. This means your function code runs multiple times, but side effects only happen once per operation.
+
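+For example, a sketch using the decorators introduced below: a step's side effect runs when the step first executes, and replays return its checkpointed result instead.
+
+```python
+from aws_durable_execution_sdk_python import (
+    DurableContext,
+    durable_execution,
+    durable_step,
+    StepContext,
+)
+
+@durable_step
+def send_notification(step_context: StepContext, address: str) -> str:
+    # Side effect: executes when the step first runs, not on replay
+    print(f"Notifying {address}")
+    return "sent"
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> str:
+    # On replay, this returns the checkpointed "sent" without re-notifying
+    return context.step(send_notification(event["address"]))
+```
+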
+### Decorators
+
+The SDK provides decorators to mark functions as durable:
+
+- `@durable_execution` - Marks your Lambda handler as a durable function
+- `@durable_step` - Marks a function that can be used with `context.step()`
+- `@durable_with_child_context` - Marks a function that receives a child context
+
+[↑ Back to top](#table-of-contents)
+
+## Architecture
+
+The SDK integrates with AWS Lambda's durable execution service to provide reliable, long-running workflows. Here's how it works:
+
+1. **Execution starts** - Lambda invokes your function with a `DurableContext`
+2. **Operations checkpoint** - Each `context.step()`, `context.wait()`, or other operation creates a checkpoint
+3. **State is saved** - Checkpoints are sent to the durable execution service and persisted
+4. **Execution may pause** - Lambda can recycle your environment or wait for external events
+5. **Execution resumes** - When ready, Lambda invokes your function again with the saved state
+6. **Operations replay** - Completed operations return their saved results instantly
+7. **New operations execute** - Your function continues from where it left off
+
+### Key components
+
+- **DurableContext** - Main interface for durable operations, provided by Lambda
+- **ExecutionState** - Manages checkpoints and tracks operation results
+- **Operation handlers** - Execute steps, waits, callbacks, and other operations
+- **Checkpoint batching** - Groups multiple checkpoints into efficient API calls
+- **SerDes system** - Serializes and deserializes operation inputs and results
+
+### Checkpointing
+
+The SDK uses a background thread to batch checkpoints for efficiency. Critical operations (like step starts with at-most-once semantics) block until the checkpoint is confirmed. Non-critical operations (like observability checkpoints) are asynchronous for better performance.
+
+[↑ Back to top](#table-of-contents)
+
+## Use cases
+
+The SDK helps you build:
+
+**Order processing workflows** - Validate orders, charge payments, and fulfill shipments with automatic retry on failures.
+
+**Approval workflows** - Wait for human approvals or external system responses using callbacks.
+
+**Data processing pipelines** - Process large datasets in parallel with map operations and failure tolerance.
+
+**Multi-step integrations** - Coordinate calls to multiple services with proper error handling and state management.
+
+**Long-running tasks** - Execute workflows that take minutes or hours without blocking Lambda resources.
+
+**Saga patterns** - Implement distributed transactions with compensation logic for failures.
+
+[↑ Back to top](#table-of-contents)
+
+## Getting help
+
+**Documentation** - You're reading it! Use the navigation above to find specific topics.
+
+**Examples** - Check the `examples/` directory in the repository for working code samples.
+
+**Issues** - Report bugs or request features on the [GitHub repository](https://github.com/awslabs/aws-durable-execution-sdk-python).
+
+**Contributing** - See [CONTRIBUTING.md](../CONTRIBUTING.md) for guidelines on contributing to the project.
+
+[↑ Back to top](#table-of-contents)
+
+## License
+
+See the [LICENSE](../LICENSE) file for our project's licensing.
+
+[↑ Back to top](#table-of-contents)
diff --git a/docs/testing-patterns/.gitkeep b/docs/testing-patterns/.gitkeep
new file mode 100644
index 0000000..9748135
--- /dev/null
+++ b/docs/testing-patterns/.gitkeep
@@ -0,0 +1 @@
+# This file will be removed once the directory has content
diff --git a/docs/testing-patterns/basic-tests.md b/docs/testing-patterns/basic-tests.md
new file mode 100644
index 0000000..7f6cd66
--- /dev/null
+++ b/docs/testing-patterns/basic-tests.md
@@ -0,0 +1,701 @@
+# Basic Test Patterns
+
+## Table of Contents
+
+- [Overview](#overview)
+- [Prerequisites](#prerequisites)
+- [Project structure](#project-structure)
+- [Getting started](#getting-started)
+- [Status checking patterns](#status-checking-patterns)
+- [Result verification patterns](#result-verification-patterns)
+- [Operation-specific assertions](#operation-specific-assertions)
+- [Test organization tips](#test-organization-tips)
+- [FAQ](#faq)
+- [See also](#see-also)
+
+[← Back to main index](../index.md)
+
+## Overview
+
+When you test durable functions, you need to verify that the function executed successfully, that it returned the expected result, and that operations like steps or waits ran correctly. This document shows you common patterns for writing these tests with simple assertions using the testing SDK.
+
+The testing SDK (`aws-durable-execution-sdk-python-testing`) provides tools to run and inspect durable functions locally without deploying to AWS. Use these patterns as building blocks for your own tests, whether you're checking a simple calculation or inspecting individual operations.
+
+[↑ Back to top](#table-of-contents)
+
+## Prerequisites
+
+To test durable functions, you need both SDKs installed:
+
+```console
+# Install the core SDK (for writing durable functions)
+pip install aws-durable-execution-sdk-python
+
+# Install the testing SDK (for testing durable functions)
+pip install aws-durable-execution-sdk-python-testing
+
+# Install pytest (test framework)
+pip install pytest
+```
+
+The core SDK provides the decorators and context for writing durable functions. The testing SDK provides the test runner and assertions for testing them.
+
+[↑ Back to top](#table-of-contents)
+
+## Project structure
+
+Here's a typical project structure for testing durable functions:
+
+```
+my-project/
+├── src/
+│ ├── __init__.py
+│ └── my_function.py # Your durable function
+├── test/
+│ ├── __init__.py
+│ ├── conftest.py # Pytest configuration and fixtures
+│ └── test_my_function.py # Your tests
+├── requirements.txt
+└── pytest.ini
+```
+
+**Key files:**
+
+- `src/my_function.py` - Contains your durable function with `@durable_execution` decorator
+- `test/conftest.py` - Configures the `durable_runner` fixture for pytest
+- `test/test_my_function.py` - Contains your test cases using the `durable_runner` fixture
+
+**Example conftest.py:**
+
+```python
+import pytest
+from aws_durable_execution_sdk_python_testing.runner import DurableFunctionTestRunner
+
+@pytest.fixture
+def durable_runner(request):
+ """Pytest fixture that provides a test runner."""
+ marker = request.node.get_closest_marker("durable_execution")
+ if not marker:
+ pytest.fail("Test must be marked with @pytest.mark.durable_execution")
+
+ handler = marker.kwargs.get("handler")
+ runner = DurableFunctionTestRunner(handler=handler)
+
+ yield runner
+```
+
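+The tests below also import a `deserialize_operation_payload` helper from `conftest.py` to decode checkpointed payloads. A minimal sketch, assuming payloads are JSON-encoded strings (adjust if you use a custom SerDes):
+
+```python
+import json
+
+def deserialize_operation_payload(payload):
+    """Decode a checkpointed operation payload back into Python data."""
+    if payload is None:
+        return None
+    if isinstance(payload, (bytes, bytearray)):
+        payload = payload.decode("utf-8")
+    return json.loads(payload)
+```
+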
+[↑ Back to top](#table-of-contents)
+
+## Getting started
+
+Here's a simple durable function:
+
+```python
+from aws_durable_execution_sdk_python import DurableContext, durable_execution
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> str:
+ """Simple hello world durable function."""
+ return "Hello World!"
+```
+
+And here's how you test it:
+
+```python
+import pytest
+from aws_durable_execution_sdk_python.execution import InvocationStatus
+from test.conftest import deserialize_operation_payload
+
+@pytest.mark.durable_execution(
+ handler=handler,
+ lambda_function_name="hello_world",
+)
+def test_hello_world(durable_runner):
+ """Test hello world example."""
+ with durable_runner:
+ result = durable_runner.run(input="test", timeout=10)
+
+ assert result.status is InvocationStatus.SUCCEEDED
+ assert deserialize_operation_payload(result.result) == "Hello World!"
+```
+
+This test:
+1. Marks the test with `@pytest.mark.durable_execution` to configure the runner
+2. Uses the `durable_runner` fixture to execute the function
+3. Checks the execution status
+4. Verifies the final result
+
+[↑ Back to top](#table-of-contents)
+
+## Status checking patterns
+
+### Check for successful execution
+
+The most basic pattern verifies that your function completed successfully:
+
+```python
+@pytest.mark.durable_execution(
+ handler=my_handler,
+ lambda_function_name="my_function",
+)
+def test_success(durable_runner):
+ """Test successful execution."""
+ with durable_runner:
+ result = durable_runner.run(input={"data": "test"}, timeout=10)
+
+ assert result.status is InvocationStatus.SUCCEEDED
+```
+
+### Check for expected failures
+
+Test that your function fails correctly when given invalid input:
+
+```python
+@pytest.mark.durable_execution(
+ handler=handler_with_validation,
+ lambda_function_name="validation_function",
+)
+def test_validation_failure(durable_runner):
+ """Test that invalid input causes failure."""
+ with durable_runner:
+ result = durable_runner.run(input={"invalid": "data"}, timeout=10)
+
+ assert result.status is InvocationStatus.FAILED
+ assert "ValidationError" in str(result.error)
+```
+
+### Check execution with timeout
+
+Verify that your function completes within the expected time:
+
+```python
+@pytest.mark.durable_execution(
+ handler=quick_handler,
+ lambda_function_name="quick_function",
+)
+def test_completes_quickly(durable_runner):
+ """Test that function completes within timeout."""
+ with durable_runner:
+ # Use a short timeout to verify quick execution
+ result = durable_runner.run(input={}, timeout=5)
+
+ assert result.status is InvocationStatus.SUCCEEDED
+```
+
+[↑ Back to top](#table-of-contents)
+
+## Result verification patterns
+
+### Verify simple return values
+
+Check that your function returns the expected value:
+
+```python
+from test.conftest import deserialize_operation_payload
+
+@pytest.mark.durable_execution(
+ handler=calculator_handler,
+ lambda_function_name="calculator",
+)
+def test_calculation_result(durable_runner):
+ """Test calculation returns correct result."""
+ with durable_runner:
+ result = durable_runner.run(input={"a": 5, "b": 3}, timeout=10)
+
+ assert result.status is InvocationStatus.SUCCEEDED
+ assert deserialize_operation_payload(result.result) == 8
+```
+
+### Verify complex return values
+
+Check specific fields in complex return values:
+
+```python
+@pytest.mark.durable_execution(
+ handler=order_handler,
+ lambda_function_name="order_processor",
+)
+def test_order_processing(durable_runner):
+ """Test order processing returns correct structure."""
+ with durable_runner:
+ result = durable_runner.run(
+ input={"order_id": "order-123", "amount": 100.0},
+ timeout=10
+ )
+
+ assert result.status is InvocationStatus.SUCCEEDED
+
+ order_result = deserialize_operation_payload(result.result)
+ assert order_result["order_id"] == "order-123"
+ assert order_result["status"] == "completed"
+ assert order_result["amount"] == 100.0
+```
+
+### Verify list results
+
+Check that your function returns the expected list of values:
+
+```python
+@pytest.mark.durable_execution(
+ handler=parallel_handler,
+ lambda_function_name="parallel_tasks",
+)
+def test_parallel_results(durable_runner):
+ """Test parallel operations return all results."""
+ with durable_runner:
+ result = durable_runner.run(input={}, timeout=10)
+
+ assert result.status is InvocationStatus.SUCCEEDED
+
+ results = deserialize_operation_payload(result.result)
+ assert len(results) == 3
+ assert results == [
+ "Task 1 complete",
+ "Task 2 complete",
+ "Task 3 complete",
+ ]
+```
+
+[↑ Back to top](#table-of-contents)
+
+## Operation-specific assertions
+
+### Verify step operations
+
+Here's a function with a step:
+
+```python
+from aws_durable_execution_sdk_python import (
+ DurableContext,
+ durable_execution,
+ durable_step,
+ StepContext,
+)
+
+@durable_step
+def add_numbers(step_context: StepContext, a: int, b: int) -> int:
+ return a + b
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> int:
+ result = context.step(add_numbers(5, 3))
+ return result
+```
+
+Check that the step executed and produced the expected result:
+
+```python
+import pytest
+from aws_durable_execution_sdk_python.execution import InvocationStatus
+from test.conftest import deserialize_operation_payload
+
+@pytest.mark.durable_execution(
+ handler=handler,
+ lambda_function_name="step_function",
+)
+def test_step_execution(durable_runner):
+ """Test step executes correctly."""
+ with durable_runner:
+ result = durable_runner.run(input={}, timeout=10)
+
+ assert result.status is InvocationStatus.SUCCEEDED
+
+ # Get step by name
+ step_result = result.get_step("add_numbers")
+ assert deserialize_operation_payload(step_result.result) == 8
+```
+
+### Verify wait operations
+
+Here's a function with a wait:
+
+```python
+from aws_durable_execution_sdk_python import DurableContext, durable_execution
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> str:
+ context.wait(seconds=5)
+ return "Wait completed"
+```
+
+Check that the wait operation was created with correct timing:
+
+```python
+@pytest.mark.durable_execution(
+ handler=handler,
+ lambda_function_name="wait_function",
+)
+def test_wait_operation(durable_runner):
+ """Test wait operation is created."""
+ with durable_runner:
+ result = durable_runner.run(input={}, timeout=10)
+
+ assert result.status is InvocationStatus.SUCCEEDED
+
+ # Find wait operations
+ wait_ops = [
+ op for op in result.operations
+ if op.operation_type.value == "WAIT"
+ ]
+ assert len(wait_ops) == 1
+ assert wait_ops[0].scheduled_end_timestamp is not None
+```
+
+### Verify callback operations
+
+Here's a function that creates a callback:
+
+```python
+from aws_durable_execution_sdk_python import DurableContext, durable_execution
+from aws_durable_execution_sdk_python.config import CallbackConfig
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> str:
+ callback_config = CallbackConfig(
+ timeout_seconds=120,
+ heartbeat_timeout_seconds=60
+ )
+
+ callback = context.create_callback(
+ name="example_callback",
+ config=callback_config
+ )
+
+ return f"Callback created with ID: {callback.callback_id}"
+```
+
+Check that the callback was created with correct configuration:
+
+```python
+@pytest.mark.durable_execution(
+ handler=handler,
+ lambda_function_name="callback_function",
+)
+def test_callback_creation(durable_runner):
+ """Test callback is created correctly."""
+ with durable_runner:
+ result = durable_runner.run(input={}, timeout=10)
+
+ assert result.status is InvocationStatus.SUCCEEDED
+
+ # Find callback operations
+ callback_ops = [
+ op for op in result.operations
+ if op.operation_type.value == "CALLBACK"
+ ]
+ assert len(callback_ops) == 1
+
+ callback_op = callback_ops[0]
+ assert callback_op.name == "example_callback"
+ assert callback_op.callback_id is not None
+```
+
+### Verify child context operations
+
+Here's a function with a child context:
+
+```python
+from aws_durable_execution_sdk_python import (
+ DurableContext,
+ durable_execution,
+ durable_with_child_context,
+)
+
+@durable_with_child_context
+def child_operation(ctx: DurableContext, value: int) -> int:
+ return ctx.step(lambda _: value * 2, name="multiply")
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> str:
+ result = context.run_in_child_context(child_operation(5))
+ return f"Child context result: {result}"
+```
+
+Check that the child context executed correctly:
+
+```python
+@pytest.mark.durable_execution(
+ handler=handler,
+ lambda_function_name="child_context_function",
+)
+def test_child_context(durable_runner):
+ """Test child context executes."""
+ with durable_runner:
+ result = durable_runner.run(input={}, timeout=10)
+
+ assert result.status is InvocationStatus.SUCCEEDED
+
+ # Find child context operations
+ context_ops = [
+ op for op in result.operations
+ if op.operation_type.value == "CONTEXT"
+ ]
+ assert len(context_ops) >= 1
+```
+
+### Verify parallel operations
+
+Here's a function with parallel operations:
+
+```python
+from aws_durable_execution_sdk_python import DurableContext, durable_execution
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> list[str]:
+ # Execute multiple operations
+ task1 = context.step(lambda _: "Task 1 complete", name="task1")
+ task2 = context.step(lambda _: "Task 2 complete", name="task2")
+ task3 = context.step(lambda _: "Task 3 complete", name="task3")
+
+ # All tasks execute concurrently and results are collected
+ return [task1, task2, task3]
+```
+
+Check that multiple operations executed in parallel:
+
+```python
+from aws_durable_execution_sdk_python.lambda_service import OperationType
+
+@pytest.mark.durable_execution(
+ handler=handler,
+ lambda_function_name="parallel_function",
+)
+def test_parallel_operations(durable_runner):
+ """Test parallel operations execute."""
+ with durable_runner:
+ result = durable_runner.run(input={}, timeout=10)
+
+ assert result.status is InvocationStatus.SUCCEEDED
+
+ # Find all step operations
+ step_ops = [
+ op for op in result.operations
+ if op.operation_type == OperationType.STEP
+ ]
+ assert len(step_ops) == 3
+
+ # Verify step names
+ step_names = {op.name for op in step_ops}
+ assert step_names == {"task1", "task2", "task3"}
+```
+
+[↑ Back to top](#table-of-contents)
+
+## Test organization tips
+
+### Use descriptive test names
+
+Name your tests to clearly describe what they verify:
+
+```python
+# Good - describes what is being tested
+def test_order_processing_succeeds_with_valid_input(durable_runner):
+ pass
+
+def test_order_processing_fails_with_invalid_order_id(durable_runner):
+ pass
+
+# Avoid - vague or unclear
+def test_order(durable_runner):
+ pass
+
+def test_case_1(durable_runner):
+ pass
+```
+
+### Group related tests
+
+Organize tests by feature or functionality:
+
+```python
+# tests/test_order_processing.py
+class TestOrderValidation:
+ """Tests for order validation."""
+
+ @pytest.mark.durable_execution(handler=handler, lambda_function_name="orders")
+ def test_valid_order(self, durable_runner):
+ """Test valid order is accepted."""
+ pass
+
+ @pytest.mark.durable_execution(handler=handler, lambda_function_name="orders")
+ def test_invalid_order_id(self, durable_runner):
+ """Test invalid order ID is rejected."""
+ pass
+
+class TestOrderFulfillment:
+ """Tests for order fulfillment."""
+
+ @pytest.mark.durable_execution(handler=handler, lambda_function_name="orders")
+ def test_fulfillment_success(self, durable_runner):
+ """Test successful order fulfillment."""
+ pass
+```
+
+### Use fixtures for common test data
+
+Create fixtures for test data you use across multiple tests:
+
+```python
+# conftest.py
+@pytest.fixture
+def valid_order():
+ """Provide valid order data."""
+ return {
+ "order_id": "order-123",
+ "customer_id": "customer-456",
+ "amount": 100.0,
+ "items": [
+ {"product_id": "prod-1", "quantity": 2},
+ {"product_id": "prod-2", "quantity": 1},
+ ],
+ }
+
+# test_orders.py
+@pytest.mark.durable_execution(handler=handler, lambda_function_name="orders")
+def test_order_processing(durable_runner, valid_order):
+ """Test order processing with valid data."""
+ with durable_runner:
+ result = durable_runner.run(input=valid_order, timeout=10)
+
+ assert result.status is InvocationStatus.SUCCEEDED
+```
+
+### Add docstrings to tests
+
+Document what each test verifies:
+
+```python
+@pytest.mark.durable_execution(handler=handler, lambda_function_name="payment")
+def test_payment_with_retry(durable_runner):
+ """Test payment processing retries on transient failures.
+
+ This test verifies that:
+ 1. Payment step retries on RuntimeError
+ 2. Function eventually succeeds after retries
+ 3. Final result includes transaction ID
+ """
+ with durable_runner:
+ result = durable_runner.run(input={"amount": 50.0}, timeout=30)
+
+ assert result.status is InvocationStatus.SUCCEEDED
+```
+
+### Use parametrized tests for similar cases
+
+Test multiple inputs with the same logic using `pytest.mark.parametrize`:
+
+```python
+@pytest.mark.parametrize("a,b,expected", [
+ (5, 3, 8),
+ (10, 20, 30),
+ (0, 0, 0),
+ (-5, 5, 0),
+])
+@pytest.mark.durable_execution(handler=add_handler, lambda_function_name="calculator")
+def test_addition(durable_runner, a, b, expected):
+ """Test addition with various inputs."""
+ with durable_runner:
+ result = durable_runner.run(input={"a": a, "b": b}, timeout=10)
+
+ assert result.status is InvocationStatus.SUCCEEDED
+ assert deserialize_operation_payload(result.result) == expected
+```
+
+### Keep tests focused
+
+Each test should verify one specific behavior:
+
+```python
+# Good - focused on one behavior
+@pytest.mark.durable_execution(handler=handler, lambda_function_name="orders")
+def test_order_validation_succeeds(durable_runner):
+ """Test order validation with valid input."""
+ with durable_runner:
+ result = durable_runner.run(input={"order_id": "order-123"}, timeout=10)
+
+ assert result.status is InvocationStatus.SUCCEEDED
+
+@pytest.mark.durable_execution(handler=handler, lambda_function_name="orders")
+def test_order_validation_fails_missing_id(durable_runner):
+ """Test order validation fails without order ID."""
+ with durable_runner:
+ result = durable_runner.run(input={}, timeout=10)
+
+ assert result.status is InvocationStatus.FAILED
+
+# Avoid - testing multiple behaviors
+@pytest.mark.durable_execution(handler=handler, lambda_function_name="orders")
+def test_order_validation(durable_runner):
+ """Test order validation."""
+ # Test valid input
+ result1 = durable_runner.run(input={"order_id": "order-123"}, timeout=10)
+ assert result1.status is InvocationStatus.SUCCEEDED
+
+ # Test invalid input
+ result2 = durable_runner.run(input={}, timeout=10)
+ assert result2.status is InvocationStatus.FAILED
+```
+
+[↑ Back to top](#table-of-contents)
+
+## FAQ
+
+**Q: Do I need to deploy my function to test it?**
+
+A: No, the test runner executes your function locally. You only need to deploy for cloud testing mode.
+
+**Q: How do I test functions with external dependencies?**
+
+A: Mock external dependencies in your test setup. The test runner executes your function code as-is, so standard Python mocking works.
+
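+A minimal sketch with `unittest.mock` (the patched `my_function.fetch_data` helper is hypothetical, for illustration):
+
+```python
+from unittest.mock import patch
+
+import pytest
+from aws_durable_execution_sdk_python.execution import InvocationStatus
+from my_function import handler  # assumes a step in my_function calls fetch_data
+
+@pytest.mark.durable_execution(handler=handler, lambda_function_name="my_function")
+def test_with_mocked_dependency(durable_runner):
+    """Steps run in-process, so standard patching applies to their code too."""
+    with patch("my_function.fetch_data", return_value={"ok": True}):
+        with durable_runner:
+            result = durable_runner.run(input={}, timeout=10)
+
+    assert result.status is InvocationStatus.SUCCEEDED
+```
+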
+**Q: Can I test multiple functions in one test file?**
+
+A: Yes, use different `@pytest.mark.durable_execution` markers for each function you want to test.
+
+**Q: How do I access operation results?**
+
+A: Use `result.get_step(name)` for steps, or iterate through `result.operations` to find specific operation types.
+
+**Q: What's the difference between result.result and step.result?**
+
+A: `result.result` is the final return value of your handler function. `step.result` is the return value of a specific step operation.
+
+**Q: How do I test error scenarios?**
+
+A: Check that `result.status is InvocationStatus.FAILED` and inspect `result.error` for the error message.
+
+**Q: Can I run tests in parallel?**
+
+A: Yes, use pytest-xdist: `pytest -n auto` to run tests in parallel.
+
+**Q: How do I debug failing tests?**
+
+A: Add print statements or use a debugger. The test runner executes your code locally, so standard debugging tools work.
+
+**Q: What timeout should I use?**
+
+A: Use a timeout slightly longer than your function's expected execution time. For most tests, 10-30 seconds is sufficient.
+
+**Q: How do I test functions that use environment variables?**
+
+A: Set environment variables in your test setup or use pytest fixtures to manage them.
+
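+For example, with pytest's built-in `monkeypatch` fixture (the `TABLE_NAME` variable is hypothetical):
+
+```python
+@pytest.mark.durable_execution(handler=handler, lambda_function_name="my_function")
+def test_with_env_var(durable_runner, monkeypatch):
+    # Set the variable before running; handler code reads it via os.environ
+    monkeypatch.setenv("TABLE_NAME", "test-table")
+
+    with durable_runner:
+        result = durable_runner.run(input={}, timeout=10)
+
+    assert result.status is InvocationStatus.SUCCEEDED
+```
+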
+[↑ Back to top](#table-of-contents)
+
+## See also
+
+- [Complex workflows](complex-workflows.md) - Testing multi-step workflows
+- [Best practices](best-practices.md) - Testing recommendations
+- [Pytest integration](../advanced/pytest-integration.md) - Pytest fixtures and markers
+- [Custom assertions](../advanced/custom-assertions.md) - Advanced result inspection
+- [Steps](../core/steps.md) - Testing step operations
+- [Wait operations](../core/wait.md) - Testing wait operations
+- [Callbacks](../core/callbacks.md) - Testing callback operations
+
+[↑ Back to top](#table-of-contents)
+
+## License
+
+See the [LICENSE](../../LICENSE) file for our project's licensing.
+
+[↑ Back to top](#table-of-contents)
diff --git a/docs/testing-patterns/complex-workflows.md b/docs/testing-patterns/complex-workflows.md
new file mode 100644
index 0000000..cac74a1
--- /dev/null
+++ b/docs/testing-patterns/complex-workflows.md
@@ -0,0 +1,675 @@
+# Complex Workflow Testing
+
+## Table of Contents
+
+- [Overview](#overview)
+- [Prerequisites](#prerequisites)
+- [Multi-step workflows](#multi-step-workflows)
+- [Nested child contexts](#nested-child-contexts)
+- [Parallel operations](#parallel-operations)
+- [Error scenarios](#error-scenarios)
+- [Timeout handling](#timeout-handling)
+- [Polling patterns](#polling-patterns)
+- [FAQ](#faq)
+- [See also](#see-also)
+
+[← Back to main index](../index.md)
+
+## Overview
+
+When your workflows involve multiple steps, nested contexts, or parallel operations, you need to verify more than just the final result. You'll want to check intermediate states, operation ordering, error handling, and timeout behavior.
+
+This guide shows you how to test workflows that chain operations together, handle errors gracefully, and implement polling patterns.
+
+[↑ Back to top](#table-of-contents)
+
+## Prerequisites
+
+You need both SDKs installed:
+
+```console
+pip install aws-durable-execution-sdk-python
+pip install aws-durable-execution-sdk-python-testing
+pip install pytest
+```
+
+If you're new to testing durable functions, start with [Basic test patterns](basic-tests.md) first.
+
+[↑ Back to top](#table-of-contents)
+
+## Multi-step workflows
+
+### Sequential operations
+
+Here's a workflow that processes an order through validation, payment, and fulfillment:
+
+```python
+from aws_durable_execution_sdk_python import (
+ DurableContext,
+ durable_execution,
+ durable_step,
+ StepContext,
+)
+
+@durable_step
+def validate_order(step_context: StepContext, order_id: str) -> dict:
+ return {"order_id": order_id, "status": "validated"}
+
+@durable_step
+def process_payment(step_context: StepContext, order: dict) -> dict:
+ return {**order, "payment_status": "completed"}
+
+@durable_step
+def fulfill_order(step_context: StepContext, order: dict) -> dict:
+ return {**order, "fulfillment_status": "shipped"}
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+ order_id = event["order_id"]
+
+ validated = context.step(validate_order(order_id), name="validate")
+ paid = context.step(process_payment(validated), name="payment")
+ fulfilled = context.step(fulfill_order(paid), name="fulfillment")
+
+ return fulfilled
+```
+
+Verify all steps execute in order:
+
+```python
+import pytest
+from aws_durable_execution_sdk_python.execution import InvocationStatus
+from aws_durable_execution_sdk_python.lambda_service import OperationType
+from test.conftest import deserialize_operation_payload
+
+@pytest.mark.durable_execution(handler=handler, lambda_function_name="order_workflow")
+def test_order_workflow(durable_runner):
+ """Test order processing executes all steps."""
+ with durable_runner:
+ result = durable_runner.run(input={"order_id": "order-123"}, timeout=30)
+
+ assert result.status is InvocationStatus.SUCCEEDED
+
+ # Check final result
+ final_result = deserialize_operation_payload(result.result)
+ assert final_result["order_id"] == "order-123"
+ assert final_result["payment_status"] == "completed"
+ assert final_result["fulfillment_status"] == "shipped"
+
+ # Verify all three steps ran
+ step_ops = [op for op in result.operations if op.operation_type == OperationType.STEP]
+ assert len(step_ops) == 3
+
+ # Check step order
+ step_names = [op.name for op in step_ops]
+ assert step_names == ["validate", "payment", "fulfillment"]
+```
+
+[↑ Back to top](#table-of-contents)
+
+### Conditional branching
+
+Test different execution paths based on input:
+
+```python
+@durable_execution
+def handler(event: dict, context: DurableContext) -> str:
+ amount = event.get("amount", 0)
+
+ context.step(lambda _: amount, name="validate_amount")
+
+ if amount > 1000:
+ context.step(lambda _: "Manager approval required", name="approval")
+ context.wait(seconds=10, name="approval_wait")
+ result = context.step(lambda _: "High-value order processed", name="process_high")
+ else:
+ result = context.step(lambda _: "Standard order processed", name="process_standard")
+
+ return result
+```
+
+Test both paths separately:
+
+```python
+@pytest.mark.durable_execution(handler=handler, lambda_function_name="conditional_workflow")
+def test_high_value_path(durable_runner):
+ """Test high-value orders require approval."""
+ with durable_runner:
+ result = durable_runner.run(input={"amount": 1500}, timeout=30)
+
+ assert result.status is InvocationStatus.SUCCEEDED
+ assert deserialize_operation_payload(result.result) == "High-value order processed"
+
+ # Verify approval step exists
+ approval_step = result.get_step("approval")
+ assert approval_step is not None
+
+@pytest.mark.durable_execution(handler=handler, lambda_function_name="conditional_workflow")
+def test_standard_path(durable_runner):
+ """Test standard orders skip approval."""
+ with durable_runner:
+ result = durable_runner.run(input={"amount": 500}, timeout=30)
+
+ assert result.status is InvocationStatus.SUCCEEDED
+
+ # Verify no approval step
+ step_names = [op.name for op in result.operations if op.operation_type == OperationType.STEP]
+ assert "approval" not in step_names
+```
+
+[↑ Back to top](#table-of-contents)
+
+## Nested child contexts
+
+### Single child context
+
+Child contexts isolate operations:
+
+```python
+from aws_durable_execution_sdk_python import (
+ DurableContext,
+ durable_execution,
+ durable_with_child_context,
+)
+
+@durable_with_child_context
+def process_item(ctx: DurableContext, item_id: str) -> dict:
+ ctx.step(lambda _: f"Validating {item_id}", name="validate")
+ result = ctx.step(
+ lambda _: {"item_id": item_id, "status": "processed"},
+ name="process"
+ )
+ return result
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+ item_id = event["item_id"]
+ result = context.run_in_child_context(
+ process_item(item_id),
+ name="item_processing"
+ )
+ return result
+```
+
+Verify the child context executes:
+
+```python
+@pytest.mark.durable_execution(handler=handler, lambda_function_name="child_context_workflow")
+def test_child_context(durable_runner):
+ """Test child context execution."""
+ with durable_runner:
+ result = durable_runner.run(input={"item_id": "item-123"}, timeout=30)
+
+ assert result.status is InvocationStatus.SUCCEEDED
+
+ # Check child context ran
+ context_ops = [op for op in result.operations if op.operation_type.value == "CONTEXT"]
+ assert len(context_ops) == 1
+ assert context_ops[0].name == "item_processing"
+
+ # Check child context result
+ child_result = result.get_context("item_processing")
+ child_data = deserialize_operation_payload(child_result.result)
+ assert child_data["item_id"] == "item-123"
+```
+
+[↑ Back to top](#table-of-contents)
+
+### Multiple child contexts
+
+Use multiple child contexts to organize operations:
+
+```python
+@durable_with_child_context
+def validate_data(ctx: DurableContext, data: dict) -> dict:
+ return ctx.step(lambda _: {**data, "validated": True}, name="validate")
+
+@durable_with_child_context
+def transform_data(ctx: DurableContext, data: dict) -> dict:
+ return ctx.step(lambda _: {**data, "transformed": True}, name="transform")
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+ data = event["data"]
+
+ validated = context.run_in_child_context(validate_data(data), name="validation")
+ transformed = context.run_in_child_context(transform_data(validated), name="transformation")
+
+ return transformed
+```
+
+Verify both contexts execute:
+
+```python
+@pytest.mark.durable_execution(handler=handler, lambda_function_name="multiple_contexts")
+def test_multiple_child_contexts(durable_runner):
+ """Test multiple child contexts."""
+ with durable_runner:
+ result = durable_runner.run(input={"data": {"value": 42}}, timeout=30)
+
+ assert result.status is InvocationStatus.SUCCEEDED
+
+ final_result = deserialize_operation_payload(result.result)
+ assert final_result["validated"] is True
+ assert final_result["transformed"] is True
+
+ # Verify both contexts ran
+ context_ops = [op for op in result.operations if op.operation_type.value == "CONTEXT"]
+ assert len(context_ops) == 2
+```
+
+[↑ Back to top](#table-of-contents)
+
+## Parallel operations
+
+### Basic parallel execution
+
+Multiple operations execute concurrently:
+
+```python
+@durable_execution
+def handler(event: dict, context: DurableContext) -> list[str]:
+ task1 = context.step(lambda _: "Task 1 complete", name="task1")
+ task2 = context.step(lambda _: "Task 2 complete", name="task2")
+ task3 = context.step(lambda _: "Task 3 complete", name="task3")
+
+ return [task1, task2, task3]
+```
+
+Verify all operations execute:
+
+```python
+@pytest.mark.durable_execution(handler=handler, lambda_function_name="parallel_ops")
+def test_parallel_operations(durable_runner):
+ """Test parallel execution."""
+ with durable_runner:
+ result = durable_runner.run(input={}, timeout=30)
+
+ assert result.status is InvocationStatus.SUCCEEDED
+
+ results = deserialize_operation_payload(result.result)
+ assert len(results) == 3
+
+ # Verify all steps ran
+ step_ops = [op for op in result.operations if op.operation_type == OperationType.STEP]
+ assert len(step_ops) == 3
+
+ step_names = {op.name for op in step_ops}
+ assert step_names == {"task1", "task2", "task3"}
+```
+
+[↑ Back to top](#table-of-contents)
+
+### Processing collections
+
+Process collection items in parallel:
+
+```python
+@durable_execution
+def handler(event: dict, context: DurableContext) -> list[int]:
+ numbers = event.get("numbers", [1, 2, 3, 4, 5])
+
+ results = []
+ for i, num in enumerate(numbers):
+ result = context.step(lambda _, n=num: n * 2, name=f"square_{i}")
+ results.append(result)
+
+ return results
+```
+
+Verify collection processing:
+
+```python
+@pytest.mark.durable_execution(handler=handler, lambda_function_name="parallel_collection")
+def test_collection_processing(durable_runner):
+ """Test collection processing."""
+ with durable_runner:
+ result = durable_runner.run(input={"numbers": [1, 2, 3, 4, 5]}, timeout=30)
+
+ assert result.status is InvocationStatus.SUCCEEDED
+ assert deserialize_operation_payload(result.result) == [2, 4, 6, 8, 10]
+
+ # Verify all steps ran
+ step_ops = [op for op in result.operations if op.operation_type == OperationType.STEP]
+ assert len(step_ops) == 5
+```
+
+[↑ Back to top](#table-of-contents)
+
+## Error scenarios
+
+### Expected failures
+
+Test that your workflow fails correctly:
+
+```python
+@durable_step
+def validate_input(step_context: StepContext, value: int) -> int:
+ if value < 0:
+ raise ValueError("Value must be non-negative")
+ return value
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> int:
+ value = event.get("value", 0)
+ validated = context.step(validate_input(value), name="validate")
+ return validated
+```
+
+Verify validation failures:
+
+```python
+@pytest.mark.durable_execution(handler=handler, lambda_function_name="validation_workflow")
+def test_validation_failure(durable_runner):
+ """Test validation fails with invalid input."""
+ with durable_runner:
+ result = durable_runner.run(input={"value": -5}, timeout=30)
+
+ assert result.status is InvocationStatus.FAILED
+ assert "Value must be non-negative" in str(result.error)
+```
+
+[↑ Back to top](#table-of-contents)
+
+### Retry behavior
+
+Test operations that retry on failure:
+
+```python
+from aws_durable_execution_sdk_python.config import StepConfig
+from aws_durable_execution_sdk_python.retries import (
+ RetryStrategyConfig,
+ create_retry_strategy,
+)
+
+attempt_count = 0
+
+@durable_step
+def unreliable_operation(step_context: StepContext) -> str:
+ global attempt_count
+ attempt_count += 1
+
+ if attempt_count < 3:
+ raise RuntimeError("Transient error")
+
+ return "Operation succeeded"
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> str:
+ retry_config = RetryStrategyConfig(
+ max_attempts=5,
+ retryable_error_types=[RuntimeError],
+ )
+
+ result = context.step(
+ unreliable_operation(),
+ config=StepConfig(create_retry_strategy(retry_config)),
+ name="unreliable"
+ )
+
+ return result
+```
+
+Verify retry succeeds:
+
+```python
+@pytest.mark.durable_execution(handler=handler, lambda_function_name="retry_workflow")
+def test_retry_behavior(durable_runner):
+ """Test operation retries on failure."""
+ global attempt_count
+ attempt_count = 0
+
+ with durable_runner:
+ result = durable_runner.run(input={}, timeout=60)
+
+ assert result.status is InvocationStatus.SUCCEEDED
+ assert deserialize_operation_payload(result.result) == "Operation succeeded"
+ assert attempt_count >= 3
+```
+
+[↑ Back to top](#table-of-contents)
+
+### Partial failures
+
+Test workflows where some operations succeed before failure:
+
+```python
+@durable_execution
+def handler(event: dict, context: DurableContext) -> str:
+ context.step(lambda _: "Step 1 complete", name="step1")
+ context.step(lambda _: "Step 2 complete", name="step2")
+    context.step(
+        # Raise from inside a lambda by throwing into an empty generator
+        lambda _: (_ for _ in ()).throw(RuntimeError("Step 3 failed")),
+        name="step3"
+    )
+ return "Should not reach here"
+```
+
+Verify partial execution:
+
+```python
+@pytest.mark.durable_execution(handler=handler, lambda_function_name="partial_failure")
+def test_partial_failure(durable_runner):
+ """Test workflow fails after some steps succeed."""
+ with durable_runner:
+ result = durable_runner.run(input={}, timeout=30)
+
+ assert result.status is InvocationStatus.FAILED
+
+ # First two steps succeeded
+ step1 = result.get_step("step1")
+ assert deserialize_operation_payload(step1.result) == "Step 1 complete"
+
+ step2 = result.get_step("step2")
+ assert deserialize_operation_payload(step2.result) == "Step 2 complete"
+
+ assert "Step 3 failed" in str(result.error)
+```
+
+[↑ Back to top](#table-of-contents)
+
+## Timeout handling
+
+### Callback timeouts
+
+Verify callback timeout configuration:
+
+```python
+from aws_durable_execution_sdk_python.config import CallbackConfig
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> str:
+ config = CallbackConfig(timeout_seconds=60, heartbeat_timeout_seconds=30)
+ callback = context.create_callback(name="approval_callback", config=config)
+ return f"Callback created: {callback.callback_id}"
+```
+
+Test callback configuration:
+
+```python
+@pytest.mark.durable_execution(handler=handler, lambda_function_name="callback_timeout")
+def test_callback_timeout(durable_runner):
+ """Test callback timeout configuration."""
+ with durable_runner:
+ result = durable_runner.run(input={}, timeout=10)
+
+ assert result.status is InvocationStatus.SUCCEEDED
+
+ callback_ops = [op for op in result.operations if op.operation_type.value == "CALLBACK"]
+ assert len(callback_ops) == 1
+ assert callback_ops[0].name == "approval_callback"
+```
+
+[↑ Back to top](#table-of-contents)
+
+### Long waits
+
+For workflows with long waits, verify configuration without actually waiting:
+
+```python
+@durable_execution
+def handler(event: dict, context: DurableContext) -> str:
+ context.step(lambda _: "Starting", name="start")
+ context.wait(seconds=3600, name="long_wait") # 1 hour
+ context.step(lambda _: "Continuing", name="continue")
+ return "Complete"
+```
+
+Test completes quickly:
+
+```python
+@pytest.mark.durable_execution(handler=handler, lambda_function_name="long_wait")
+def test_long_wait(durable_runner):
+ """Test long wait configuration."""
+ with durable_runner:
+ result = durable_runner.run(input={}, timeout=10)
+
+ assert result.status is InvocationStatus.SUCCEEDED
+
+ # Verify wait exists
+ wait_ops = [op for op in result.operations if op.operation_type.value == "WAIT"]
+ assert len(wait_ops) == 1
+ assert wait_ops[0].name == "long_wait"
+```
+
+[↑ Back to top](#table-of-contents)
+
+## Polling patterns
+
+### Wait-for-condition
+
+Poll until a condition is met:
+
+```python
+@durable_execution
+def handler(event: dict, context: DurableContext) -> int:
+ state = 0
+ attempt = 0
+ max_attempts = 5
+
+ while attempt < max_attempts:
+ attempt += 1
+
+ state = context.step(lambda _, s=state: s + 1, name=f"increment_{attempt}")
+
+ if state >= 3:
+ break
+
+ context.wait(seconds=1, name=f"wait_{attempt}")
+
+ return state
+```
+
+Verify polling behavior:
+
+```python
+@pytest.mark.durable_execution(handler=handler, lambda_function_name="polling")
+def test_polling(durable_runner):
+ """Test wait-for-condition pattern."""
+ with durable_runner:
+ result = durable_runner.run(input={}, timeout=30)
+
+ assert result.status is InvocationStatus.SUCCEEDED
+ assert deserialize_operation_payload(result.result) == 3
+
+ # Should have 3 increment steps
+ step_ops = [op for op in result.operations if op.operation_type == OperationType.STEP]
+ assert len(step_ops) == 3
+
+ # Should have 2 waits (before reaching state 3)
+ wait_ops = [op for op in result.operations if op.operation_type.value == "WAIT"]
+ assert len(wait_ops) == 2
+```
+
+[↑ Back to top](#table-of-contents)
+
+### Maximum attempts
+
+Test polling respects attempt limits:
+
+```python
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+ target = event.get("target", 10)
+ state = 0
+ attempt = 0
+ max_attempts = 5
+
+ while attempt < max_attempts and state < target:
+ attempt += 1
+ state = context.step(lambda _, s=state: s + 1, name=f"attempt_{attempt}")
+
+ if state < target:
+ context.wait(seconds=1, name=f"wait_{attempt}")
+
+ return {"state": state, "attempts": attempt, "reached_target": state >= target}
+```
+
+Test with unreachable target:
+
+```python
+@pytest.mark.durable_execution(handler=handler, lambda_function_name="max_attempts")
+def test_max_attempts(durable_runner):
+ """Test polling stops at max attempts."""
+ with durable_runner:
+ result = durable_runner.run(input={"target": 10}, timeout=30)
+
+ assert result.status is InvocationStatus.SUCCEEDED
+
+ final_result = deserialize_operation_payload(result.result)
+ assert final_result["attempts"] == 5
+ assert final_result["state"] == 5
+ assert final_result["reached_target"] is False
+```
+
+[↑ Back to top](#table-of-contents)
+
+## FAQ
+
+**Q: How do I test workflows with long waits?**
+
+A: The test runner doesn't actually wait. You can verify wait operations are configured correctly without waiting for them to complete.
+
+**Q: Can I test workflows with external API calls?**
+
+A: Yes, but mock external dependencies in your tests. The test runner executes your code locally, so standard Python mocking works.
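+
+A minimal sketch using `unittest.mock.patch` (the patched module path, function, and return value are hypothetical):
+
+```python
+from unittest.mock import patch
+
+@pytest.mark.durable_execution(handler=handler, lambda_function_name="api_workflow")
+def test_mocked_dependency(durable_runner):
+    """Replace an external call with a canned response."""
+    # "my_module.fetch_exchange_rate" stands in for a real external dependency
+    with patch("my_module.fetch_exchange_rate", return_value=1.25):
+        with durable_runner:
+            result = durable_runner.run(input={}, timeout=30)
+
+            assert result.status is InvocationStatus.SUCCEEDED
+```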
+
+**Q: What's the best way to test conditional logic?**
+
+A: Write separate tests for each execution path. Use descriptive test names and verify the specific operations that should execute in each path.
+
+**Q: How do I verify operation ordering?**
+
+A: Iterate through `result.operations` and check the order. You can also use operation names to verify specific sequences.
+
+**Q: What timeout should I use?**
+
+A: Use a timeout slightly longer than expected execution time. For most tests, 30-60 seconds is sufficient.
+
+**Q: How do I test error recovery?**
+
+A: Test both the failure case (verify the error is raised) and the recovery case (verify retry succeeds). Use separate tests for each scenario.
+
+[↑ Back to top](#table-of-contents)
+
+## See also
+
+- [Basic test patterns](basic-tests.md) - Simple testing patterns
+- [Best practices](best-practices.md) - Testing recommendations
+- [Steps](../core/steps.md) - Step operations
+- [Wait operations](../core/wait.md) - Wait operations
+- [Callbacks](../core/callbacks.md) - Callback operations
+- [Child contexts](../core/child-contexts.md) - Child context operations
+- [Parallel operations](../core/parallel.md) - Parallel execution
+
+[↑ Back to top](#table-of-contents)
+
+## License
+
+See the [LICENSE](../../LICENSE) file for our project's licensing.
+
+[↑ Back to top](#table-of-contents)
diff --git a/docs/testing-patterns/stores.md b/docs/testing-patterns/stores.md
new file mode 100644
index 0000000..dce1cfa
--- /dev/null
+++ b/docs/testing-patterns/stores.md
@@ -0,0 +1,263 @@
+# Execution Stores
+
+## Table of Contents
+
+- [Overview](#overview)
+- [Available stores](#available-stores)
+- [In-memory store](#in-memory-store)
+- [Filesystem store](#filesystem-store)
+- [Choosing a store](#choosing-a-store)
+- [Configuration](#configuration)
+- [FAQ](#faq)
+- [See also](#see-also)
+
+[← Back to main index](../index.md)
+
+## Overview
+
+Execution stores manage how test execution data is persisted during testing. The testing SDK (`aws-durable-execution-sdk-python-testing`) provides different store implementations for different testing scenarios. By default, tests use an in-memory store that's fast and doesn't require cleanup. For scenarios where you need persistence across test runs or want to inspect execution history, you can use a filesystem store.
+
+More store types will be added in future releases to support additional testing scenarios.
+
+[↑ Back to top](#table-of-contents)
+
+## Available stores
+
+The SDK currently provides two store implementations:
+
+- **In-memory store** - Fast, ephemeral storage for standard testing (default)
+- **Filesystem store** - Persistent storage that saves executions to disk
+
+Additional store types may be added in future releases.
+
+[↑ Back to top](#table-of-contents)
+
+## In-memory store
+
+The in-memory store keeps execution data in memory during test runs. It's the default store and works well for most testing scenarios.
+
+### Characteristics
+
+- **Fast** - No disk I/O overhead
+- **Ephemeral** - Data is lost when tests complete
+- **Thread-safe** - Uses locks for concurrent access
+- **No cleanup needed** - Memory is automatically freed
+
+### When to use
+
+Use the in-memory store when:
+- Running standard unit tests
+- You don't need to inspect executions after tests complete
+- You want the fastest test execution
+- You're running tests in CI/CD pipelines
+
+### Example
+
+The in-memory store is used by default:
+
+```python
+import pytest
+from aws_durable_execution_sdk_python.execution import InvocationStatus
+
+@pytest.mark.durable_execution(
+ handler=my_handler,
+ lambda_function_name="my_function",
+)
+def test_with_memory_store(durable_runner):
+ """Test uses in-memory store by default."""
+ with durable_runner:
+ result = durable_runner.run(input={"data": "test"}, timeout=10)
+
+ assert result.status is InvocationStatus.SUCCEEDED
+```
+
+[↑ Back to top](#table-of-contents)
+
+## Filesystem store
+
+The filesystem store persists execution data to disk as JSON files. Each execution is saved in a separate file, making it easy to inspect execution history.
+
+### Characteristics
+
+- **Persistent** - Data survives test runs
+- **Inspectable** - JSON files can be viewed and analyzed
+- **Configurable location** - Choose where files are stored
+- **Automatic directory creation** - Creates storage directory if needed
+
+### When to use
+
+Use the filesystem store when:
+- Debugging complex test failures
+- You need to inspect execution history
+- Running integration tests that span multiple sessions
+- Analyzing execution patterns over time
+
+### Example
+
+Configure the filesystem store using environment variables:
+
+```console
+# Set store type to filesystem
+export AWS_DEX_STORE_TYPE=filesystem
+
+# Optionally set custom storage directory (defaults to .durable_executions)
+export AWS_DEX_STORE_PATH=./test-executions
+
+# Run tests
+pytest tests/
+```
+
+Or configure it programmatically when using the cloud test runner:
+
+```python
+from aws_durable_execution_sdk_python_testing.runner import (
+ DurableFunctionCloudTestRunner,
+ DurableFunctionCloudTestRunnerConfig,
+)
+from aws_durable_execution_sdk_python_testing.stores.base import StoreType
+
+config = DurableFunctionCloudTestRunnerConfig(
+ function_name="my-function",
+ region="us-west-2",
+ store_type=StoreType.FILESYSTEM,
+ store_path="./my-test-executions",
+)
+
+runner = DurableFunctionCloudTestRunner(config=config)
+```
+
+### Storage format
+
+Executions are stored as JSON files with sanitized ARN names:
+
+```
+.durable_executions/
+├── arn_aws_states_us-west-2_123456789012_execution_my-function_abc123.json
+├── arn_aws_states_us-west-2_123456789012_execution_my-function_def456.json
+└── arn_aws_states_us-west-2_123456789012_execution_my-function_ghi789.json
+```
+
+Each file contains the complete execution state including operations, checkpoints, and results.
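+
+Because executions are plain JSON, you can inspect them with standard tooling. A minimal sketch that lists each file's top-level keys, assuming the default storage path:
+
+```python
+import json
+from pathlib import Path
+
+for path in Path(".durable_executions").glob("*.json"):
+    with path.open(encoding="utf-8") as f:
+        execution = json.load(f)
+    # The exact schema is implementation-defined; print the keys to explore it
+    print(path.name, sorted(execution))
+```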
+
+[↑ Back to top](#table-of-contents)
+
+## Choosing a store
+
+Use this guide to choose the right store for your needs:
+
+| Scenario | Recommended Store | Reason |
+|----------|------------------|---------|
+| Unit tests | In-memory | Fast, no cleanup needed |
+| CI/CD pipelines | In-memory | Fast, ephemeral |
+| Debugging failures | Filesystem | Inspect execution history |
+| Integration tests | Filesystem | Persist across sessions |
+| Performance testing | In-memory | Minimize I/O overhead |
+| Execution analysis | Filesystem | Analyze patterns over time |
+
+[↑ Back to top](#table-of-contents)
+
+## Configuration
+
+### Environment variables
+
+Configure stores using environment variables:
+
+```console
+# Store type (memory or filesystem)
+export AWS_DEX_STORE_TYPE=filesystem
+
+# Storage directory for filesystem store (optional, defaults to .durable_executions)
+export AWS_DEX_STORE_PATH=./test-executions
+```
+
+### Programmatic configuration
+
+Configure stores when creating a cloud test runner:
+
+```python
+from aws_durable_execution_sdk_python_testing.runner import (
+ DurableFunctionCloudTestRunner,
+ DurableFunctionCloudTestRunnerConfig,
+)
+from aws_durable_execution_sdk_python_testing.stores.base import StoreType
+
+# In-memory store (default)
+config = DurableFunctionCloudTestRunnerConfig(
+ function_name="my-function",
+ region="us-west-2",
+ store_type=StoreType.MEMORY,
+)
+
+# Filesystem store
+config = DurableFunctionCloudTestRunnerConfig(
+ function_name="my-function",
+ region="us-west-2",
+ store_type=StoreType.FILESYSTEM,
+ store_path="./my-executions",
+)
+
+runner = DurableFunctionCloudTestRunner(config=config)
+```
+
+### Default values
+
+If not specified:
+- Store type defaults to `MEMORY`
+- Filesystem store path defaults to `.durable_executions`
+
+[↑ Back to top](#table-of-contents)
+
+## FAQ
+
+**Q: Can I switch stores between test runs?**
+
+A: Yes, you can change the store type at any time. However, executions stored in one store won't be available in another.
+
+**Q: Does the filesystem store clean up old executions?**
+
+A: No, the filesystem store doesn't automatically delete old executions. You need to manually clean up the storage directory when needed.
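+
+For example, a hedged cleanup sketch for the default storage directory (this permanently deletes saved executions):
+
+```python
+import shutil
+
+# Remove all persisted executions under the default path (irreversible)
+shutil.rmtree(".durable_executions", ignore_errors=True)
+```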
+
+**Q: Can I use the filesystem store with the local test runner?**
+
+A: The filesystem store is primarily designed for the cloud test runner. The local test runner uses an in-memory store by default.
+
+**Q: Are execution files human-readable?**
+
+A: Yes, execution files are stored as formatted JSON and can be opened in any text editor.
+
+**Q: What happens if the storage directory doesn't exist?**
+
+A: The filesystem store automatically creates the directory if it doesn't exist.
+
+**Q: Can I use a custom store implementation?**
+
+A: The SDK defines an `ExecutionStore` protocol that you can implement for custom storage backends. However, this is an advanced use case.
+
+**Q: Will more store types be added?**
+
+A: Yes, additional store types may be added in future releases to support more testing scenarios.
+
+**Q: Does the in-memory store support concurrent tests?**
+
+A: Yes, the in-memory store is thread-safe and supports concurrent test execution.
+
+**Q: How much disk space does the filesystem store use?**
+
+A: Each execution typically uses a few KB to a few MB depending on the number of operations and data size. Monitor your storage directory if running many tests.
+
+[↑ Back to top](#table-of-contents)
+
+## See also
+
+- [Basic tests](basic-tests.md) - Simple test patterns
+- [Cloud testing](../advanced/cloud-testing.md) - Testing with deployed functions
+- [Test runner](../core/test-runner.md) - Test runner configuration
+- [Best practices](best-practices.md) - Testing recommendations
+
+[↑ Back to top](#table-of-contents)
+
+## License
+
+See the [LICENSE](../../LICENSE) file for our project's licensing.
+
+[↑ Back to top](#table-of-contents)
diff --git a/ops/__tests__/test_parse_sdk_branch.py b/ops/__tests__/test_parse_sdk_branch.py
new file mode 100755
index 0000000..d458651
--- /dev/null
+++ b/ops/__tests__/test_parse_sdk_branch.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python3
+
+import os
+import sys
+
+sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
+
+from parse_sdk_branch import parse_sdk_branch
+
+
+def test_parse_sdk_branch():
+ test_cases = [
+ # Basic cases
+ ("TESTING_SDK_BRANCH = feature/test", "feature/test"),
+ ("TESTING_SDK_BRANCH: feature/test", "feature/test"),
+ ("TESTING_SDK_BRANCH=feature/test", "feature/test"),
+ ("testing_sdk_branch: feature/test", "feature/test"),
+ # Complex PR body with backticks and contractions
+ (
+ """Updated the script to safely parse the testing SDK branch from the PR body, handling case insensitivity and whitespace.
+
+The goal here is to fix the usage of backticks such as in `foo`, and contractions that we've been using such as `we've`
+
+```
+plus of course the usage of multiple backticks to include code
+```
+
+TESTING_SDK_BRANCH = main
+
+By submitting this pull request, I confirm that you can use, modify, copy, and redistribute this contribution, under the terms of your choice.""",
+ "main",
+ ),
+ # Edge cases with markdown and special characters
+ (
+ """# PR Title
+
+Some `code` and we've got contractions here.
+
+```python
+def test():
+ return "test"
+```
+
+TESTING_SDK_BRANCH: feature/fix-backticks
+
+More text with `inline code` and don't forget contractions.""",
+ "feature/fix-backticks",
+ ),
+ # Multiple occurrences (should take first)
+ (
+ """TESTING_SDK_BRANCH = first-branch
+
+Some text here.
+
+TESTING_SDK_BRANCH = second-branch""",
+ "first-branch",
+ ),
+ # Whitespace variations
+ (" TESTING_SDK_BRANCH = feature/spaces ", "feature/spaces"),
+ ("TESTING_SDK_BRANCH:feature/no-space", "feature/no-space"),
+ # Default cases
+ ("No branch specified", "main"),
+ ("", "main"),
+ ("Just some random text", "main"),
+ # Case with backticks in branch name
+ ("TESTING_SDK_BRANCH = feature/fix-`backticks`", "feature/fix-`backticks`"),
+ # Case with contractions in surrounding text
+ (
+ "We've updated this and TESTING_SDK_BRANCH = feature/test and we're done",
+ "feature/test",
+ ),
+ ]
+
+ for input_text, expected in test_cases:
+ result = parse_sdk_branch(input_text)
+ # Assert is expected in test functions
+ assert result == expected, ( # noqa: S101
+ f"Expected '{expected}' but got '{result}' for input: {input_text[:50]}..."
+ )
+
+
+if __name__ == "__main__":
+ test_parse_sdk_branch()
+ sys.exit(0)
diff --git a/ops/parse_sdk_branch.py b/ops/parse_sdk_branch.py
new file mode 100755
index 0000000..1967085
--- /dev/null
+++ b/ops/parse_sdk_branch.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python3
+
+import os
+import re
+
+
+def parse_sdk_branch(pr_body: str, default_ref: str = "main") -> str:
+ """Parse PR body for TESTING_SDK_BRANCH and return the branch reference."""
+ pattern = re.compile(r"(?i)TESTING_SDK_BRANCH\s*[:=]\s*(\S+)", re.MULTILINE)
+
+ match = pattern.search(pr_body)
+ if match:
+ ref = match.group(1).strip()
+ if ref:
+ return ref
+
+ return default_ref
+
+
+def main():
+ pr_body = os.environ.get("PR_BODY", "")
+ ref = parse_sdk_branch(pr_body)
+
+ github_output = os.environ.get("GITHUB_OUTPUT")
+ if github_output:
+ with open(github_output, "a", encoding="utf-8") as f:
+ f.write(f"testing_ref={ref}\n")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/pyproject.toml b/pyproject.toml
index 639274c..d80d37c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -5,29 +5,29 @@ build-backend = "hatchling.build"
[project]
name = "aws-durable-execution-sdk-python"
dynamic = ["version"]
-description = 'This the Python SDK for AWS Lambda Durable Functions.'
+description = 'AWS Durable Execution SDK for Python'
readme = "README.md"
-requires-python = ">=3.13"
+requires-python = ">=3.11"
license = "Apache-2.0"
keywords = []
authors = [{ name = "yaythomas", email = "tgaigher@amazon.com" }]
classifiers = [
"Development Status :: 4 - Beta",
"Programming Language :: Python",
+ "Programming Language :: Python :: 3.11",
+ "Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
+ "Programming Language :: Python :: 3.14",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
]
-dependencies = ["boto3>=1.40.30"]
+dependencies = ["boto3>=1.42.1"]
[project.urls]
Documentation = "https://github.com/aws/aws-durable-execution-sdk-python#readme"
Issues = "https://github.com/aws/aws-durable-execution-sdk-python/issues"
Source = "https://github.com/aws/aws-durable-execution-sdk-python"
-[tool.hatch.build.targets.sdist]
-packages = ["src/aws_durable_execution_sdk_python"]
-
[tool.hatch.build.targets.wheel]
packages = ["src/aws_durable_execution_sdk_python"]
diff --git a/src/aws_durable_execution_sdk_python/__about__.py b/src/aws_durable_execution_sdk_python/__about__.py
index 97a5269..212e79b 100644
--- a/src/aws_durable_execution_sdk_python/__about__.py
+++ b/src/aws_durable_execution_sdk_python/__about__.py
@@ -1,4 +1,4 @@
# SPDX-FileCopyrightText: 2025-present Amazon.com, Inc. or its affiliates.
#
# SPDX-License-Identifier: Apache-2.0
-__version__ = "0.0.1"
+__version__ = "1.1.0"
diff --git a/src/aws_durable_execution_sdk_python/__init__.py b/src/aws_durable_execution_sdk_python/__init__.py
index 0f4de0d..1a24d31 100644
--- a/src/aws_durable_execution_sdk_python/__init__.py
+++ b/src/aws_durable_execution_sdk_python/__init__.py
@@ -1 +1,36 @@
"""AWS Lambda Durable Executions Python SDK."""
+
+# Main context - used in every durable function
+# Helper decorators - commonly used for step functions
+from aws_durable_execution_sdk_python.context import (
+ DurableContext,
+ durable_step,
+ durable_wait_for_callback,
+ durable_with_child_context,
+)
+
+# Most common exceptions - users need to handle these exceptions
+from aws_durable_execution_sdk_python.exceptions import (
+ DurableExecutionsError,
+ InvocationError,
+ ValidationError,
+)
+
+# Core decorator - used in every durable function
+from aws_durable_execution_sdk_python.execution import durable_execution
+
+# Essential context types - passed to user functions
+from aws_durable_execution_sdk_python.types import BatchResult, StepContext
+
+__all__ = [
+ "BatchResult",
+ "DurableContext",
+ "DurableExecutionsError",
+ "InvocationError",
+ "StepContext",
+ "ValidationError",
+ "durable_execution",
+ "durable_step",
+ "durable_wait_for_callback",
+ "durable_with_child_context",
+]
diff --git a/src/aws_durable_execution_sdk_python/botocore/data/lambdainternal-local/2015-03-31/service-2.json b/src/aws_durable_execution_sdk_python/botocore/data/lambdainternal-local/2015-03-31/service-2.json
deleted file mode 100644
index 0a596a8..0000000
--- a/src/aws_durable_execution_sdk_python/botocore/data/lambdainternal-local/2015-03-31/service-2.json
+++ /dev/null
@@ -1,7856 +0,0 @@
-{
- "version":"2.0",
- "metadata":{
- "apiVersion":"2015-03-31",
- "endpointPrefix":"lambda",
- "protocol":"rest-json",
- "serviceFullName":"AWS Lambda",
- "serviceId":"Lambda",
- "signatureVersion":"v4",
- "signingName":"execute-api",
- "uid":"lambda-2015-03-31"
- },
- "operations":{
- "AddLayerVersionPermission":{
- "name":"AddLayerVersionPermission",
- "http":{
- "method":"POST",
- "requestUri":"/2018-10-31/layers/{LayerName}/versions/{VersionNumber}/policy",
- "responseCode":201
- },
- "input":{"shape":"AddLayerVersionPermissionRequest"},
- "output":{"shape":"AddLayerVersionPermissionResponse"},
- "errors":[
- {"shape":"InvalidParameterValueException"},
- {"shape":"ResourceConflictException"},
- {"shape":"ServiceException"},
- {"shape":"TooManyRequestsException"},
- {"shape":"PolicyLengthExceededException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"PreconditionFailedException"}
- ],
- "documentation":"Principal. To grant permission to an organization defined in Organizations, specify the organization ID as the PrincipalOrgID. For Amazon Web Services services, the principal is a domain-style identifier that the service defines, such as s3.amazonaws.com or sns.amazonaws.com. For Amazon Web Services services, you can also specify the ARN of the associated resource as the SourceArn. If you grant permission to a service principal without specifying the source, other accounts could potentially configure resources in their account to invoke your Lambda function.RoutingConfig parameter to specify a second version and the percentage of invocation requests that it receives.
BisectBatchOnFunctionError – If the function returns an error, split the batch in two and retry.MaximumRecordAgeInSeconds – Discard records older than the specified age. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expiresMaximumRetryAttempts – Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.ParallelizationFactor – Process multiple batches from each shard concurrently.
OnFailure – Send discarded records to an Amazon SQS queue, Amazon SNS topic, or Amazon S3 bucket. For more information, see Adding a destination.
"
- },
- "CreateFunction":{
- "name":"CreateFunction",
- "http":{
- "method":"POST",
- "requestUri":"/2015-03-31/functions",
- "responseCode":201
- },
- "input":{"shape":"CreateFunctionRequest"},
- "output":{"shape":"FunctionConfiguration"},
- "errors":[
- {"shape":"InvalidParameterValueException"},
- {"shape":"ResourceConflictException"},
- {"shape":"ServiceException"},
- {"shape":"TooManyRequestsException"},
- {"shape":"InvalidCodeSignatureException"},
- {"shape":"ResourceNotFoundException"},
- {"shape":"CodeVerificationFailedException"},
- {"shape":"CodeSigningConfigNotFoundException"},
- {"shape":"CodeStorageExceededException"}
- ],
- "documentation":"Image. For a container image, the code property must include the URI of a container image in the Amazon ECR registry. You do not need to specify the handler and runtime properties.Zip. For a .zip file archive, the code property specifies the location of the .zip file. You must also specify the handler and runtime properties. The code in the deployment package must be compatible with the target instruction set architecture of the function (x86-64 or arm64). If you do not specify the architecture, then the default value is x86-64.State, StateReason, and StateReasonCode fields in the response from GetFunctionConfiguration indicate when the function is ready to invoke. For more information, see Lambda function states.Publish parameter to create version 1 of your function from its initial configuration.Deleting state and might not be completely deleted for several seconds.Qualifier parameter. Otherwise, all versions and aliases are deleted. This doesn't require the user to have explicit permissions for DeleteAlias.null is returned for the ARN. For more information, see Runtime updates.InvocationType is RequestResponse). To invoke a function asynchronously, set InvocationType to Event. Lambda passes the ClientContext object to your function for synchronous invocations only.TooManyRequestsException if running the function would cause you to exceed a concurrency limit at either the account level (ConcurrentInvocationLimitExceeded) or function level (ReservedFunctionConcurrentInvocationLimitExceeded).
Configure your Lambda functions to stream response payloads back to clients. For more information, see Configuring a Lambda function to stream responses.
This operation requires permission for the lambda:InvokeFunction action. For details on how to set up permissions for cross-account invocations, see Granting function access to other accounts.
" - }, - "ListAliases":{ - "name":"ListAliases", - "http":{ - "method":"GET", - "requestUri":"/2015-03-31/functions/{FunctionName}/aliases", - "responseCode":200 - }, - "input":{"shape":"ListAliasesRequest"}, - "output":{"shape":"ListAliasesResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Returns a list of aliases for a Lambda function.
", - "readonly":true - }, - "ListCodeSigningConfigs":{ - "name":"ListCodeSigningConfigs", - "http":{ - "method":"GET", - "requestUri":"/2020-04-22/code-signing-configs", - "responseCode":200 - }, - "input":{"shape":"ListCodeSigningConfigsRequest"}, - "output":{"shape":"ListCodeSigningConfigsResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"} - ], - "documentation":"Returns a list of code signing configurations. A request returns up to 10,000 configurations per call. You can use the MaxItems parameter to return fewer configurations per call.
Lists event source mappings. Specify an EventSourceArn to show only event source mappings for a single event source.
Retrieves a list of configurations for asynchronous invocation for a function.
To configure options for asynchronous invocation, use PutFunctionEventInvokeConfig.
", - "readonly":true - }, - "ListFunctionUrlConfigs":{ - "name":"ListFunctionUrlConfigs", - "http":{ - "method":"GET", - "requestUri":"/2021-10-31/functions/{FunctionName}/urls", - "responseCode":200 - }, - "input":{"shape":"ListFunctionUrlConfigsRequest"}, - "output":{"shape":"ListFunctionUrlConfigsResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Returns a list of Lambda function URLs for the specified function.
", - "readonly":true - }, - "ListFunctions":{ - "name":"ListFunctions", - "http":{ - "method":"GET", - "requestUri":"/2015-03-31/functions", - "responseCode":200 - }, - "input":{"shape":"ListFunctionsRequest"}, - "output":{"shape":"ListFunctionsResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"} - ], - "documentation":"Returns a list of Lambda functions, with the version-specific configuration of each. Lambda returns up to 50 functions per call.
Set FunctionVersion to ALL to include all published versions of each function in addition to the unpublished version.
The ListFunctions operation returns a subset of the FunctionConfiguration fields. To get the additional fields (State, StateReasonCode, StateReason, LastUpdateStatus, LastUpdateStatusReason, LastUpdateStatusReasonCode, RuntimeVersionConfig) for a function or version, use GetFunction.
List the functions that use the specified code signing configuration. You can use this method prior to deleting a code signing configuration, to verify that no functions are using it.
", - "readonly":true - }, - "ListLayerVersions":{ - "name":"ListLayerVersions", - "http":{ - "method":"GET", - "requestUri":"/2018-10-31/layers/{LayerName}/versions", - "responseCode":200 - }, - "input":{"shape":"ListLayerVersionsRequest"}, - "output":{"shape":"ListLayerVersionsResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Lists the versions of an Lambda layer. Versions that have been deleted aren't listed. Specify a runtime identifier to list only versions that indicate that they're compatible with that runtime. Specify a compatible architecture to include only layer versions that are compatible with that architecture.
", - "readonly":true - }, - "ListLayers":{ - "name":"ListLayers", - "http":{ - "method":"GET", - "requestUri":"/2018-10-31/layers", - "responseCode":200 - }, - "input":{"shape":"ListLayersRequest"}, - "output":{"shape":"ListLayersResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"} - ], - "documentation":"Lists Lambda layers and shows information about the latest version of each. Specify a runtime identifier to list only layers that indicate that they're compatible with that runtime. Specify a compatible architecture to include only layers that are compatible with that instruction set architecture.
", - "readonly":true - }, - "ListProvisionedConcurrencyConfigs":{ - "name":"ListProvisionedConcurrencyConfigs", - "http":{ - "method":"GET", - "requestUri":"/2019-09-30/functions/{FunctionName}/provisioned-concurrency?List=ALL", - "responseCode":200 - }, - "input":{"shape":"ListProvisionedConcurrencyConfigsRequest"}, - "output":{"shape":"ListProvisionedConcurrencyConfigsResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Retrieves a list of provisioned concurrency configurations for a function.
", - "readonly":true - }, - "ListTags":{ - "name":"ListTags", - "http":{ - "method":"GET", - "requestUri":"/2017-03-31/tags/{Resource}", - "responseCode":200 - }, - "input":{"shape":"ListTagsRequest"}, - "output":{"shape":"ListTagsResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Returns a function, event source mapping, or code signing configuration's tags. You can also view function tags with GetFunction.
", - "readonly":true - }, - "ListVersionsByFunction":{ - "name":"ListVersionsByFunction", - "http":{ - "method":"GET", - "requestUri":"/2015-03-31/functions/{FunctionName}/versions", - "responseCode":200 - }, - "input":{"shape":"ListVersionsByFunctionRequest"}, - "output":{"shape":"ListVersionsByFunctionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Returns a list of versions, with the version-specific configuration of each. Lambda returns up to 50 versions per call.
", - "readonly":true - }, - "PublishLayerVersion":{ - "name":"PublishLayerVersion", - "http":{ - "method":"POST", - "requestUri":"/2018-10-31/layers/{LayerName}/versions", - "responseCode":201 - }, - "input":{"shape":"PublishLayerVersionRequest"}, - "output":{"shape":"PublishLayerVersionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"CodeStorageExceededException"} - ], - "documentation":"Creates an Lambda layer from a ZIP archive. Each time you call PublishLayerVersion with the same layer name, a new version is created.
Add layers to your function with CreateFunction or UpdateFunctionConfiguration.
" - }, - "PublishVersion":{ - "name":"PublishVersion", - "http":{ - "method":"POST", - "requestUri":"/2015-03-31/functions/{FunctionName}/versions", - "responseCode":201 - }, - "input":{"shape":"PublishVersionRequest"}, - "output":{"shape":"FunctionConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"CodeStorageExceededException"}, - {"shape":"PreconditionFailedException"} - ], - "documentation":"Creates a version from the current code and configuration of a function. Use versions to create a snapshot of your function code and configuration that doesn't change.
Lambda doesn't publish a version if the function's configuration and code haven't changed since the last version. Use UpdateFunctionCode or UpdateFunctionConfiguration to update the function before publishing a version.
Clients can invoke versions directly or with an alias. To create an alias, use CreateAlias.
" - }, - "PutFunctionCodeSigningConfig":{ - "name":"PutFunctionCodeSigningConfig", - "http":{ - "method":"PUT", - "requestUri":"/2020-06-30/functions/{FunctionName}/code-signing-config", - "responseCode":200 - }, - "input":{"shape":"PutFunctionCodeSigningConfigRequest"}, - "output":{"shape":"PutFunctionCodeSigningConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"CodeSigningConfigNotFoundException"} - ], - "documentation":"Update the code signing configuration for the function. Changes to the code signing configuration take effect the next time a user tries to deploy a code package to the function.
" - }, - "PutFunctionConcurrency":{ - "name":"PutFunctionConcurrency", - "http":{ - "method":"PUT", - "requestUri":"/2017-10-31/functions/{FunctionName}/concurrency", - "responseCode":200 - }, - "input":{"shape":"PutFunctionConcurrencyRequest"}, - "output":{"shape":"Concurrency"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Sets the maximum number of simultaneous executions for a function, and reserves capacity for that concurrency level.
Concurrency settings apply to the function as a whole, including all published versions and the unpublished version. Reserving concurrency both ensures that your function has capacity to process the specified number of events simultaneously, and prevents it from scaling beyond that level. Use GetFunction to see the current setting for a function.
Use GetAccountSettings to see your Regional concurrency limit. You can reserve concurrency for as many functions as you like, as long as you leave at least 100 simultaneous executions unreserved for functions that aren't configured with a per-function limit. For more information, see Lambda function scaling.
" - }, - "PutFunctionEventInvokeConfig":{ - "name":"PutFunctionEventInvokeConfig", - "http":{ - "method":"PUT", - "requestUri":"/2019-09-25/functions/{FunctionName}/event-invoke-config", - "responseCode":200 - }, - "input":{"shape":"PutFunctionEventInvokeConfigRequest"}, - "output":{"shape":"FunctionEventInvokeConfig"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Configures options for asynchronous invocation on a function, version, or alias. If a configuration already exists for a function, version, or alias, this operation overwrites it. If you exclude any settings, they are removed. To set one option without affecting existing settings for other options, use UpdateFunctionEventInvokeConfig.
By default, Lambda retries an asynchronous invocation twice if the function returns an error. It retains events in a queue for up to six hours. When an event fails all processing attempts or stays in the asynchronous invocation queue for too long, Lambda discards it. To retain discarded events, configure a dead-letter queue with UpdateFunctionConfiguration.
To send an invocation record to a queue, topic, S3 bucket, function, or event bus, specify a destination. You can configure separate destinations for successful invocations (on-success) and events that fail all processing attempts (on-failure). You can configure destinations in addition to or instead of a dead-letter queue.
S3 buckets are supported only for on-failure destinations. To retain records of successful invocations, use another destination type.
Sets your function's recursive loop detection configuration.
When you configure a Lambda function to output to the same service or resource that invokes the function, it's possible to create an infinite recursive loop. For example, a Lambda function might write a message to an Amazon Simple Queue Service (Amazon SQS) queue, which then invokes the same function. This invocation causes the function to write another message to the queue, which in turn invokes the function again.
Lambda can detect certain types of recursive loops shortly after they occur. When Lambda detects a recursive loop and your function's recursive loop detection configuration is set to Terminate, it stops your function being invoked and notifies you.
Adds a provisioned concurrency configuration to a function's alias or version.
", - "idempotent":true - }, - "PutRuntimeManagementConfig":{ - "name":"PutRuntimeManagementConfig", - "http":{ - "method":"PUT", - "requestUri":"/2021-07-20/functions/{FunctionName}/runtime-management-config", - "responseCode":200 - }, - "input":{"shape":"PutRuntimeManagementConfigRequest"}, - "output":{"shape":"PutRuntimeManagementConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Sets the runtime management configuration for a function's version. For more information, see Runtime updates.
" - }, - "RemoveLayerVersionPermission":{ - "name":"RemoveLayerVersionPermission", - "http":{ - "method":"DELETE", - "requestUri":"/2018-10-31/layers/{LayerName}/versions/{VersionNumber}/policy/{StatementId}", - "responseCode":204 - }, - "input":{"shape":"RemoveLayerVersionPermissionRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"PreconditionFailedException"} - ], - "documentation":"Removes a statement from the permissions policy for a version of an Lambda layer. For more information, see AddLayerVersionPermission.
" - }, - "RemovePermission":{ - "name":"RemovePermission", - "http":{ - "method":"DELETE", - "requestUri":"/2015-03-31/functions/{FunctionName}/policy/{StatementId}", - "responseCode":204 - }, - "input":{"shape":"RemovePermissionRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"PreconditionFailedException"} - ], - "documentation":"Revokes function-use permission from an Amazon Web Services service or another Amazon Web Services account. You can get the ID of the statement from the output of GetPolicy.
" - }, - "SendDurableExecutionCallbackFailure":{ - "name":"SendDurableExecutionCallbackFailure", - "http":{ - "method":"POST", - "requestUri":"/2025-12-01/durable-execution-callbacks/{CallbackId}/fail", - "responseCode":200 - }, - "input":{"shape":"SendDurableExecutionCallbackFailureRequest"}, - "output":{"shape":"SendDurableExecutionCallbackFailureResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ServiceException"}, - {"shape":"CallbackTimeoutException"} - ] - }, - "SendDurableExecutionCallbackHeartbeat":{ - "name":"SendDurableExecutionCallbackHeartbeat", - "http":{ - "method":"POST", - "requestUri":"/2025-12-01/durable-execution-callbacks/{CallbackId}/heartbeat", - "responseCode":200 - }, - "input":{"shape":"SendDurableExecutionCallbackHeartbeatRequest"}, - "output":{"shape":"SendDurableExecutionCallbackHeartbeatResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ServiceException"}, - {"shape":"CallbackTimeoutException"} - ] - }, - "SendDurableExecutionCallbackSuccess":{ - "name":"SendDurableExecutionCallbackSuccess", - "http":{ - "method":"POST", - "requestUri":"/2025-12-01/durable-execution-callbacks/{CallbackId}/succeed", - "responseCode":200 - }, - "input":{"shape":"SendDurableExecutionCallbackSuccessRequest"}, - "output":{"shape":"SendDurableExecutionCallbackSuccessResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ServiceException"}, - {"shape":"CallbackTimeoutException"} - ] - }, - "StopDurableExecution":{ - "name":"StopDurableExecution", - "http":{ - "method":"POST", - "requestUri":"/2025-12-01/durable-executions/{DurableExecutionArn}/stop", - "responseCode":200 - }, - "input":{"shape":"StopDurableExecutionRequest"}, - "output":{"shape":"StopDurableExecutionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ServiceException"}, - {"shape":"ResourceNotFoundException"} - ] - }, - "TagResource":{ - "name":"TagResource", - "http":{ - "method":"POST", - "requestUri":"/2017-03-31/tags/{Resource}", - "responseCode":204 - }, - "input":{"shape":"TagResourceRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Adds tags to a function, event source mapping, or code signing configuration.
" - }, - "UntagResource":{ - "name":"UntagResource", - "http":{ - "method":"DELETE", - "requestUri":"/2017-03-31/tags/{Resource}", - "responseCode":204 - }, - "input":{"shape":"UntagResourceRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Removes tags from a function, event source mapping, or code signing configuration.
" - }, - "UpdateAlias":{ - "name":"UpdateAlias", - "http":{ - "method":"PUT", - "requestUri":"/2015-03-31/functions/{FunctionName}/aliases/{Name}", - "responseCode":200 - }, - "input":{"shape":"UpdateAliasRequest"}, - "output":{"shape":"AliasConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"PreconditionFailedException"} - ], - "documentation":"Updates the configuration of a Lambda function alias.
" - }, - "UpdateCodeSigningConfig":{ - "name":"UpdateCodeSigningConfig", - "http":{ - "method":"PUT", - "requestUri":"/2020-04-22/code-signing-configs/{CodeSigningConfigArn}", - "responseCode":200 - }, - "input":{"shape":"UpdateCodeSigningConfigRequest"}, - "output":{"shape":"UpdateCodeSigningConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Update the code signing configuration. Changes to the code signing configuration take effect the next time a user tries to deploy a code package to the function.
" - }, - "UpdateEventSourceMapping":{ - "name":"UpdateEventSourceMapping", - "http":{ - "method":"PUT", - "requestUri":"/2015-03-31/event-source-mappings/{UUID}", - "responseCode":202 - }, - "input":{"shape":"UpdateEventSourceMappingRequest"}, - "output":{"shape":"EventSourceMappingConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceInUseException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Updates an event source mapping. You can change the function that Lambda invokes, or pause invocation and resume later from the same location.
For details about how to configure different event sources, see the following topics.
The following error handling options are available only for DynamoDB and Kinesis event sources:
BisectBatchOnFunctionError – If the function returns an error, split the batch in two and retry.
MaximumRecordAgeInSeconds – Discard records older than the specified age. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.
MaximumRetryAttempts – Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.
ParallelizationFactor – Process multiple batches from each shard concurrently.
For stream sources (DynamoDB, Kinesis, Amazon MSK, and self-managed Apache Kafka), the following option is also available:
OnFailure – Send discarded records to an Amazon SQS queue, Amazon SNS topic, or Amazon S3 bucket. For more information, see Adding a destination.
For information about which configuration parameters apply to each event source, see the following topics.
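To make the stream error-handling options above concrete, here is a minimal boto3 sketch that applies them to an existing Kinesis mapping; the mapping UUID and queue ARN are placeholders:

```python
import boto3

lam = boto3.client("lambda")

lam.update_event_source_mapping(
    UUID="14e0db71-...",              # placeholder event source mapping ID
    BisectBatchOnFunctionError=True,  # split a failed batch in two and retry
    MaximumRecordAgeInSeconds=3600,   # discard records older than one hour
    MaximumRetryAttempts=2,           # then give up after two retries
    ParallelizationFactor=4,          # process 4 batches per shard concurrently
    DestinationConfig={               # route discarded records to an SQS queue
        "OnFailure": {"Destination": "arn:aws:sqs:us-west-2:123456789012:dlq"}
    },
)
```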
Updates a Lambda function's code. If code signing is enabled for the function, the code package must be signed by a trusted publisher. For more information, see Configuring code signing for Lambda.
If the function's package type is Image, then you must specify the code package in ImageUri as the URI of a container image in the Amazon ECR registry.
If the function's package type is Zip, then you must specify the deployment package as a .zip file archive. Enter the Amazon S3 bucket and key of the code .zip file location. You can also provide the function code inline using the ZipFile field.
The code in the deployment package must be compatible with the target instruction set architecture of the function (x86-64 or arm64).
The function's code is locked when you publish a version. You can't modify the code of a published version, only the unpublished version.
For a function defined as a container image, Lambda resolves the image tag to an image digest. In Amazon ECR, if you update the image tag to a new image, Lambda does not automatically update the function.
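A short boto3 sketch of the two package types described above; the bucket, key, and image URI are placeholders:

```python
import boto3

lam = boto3.client("lambda")

# Zip package type: point at a .zip in Amazon S3 (or pass ZipFile=b"..." inline).
lam.update_function_code(
    FunctionName="my-function",
    S3Bucket="my-deploy-bucket",
    S3Key="builds/my-function.zip",
    Publish=True,  # optionally publish a new, immutable version
)

# Image package type: point at a container image in Amazon ECR instead.
lam.update_function_code(
    FunctionName="my-container-function",
    ImageUri="123456789012.dkr.ecr.us-west-2.amazonaws.com/my-repo:latest",
)
```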
Modify the version-specific settings of a Lambda function.
When you update a function, Lambda provisions an instance of the function and its supporting resources. If your function connects to a VPC, this process can take a minute. During this time, you can't modify the function, but you can still invoke it. The LastUpdateStatus, LastUpdateStatusReason, and LastUpdateStatusReasonCode fields in the response from GetFunctionConfiguration indicate when the update is complete and the function is processing events with the new configuration. For more information, see Lambda function states.
These settings can vary between versions of a function and are locked when you publish a version. You can't modify the configuration of a published version, only the unpublished version.
To configure function concurrency, use PutFunctionConcurrency. To grant invoke permissions to an Amazon Web Services account or Amazon Web Services service, use AddPermission.
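Since configuration updates complete asynchronously, a caller normally polls LastUpdateStatus after UpdateFunctionConfiguration returns. A boto3 sketch using the built-in function_updated_v2 waiter for that loop:

```python
import boto3

lam = boto3.client("lambda")

lam.update_function_configuration(
    FunctionName="my-function",
    MemorySize=512,
    Timeout=30,
    Environment={"Variables": {"LOG_LEVEL": "INFO"}},
)

# Polls GetFunctionConfiguration until LastUpdateStatus leaves "InProgress".
lam.get_waiter("function_updated_v2").wait(FunctionName="my-function")
```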
" - }, - "UpdateFunctionEventInvokeConfig":{ - "name":"UpdateFunctionEventInvokeConfig", - "http":{ - "method":"POST", - "requestUri":"/2019-09-25/functions/{FunctionName}/event-invoke-config", - "responseCode":200 - }, - "input":{"shape":"UpdateFunctionEventInvokeConfigRequest"}, - "output":{"shape":"FunctionEventInvokeConfig"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Updates the configuration for asynchronous invocation for a function, version, or alias.
To configure options for asynchronous invocation, use PutFunctionEventInvokeConfig.
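A minimal boto3 sketch of updating the asynchronous-invocation settings for a function; the failure-destination ARN is a placeholder:

```python
import boto3

lam = boto3.client("lambda")

lam.update_function_event_invoke_config(
    FunctionName="my-function",
    MaximumRetryAttempts=1,         # retry a failed async event once
    MaximumEventAgeInSeconds=3600,  # drop events older than one hour
    DestinationConfig={
        "OnFailure": {"Destination": "arn:aws:sqs:us-west-2:123456789012:async-dlq"}
    },
)
```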
" - }, - "UpdateFunctionUrlConfig":{ - "name":"UpdateFunctionUrlConfig", - "http":{ - "method":"PUT", - "requestUri":"/2021-10-31/functions/{FunctionName}/url", - "responseCode":200 - }, - "input":{"shape":"UpdateFunctionUrlConfigRequest"}, - "output":{"shape":"UpdateFunctionUrlConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Updates the configuration for a Lambda function URL.
" - } - }, - "shapes":{ - "AccountLimit":{ - "type":"structure", - "members":{ - "TotalCodeSize":{ - "shape":"Long", - "documentation":"The amount of storage space that you can use for all deployment packages and layer archives.
" - }, - "CodeSizeUnzipped":{ - "shape":"Long", - "documentation":"The maximum size of a function's deployment package and layers when they're extracted.
" - }, - "CodeSizeZipped":{ - "shape":"Long", - "documentation":"The maximum size of a deployment package when it's uploaded directly to Lambda. Use Amazon S3 for larger files.
" - }, - "ConcurrentExecutions":{ - "shape":"Integer", - "documentation":"The maximum number of simultaneous function executions.
" - }, - "UnreservedConcurrentExecutions":{ - "shape":"UnreservedConcurrentExecutions", - "documentation":"The maximum number of simultaneous function executions, minus the capacity that's reserved for individual functions with PutFunctionConcurrency.
" - } - }, - "documentation":"Limits that are related to concurrency and storage. All file and storage sizes are in bytes.
" - }, - "AccountUsage":{ - "type":"structure", - "members":{ - "TotalCodeSize":{ - "shape":"Long", - "documentation":"The amount of storage space, in bytes, that's being used by deployment packages and layer archives.
" - }, - "FunctionCount":{ - "shape":"Long", - "documentation":"The number of Lambda functions.
" - } - }, - "documentation":"The number of functions and amount of storage in use.
" - }, - "Action":{ - "type":"string", - "pattern":"(lambda:[*]|lambda:[a-zA-Z]+|[*])" - }, - "AddLayerVersionPermissionRequest":{ - "type":"structure", - "required":[ - "LayerName", - "VersionNumber", - "StatementId", - "Action", - "Principal" - ], - "members":{ - "LayerName":{ - "shape":"LayerName", - "documentation":"The name or Amazon Resource Name (ARN) of the layer.
", - "location":"uri", - "locationName":"LayerName" - }, - "VersionNumber":{ - "shape":"LayerVersionNumber", - "documentation":"The version number.
", - "location":"uri", - "locationName":"VersionNumber" - }, - "StatementId":{ - "shape":"StatementId", - "documentation":"An identifier that distinguishes the policy from others on the same layer version.
" - }, - "Action":{ - "shape":"LayerPermissionAllowedAction", - "documentation":"The API action that grants access to the layer. For example, lambda:GetLayerVersion.
An account ID, or * to grant layer usage permission to all accounts in an organization, or all Amazon Web Services accounts (if organizationId is not specified). For the last case, make sure that you really do want all Amazon Web Services accounts to have usage permission to this layer.
With the principal set to *, grant permission to all accounts in the specified organization.
Only update the policy if the revision ID matches the ID specified. Use this option to avoid modifying a policy that has changed since you last read it.
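Putting the AddLayerVersionPermission members above together, a minimal boto3 sketch that grants one account access to a layer version and captures the returned revision ID:

```python
import boto3

lam = boto3.client("lambda")

resp = lam.add_layer_version_permission(
    LayerName="my-layer",
    VersionNumber=3,
    StatementId="allow-account",
    Action="lambda:GetLayerVersion",
    Principal="123456789012",  # or "*", optionally scoped by OrganizationId
)

# Pass this back as RevisionId to guard a later policy update.
print(resp["RevisionId"])
```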
", - "location":"querystring", - "locationName":"RevisionId" - } - } - }, - "AddLayerVersionPermissionResponse":{ - "type":"structure", - "members":{ - "Statement":{ - "shape":"String", - "documentation":"The permission statement.
" - }, - "RevisionId":{ - "shape":"String", - "documentation":"A unique identifier for the current revision of the policy.
" - } - } - }, - "AddPermissionRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "StatementId", - "Action", - "Principal" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function, version, or alias.
Name formats
Function name – my-function (name-only), my-function:v1 (with alias).
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "StatementId":{ - "shape":"StatementId", - "documentation":"A statement identifier that differentiates the statement from others in the same policy.
" - }, - "Action":{ - "shape":"Action", - "documentation":"The action that the principal can use on the function. For example, lambda:InvokeFunction or lambda:GetFunction.
The Amazon Web Services service, Amazon Web Services account, IAM user, or IAM role that invokes the function. If you specify a service, use SourceArn or SourceAccount to limit who can invoke the function through that service.
For Amazon Web Services services, the ARN of the Amazon Web Services resource that invokes the function. For example, an Amazon S3 bucket or Amazon SNS topic.
Note that Lambda configures the comparison using the StringLike operator.
For Amazon Web Services services, the ID of the Amazon Web Services account that owns the resource. Use this together with SourceArn to ensure that the specified account owns the resource. It is possible for an Amazon S3 bucket to be deleted by its owner and recreated by another account.
For Alexa Smart Home functions, a token that the invoker must supply.
" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"Specify a version or alias to add permissions to a published version of the function.
", - "location":"querystring", - "locationName":"Qualifier" - }, - "RevisionId":{ - "shape":"String", - "documentation":"Update the policy only if the revision ID matches the ID that's specified. Use this option to avoid modifying a policy that has changed since you last read it.
" - }, - "PrincipalOrgID":{ - "shape":"PrincipalOrgID", - "documentation":"The identifier for your organization in Organizations. Use this to grant permissions to all the Amazon Web Services accounts under this organization.
" - }, - "FunctionUrlAuthType":{ - "shape":"FunctionUrlAuthType", - "documentation":"The type of authentication that your function URL uses. Set to AWS_IAM if you want to restrict access to authenticated users only. Set to NONE if you want to bypass IAM authentication to create a public endpoint. For more information, see Security and auth model for Lambda function URLs.
The permission statement that's added to the function policy.
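A typical AddPermission call, sketched with boto3, that lets Amazon S3 invoke a function and scopes the grant with SourceArn and SourceAccount as the member docs above recommend:

```python
import boto3

lam = boto3.client("lambda")

lam.add_permission(
    FunctionName="my-function",
    StatementId="s3-invoke",
    Action="lambda:InvokeFunction",
    Principal="s3.amazonaws.com",
    SourceArn="arn:aws:s3:::my-bucket",  # limit the grant to one bucket
    SourceAccount="123456789012",        # guard against bucket re-creation
)
```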
" - } - } - }, - "AdditionalVersion":{ - "type":"string", - "max":1024, - "min":1, - "pattern":"[0-9]+" - }, - "AdditionalVersionWeights":{ - "type":"map", - "key":{"shape":"AdditionalVersion"}, - "value":{"shape":"Weight"} - }, - "Alias":{ - "type":"string", - "max":128, - "min":1, - "pattern":"(?!^[0-9]+$)([a-zA-Z0-9-_]+)" - }, - "AliasConfiguration":{ - "type":"structure", - "members":{ - "AliasArn":{ - "shape":"FunctionArn", - "documentation":"The Amazon Resource Name (ARN) of the alias.
" - }, - "Name":{ - "shape":"Alias", - "documentation":"The name of the alias.
" - }, - "FunctionVersion":{ - "shape":"Version", - "documentation":"The function version that the alias invokes.
" - }, - "Description":{ - "shape":"Description", - "documentation":"A description of the alias.
" - }, - "RoutingConfig":{ - "shape":"AliasRoutingConfiguration", - "documentation":"The routing configuration of the alias.
" - }, - "RevisionId":{ - "shape":"String", - "documentation":"A unique identifier that changes when you update the alias.
" - } - }, - "documentation":"Provides configuration information about a Lambda function alias.
" - }, - "AliasList":{ - "type":"list", - "member":{"shape":"AliasConfiguration"} - }, - "AliasRoutingConfiguration":{ - "type":"structure", - "members":{ - "AdditionalVersionWeights":{ - "shape":"AdditionalVersionWeights", - "documentation":"The second version, and the percentage of traffic that's routed to it.
" - } - }, - "documentation":"The traffic-shifting configuration of a Lambda function alias.
" - }, - "AllowCredentials":{ - "type":"boolean", - "box":true - }, - "AllowMethodsList":{ - "type":"list", - "member":{"shape":"Method"}, - "max":6, - "min":0 - }, - "AllowOriginsList":{ - "type":"list", - "member":{"shape":"Origin"}, - "max":100, - "min":0 - }, - "AllowedPublishers":{ - "type":"structure", - "required":["SigningProfileVersionArns"], - "members":{ - "SigningProfileVersionArns":{ - "shape":"SigningProfileVersionArns", - "documentation":"The Amazon Resource Name (ARN) for each of the signing profiles. A signing profile defines a trusted user who can sign a code package.
" - } - }, - "documentation":"List of signing profiles that can sign a code package.
" - }, - "AmazonManagedKafkaEventSourceConfig":{ - "type":"structure", - "members":{ - "ConsumerGroupId":{ - "shape":"URI", - "documentation":"The identifier for the Kafka consumer group to join. The consumer group ID must be unique among all your Kafka event sources. After creating a Kafka event source mapping with the consumer group ID specified, you cannot update this value. For more information, see Customizable consumer group ID.
" - }, - "SchemaRegistryConfig":{ - "shape":"KafkaSchemaRegistryConfig", - "documentation":"Specific configuration settings for a Kafka schema registry.
" - } - }, - "documentation":"Specific configuration settings for an Amazon Managed Streaming for Apache Kafka (Amazon MSK) event source.
" - }, - "ApplicationLogLevel":{ - "type":"string", - "enum":[ - "TRACE", - "DEBUG", - "INFO", - "WARN", - "ERROR", - "FATAL" - ] - }, - "Architecture":{ - "type":"string", - "enum":[ - "x86_64", - "arm64" - ] - }, - "ArchitecturesList":{ - "type":"list", - "member":{"shape":"Architecture"}, - "max":1, - "min":1 - }, - "Arn":{ - "type":"string", - "pattern":"arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\\-])+:([a-z]{2}(-gov)?-[a-z]+-\\d{1})?:(\\d{12})?:(.*)" - }, - "AttemptCount":{ - "type":"integer", - "min":0 - }, - "BatchSize":{ - "type":"integer", - "box":true, - "max":10000, - "min":1 - }, - "BinaryOperationPayload":{ - "type":"blob", - "max":262144, - "min":0, - "sensitive":true - }, - "BisectBatchOnFunctionError":{ - "type":"boolean", - "box":true - }, - "Blob":{ - "type":"blob", - "sensitive":true - }, - "BlobStream":{ - "type":"blob", - "streaming":true - }, - "Boolean":{"type":"boolean"}, - "CallbackDetails":{ - "type":"structure", - "members":{ - "CallbackId":{"shape":"CallbackId"}, - "Result":{"shape":"OperationPayload"}, - "Error":{"shape":"ErrorObject"} - } - }, - "CallbackFailedDetails":{ - "type":"structure", - "required":["Error"], - "members":{ - "Error":{"shape":"EventError"} - } - }, - "CallbackId":{ - "type":"string", - "max":1024, - "min":1, - "pattern":"[A-Za-z0-9+/]+={0,2}" - }, - "CallbackOptions":{ - "type":"structure", - "members":{ - "TimeoutSeconds":{"shape":"DurationSeconds"}, - "HeartbeatTimeoutSeconds":{"shape":"DurationSeconds"} - } - }, - "CallbackStartedDetails":{ - "type":"structure", - "required":["CallbackId"], - "members":{ - "CallbackId":{"shape":"CallbackId"}, - "HeartbeatTimeout":{"shape":"DurationSeconds"}, - "Timeout":{"shape":"DurationSeconds"} - } - }, - "CallbackSucceededDetails":{ - "type":"structure", - "required":["Result"], - "members":{ - "Result":{"shape":"EventResult"} - } - }, - "CallbackTimedOutDetails":{ - "type":"structure", - "required":["Error"], - "members":{ - "Error":{"shape":"EventError"} - } - }, - "CallbackTimeoutException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "ChainedInvokeFailedDetails":{ - "type":"structure", - "required":["Error"], - "members":{ - "Error":{"shape":"EventError"} - } - }, - "ChainedInvokeOptions":{ - "type":"structure", - "members":{ - "FunctionName":{"shape":"FunctionName"} - } - }, - "ChainedInvokePendingDetails":{ - "type":"structure", - "required":[ - "Input", - "FunctionName" - ], - "members":{ - "Input":{"shape":"EventInput"}, - "FunctionName":{"shape":"FunctionName"} - } - }, - "ChainedInvokeStartedDetails":{ - "type":"structure", - "members":{ - "DurableExecutionArn":{"shape":"DurableExecutionArn"} - } - }, - "ChainedInvokeStoppedDetails":{ - "type":"structure", - "required":["Error"], - "members":{ - "Error":{"shape":"EventError"} - } - }, - "ChainedInvokeSucceededDetails":{ - "type":"structure", - "required":["Result"], - "members":{ - "Result":{"shape":"EventResult"} - } - }, - "ChainedInvokeTimedOutDetails":{ - "type":"structure", - "required":["Error"], - "members":{ - "Error":{"shape":"EventError"} - } - }, - "CheckpointDurableExecutionRequest":{ - "type":"structure", - "required":[ - "DurableExecutionArn", - "CheckpointToken" - ], - "members":{ - "DurableExecutionArn":{ - "shape":"DurableExecutionArn", - "location":"uri", - "locationName":"DurableExecutionArn" - }, - "CheckpointToken":{"shape":"CheckpointToken"}, - 
"Updates":{"shape":"OperationUpdates"}, - "ClientToken":{"shape":"ClientToken"} - } - }, - "CheckpointDurableExecutionResponse":{ - "type":"structure", - "required":["NewExecutionState"], - "members":{ - "CheckpointToken":{"shape":"CheckpointToken"}, - "NewExecutionState":{"shape":"CheckpointUpdatedExecutionState"} - } - }, - "CheckpointToken":{ - "type":"string", - "max":2048, - "min":1, - "pattern":"[A-Za-z0-9+/]+={0,2}" - }, - "CheckpointUpdatedExecutionState":{ - "type":"structure", - "members":{ - "Operations":{"shape":"Operations"}, - "NextMarker":{"shape":"String"} - } - }, - "ClientToken":{ - "type":"string", - "max":64, - "min":1, - "pattern":"[\\x21-\\x7E]+" - }, - "CodeSigningConfig":{ - "type":"structure", - "required":[ - "CodeSigningConfigId", - "CodeSigningConfigArn", - "AllowedPublishers", - "CodeSigningPolicies", - "LastModified" - ], - "members":{ - "CodeSigningConfigId":{ - "shape":"CodeSigningConfigId", - "documentation":"Unique identifer for the Code signing configuration.
" - }, - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"The Amazon Resource Name (ARN) of the Code signing configuration.
" - }, - "Description":{ - "shape":"Description", - "documentation":"Code signing configuration description.
" - }, - "AllowedPublishers":{ - "shape":"AllowedPublishers", - "documentation":"List of allowed publishers.
" - }, - "CodeSigningPolicies":{ - "shape":"CodeSigningPolicies", - "documentation":"The code signing policy controls the validation failure action for signature mismatch or expiry.
" - }, - "LastModified":{ - "shape":"Timestamp", - "documentation":"The date and time that the Code signing configuration was last modified, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).
" - } - }, - "documentation":"Details about a Code signing configuration.
" - }, - "CodeSigningConfigArn":{ - "type":"string", - "max":200, - "min":0, - "pattern":"arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:code-signing-config:csc-[a-z0-9]{17}" - }, - "CodeSigningConfigId":{ - "type":"string", - "pattern":"csc-[a-zA-Z0-9-_\\.]{17}" - }, - "CodeSigningConfigList":{ - "type":"list", - "member":{"shape":"CodeSigningConfig"} - }, - "CodeSigningConfigNotFoundException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"The specified code signing configuration does not exist.
", - "error":{ - "httpStatusCode":404, - "senderFault":true - }, - "exception":true - }, - "CodeSigningPolicies":{ - "type":"structure", - "members":{ - "UntrustedArtifactOnDeployment":{ - "shape":"CodeSigningPolicy", - "documentation":"Code signing configuration policy for deployment validation failure. If you set the policy to Enforce, Lambda blocks the deployment request if signature validation checks fail. If you set the policy to Warn, Lambda allows the deployment and creates a CloudWatch log.
Default value: Warn
Code signing configuration policies specify the validation failure action for signature mismatch or expiry.
" - }, - "CodeSigningPolicy":{ - "type":"string", - "enum":[ - "Warn", - "Enforce" - ] - }, - "CodeStorageExceededException":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"String", - "documentation":"The exception type.
" - }, - "message":{"shape":"String"} - }, - "documentation":"Your Amazon Web Services account has exceeded its maximum total code size. For more information, see Lambda quotas.
", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "CodeVerificationFailedException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"The code signature failed one or more of the validation checks for signature mismatch or expiry, and the code signing policy is set to ENFORCE. Lambda blocks the deployment.
", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "CollectionName":{ - "type":"string", - "max":57, - "min":1, - "pattern":"(^(?!(system\\x2e)))(^[_a-zA-Z0-9])([^$]*)" - }, - "CompatibleArchitectures":{ - "type":"list", - "member":{"shape":"Architecture"}, - "max":2, - "min":0 - }, - "CompatibleRuntimes":{ - "type":"list", - "member":{"shape":"Runtime"}, - "max":15, - "min":0 - }, - "Concurrency":{ - "type":"structure", - "members":{ - "ReservedConcurrentExecutions":{ - "shape":"ReservedConcurrentExecutions", - "documentation":"The number of concurrent executions that are reserved for this function. For more information, see Managing Lambda reserved concurrency.
" - } - } - }, - "ContextDetails":{ - "type":"structure", - "members":{ - "ReplayChildren":{"shape":"ReplayChildren"}, - "Result":{"shape":"OperationPayload"}, - "Error":{"shape":"ErrorObject"} - } - }, - "ContextFailedDetails":{ - "type":"structure", - "required":["Error"], - "members":{ - "Error":{"shape":"EventError"} - } - }, - "ContextOptions":{ - "type":"structure", - "members":{ - "ReplayChildren":{"shape":"ReplayChildren"} - } - }, - "ContextStartedDetails":{ - "type":"structure", - "members":{} - }, - "ContextSucceededDetails":{ - "type":"structure", - "required":["Result"], - "members":{ - "Result":{"shape":"EventResult"} - } - }, - "Cors":{ - "type":"structure", - "members":{ - "AllowCredentials":{ - "shape":"AllowCredentials", - "documentation":"Whether to allow cookies or other credentials in requests to your function URL. The default is false.
The HTTP headers that origins can include in requests to your function URL. For example: Date, Keep-Alive, X-Custom-Header.
The HTTP methods that are allowed when calling your function URL. For example: GET, POST, DELETE, or the wildcard character (*).
The origins that can access your function URL. You can list any number of specific origins, separated by a comma. For example: https://www.example.com, http://localhost:60905.
Alternatively, you can grant access to all origins using the wildcard character (*).
The HTTP headers in your function response that you want to expose to origins that call your function URL. For example: Date, Keep-Alive, X-Custom-Header.
The maximum amount of time, in seconds, that web browsers can cache results of a preflight request. By default, this is set to 0, which means that the browser doesn't cache results.
The cross-origin resource sharing (CORS) settings for your Lambda function URL. Use CORS to grant access to your function URL from any origin. You can also use CORS to control access for specific HTTP headers and methods in requests to your function URL.
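The CORS members described above map directly onto the Cors structure accepted when creating a function URL. A boto3 sketch with placeholder origins and headers:

```python
import boto3

lam = boto3.client("lambda")

lam.create_function_url_config(
    FunctionName="my-function",
    AuthType="AWS_IAM",
    Cors={
        "AllowCredentials": True,
        "AllowOrigins": ["https://www.example.com", "http://localhost:60905"],
        "AllowMethods": ["GET", "POST"],
        "AllowHeaders": ["Date", "Keep-Alive"],
        "ExposeHeaders": ["X-Custom-Header"],
        "MaxAge": 300,  # browsers may cache the preflight for 5 minutes
    },
)
```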
" - }, - "CreateAliasRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "Name", - "FunctionVersion" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name - MyFunction.
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.
Partial ARN - 123456789012:function:MyFunction.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Name":{ - "shape":"Alias", - "documentation":"The name of the alias.
" - }, - "FunctionVersion":{ - "shape":"Version", - "documentation":"The function version that the alias invokes.
" - }, - "Description":{ - "shape":"Description", - "documentation":"A description of the alias.
" - }, - "RoutingConfig":{ - "shape":"AliasRoutingConfiguration", - "documentation":"The routing configuration of the alias.
" - } - } - }, - "CreateCodeSigningConfigRequest":{ - "type":"structure", - "required":["AllowedPublishers"], - "members":{ - "Description":{ - "shape":"Description", - "documentation":"Descriptive name for this code signing configuration.
" - }, - "AllowedPublishers":{ - "shape":"AllowedPublishers", - "documentation":"Signing profiles for this code signing configuration.
" - }, - "CodeSigningPolicies":{ - "shape":"CodeSigningPolicies", - "documentation":"The code signing policies define the actions to take if the validation checks fail.
" - }, - "Tags":{ - "shape":"Tags", - "documentation":"A list of tags to add to the code signing configuration.
" - } - } - }, - "CreateCodeSigningConfigResponse":{ - "type":"structure", - "required":["CodeSigningConfig"], - "members":{ - "CodeSigningConfig":{ - "shape":"CodeSigningConfig", - "documentation":"The code signing configuration.
" - } - } - }, - "CreateEventSourceMappingRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "EventSourceArn":{ - "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the event source.
Amazon Kinesis – The ARN of the data stream or a stream consumer.
Amazon DynamoDB Streams – The ARN of the stream.
Amazon Simple Queue Service – The ARN of the queue.
Amazon Managed Streaming for Apache Kafka – The ARN of the cluster or the ARN of the VPC connection (for cross-account event source mappings).
Amazon MQ – The ARN of the broker.
Amazon DocumentDB – The ARN of the DocumentDB change stream.
The name or ARN of the Lambda function.
Name formats
Function name – MyFunction.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:MyFunction.
Version or Alias ARN – arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD.
Partial ARN – 123456789012:function:MyFunction.
The length constraint applies only to the full ARN. If you specify only the function name, it's limited to 64 characters in length.
" - }, - "Enabled":{ - "shape":"Enabled", - "documentation":"When true, the event source mapping is active. When false, Lambda pauses polling and invocation.
Default: True
" - }, - "BatchSize":{ - "shape":"BatchSize", - "documentation":"The maximum number of records in each batch that Lambda pulls from your stream or queue and sends to your function. Lambda passes all of the records in the batch to the function in a single call, up to the payload limit for synchronous invocation (6 MB).
Amazon Kinesis – Default 100. Max 10,000.
Amazon DynamoDB Streams – Default 100. Max 10,000.
Amazon Simple Queue Service – Default 10. For standard queues the max is 10,000. For FIFO queues the max is 10.
Amazon Managed Streaming for Apache Kafka – Default 100. Max 10,000.
Self-managed Apache Kafka – Default 100. Max 10,000.
Amazon MQ (ActiveMQ and RabbitMQ) – Default 100. Max 10,000.
DocumentDB – Default 100. Max 10,000.
An object that defines the filter criteria that determine whether Lambda should process an event. For more information, see Lambda event filtering.
" - }, - "MaximumBatchingWindowInSeconds":{ - "shape":"MaximumBatchingWindowInSeconds", - "documentation":"The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function. You can configure MaximumBatchingWindowInSeconds to any value from 0 seconds to 300 seconds in increments of seconds.
For Kinesis, DynamoDB, and Amazon SQS event sources, the default batching window is 0 seconds. For Amazon MSK, self-managed Apache Kafka, Amazon MQ, and DocumentDB event sources, the default batching window is 500 ms. Note that because you can only change MaximumBatchingWindowInSeconds in increments of seconds, you cannot revert to the 500 ms default batching window after you have changed it. To restore the default batching window, you must create a new event source mapping.
Related setting: For Kinesis, DynamoDB, and Amazon SQS event sources, when you set BatchSize to a value greater than 10, you must set MaximumBatchingWindowInSeconds to at least 1.
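To illustrate the batching rules above with boto3: for a standard Amazon SQS queue, a BatchSize over 10 also requires a batching window of at least one second. The queue ARN is a placeholder:

```python
import boto3

lam = boto3.client("lambda")

lam.create_event_source_mapping(
    EventSourceArn="arn:aws:sqs:us-west-2:123456789012:orders",
    FunctionName="my-function",
    BatchSize=100,                     # > 10, so a batching window is required
    MaximumBatchingWindowInSeconds=5,  # gather records for up to 5 seconds
)
```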
(Kinesis and DynamoDB Streams only) The number of batches to process from each shard concurrently.
" - }, - "StartingPosition":{ - "shape":"EventSourcePosition", - "documentation":"The position in a stream from which to start reading. Required for Amazon Kinesis and Amazon DynamoDB Stream event sources. AT_TIMESTAMP is supported only for Amazon Kinesis streams, Amazon DocumentDB, Amazon MSK, and self-managed Apache Kafka.
With StartingPosition set to AT_TIMESTAMP, the time from which to start reading. StartingPositionTimestamp cannot be in the future.
(Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Kafka only) A configuration object that specifies the destination of an event after Lambda processes it.
" - }, - "MaximumRecordAgeInSeconds":{ - "shape":"MaximumRecordAgeInSeconds", - "documentation":"(Kinesis and DynamoDB Streams only) Discard records older than the specified age. The default value is infinite (-1).
" - }, - "BisectBatchOnFunctionError":{ - "shape":"BisectBatchOnFunctionError", - "documentation":"(Kinesis and DynamoDB Streams only) If the function returns an error, split the batch in two and retry.
" - }, - "MaximumRetryAttempts":{ - "shape":"MaximumRetryAttemptsEventSourceMapping", - "documentation":"(Kinesis and DynamoDB Streams only) Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.
" - }, - "Tags":{ - "shape":"Tags", - "documentation":"A list of tags to apply to the event source mapping.
" - }, - "TumblingWindowInSeconds":{ - "shape":"TumblingWindowInSeconds", - "documentation":"(Kinesis and DynamoDB Streams only) The duration in seconds of a processing window for DynamoDB and Kinesis Streams event sources. A value of 0 seconds indicates no tumbling window.
" - }, - "Topics":{ - "shape":"Topics", - "documentation":"The name of the Kafka topic.
" - }, - "Queues":{ - "shape":"Queues", - "documentation":"(MQ) The name of the Amazon MQ broker destination queue to consume.
" - }, - "SourceAccessConfigurations":{ - "shape":"SourceAccessConfigurations", - "documentation":"An array of authentication protocols or VPC components required to secure your event source.
" - }, - "SelfManagedEventSource":{ - "shape":"SelfManagedEventSource", - "documentation":"The self-managed Apache Kafka cluster to receive records from.
" - }, - "FunctionResponseTypes":{ - "shape":"FunctionResponseTypeList", - "documentation":"(Kinesis, DynamoDB Streams, and Amazon SQS) A list of current response type enums applied to the event source mapping.
" - }, - "AmazonManagedKafkaEventSourceConfig":{ - "shape":"AmazonManagedKafkaEventSourceConfig", - "documentation":"Specific configuration settings for an Amazon Managed Streaming for Apache Kafka (Amazon MSK) event source.
" - }, - "SelfManagedKafkaEventSourceConfig":{ - "shape":"SelfManagedKafkaEventSourceConfig", - "documentation":"Specific configuration settings for a self-managed Apache Kafka event source.
" - }, - "ScalingConfig":{ - "shape":"ScalingConfig", - "documentation":"(Amazon SQS only) The scaling configuration for the event source. For more information, see Configuring maximum concurrency for Amazon SQS event sources.
" - }, - "DocumentDBEventSourceConfig":{ - "shape":"DocumentDBEventSourceConfig", - "documentation":"Specific configuration settings for a DocumentDB event source.
" - }, - "KMSKeyArn":{ - "shape":"KMSKeyArn", - "documentation":"The ARN of the Key Management Service (KMS) customer managed key that Lambda uses to encrypt your function's filter criteria. By default, Lambda does not encrypt your filter criteria object. Specify this property to encrypt data using your own customer managed key.
" - }, - "MetricsConfig":{ - "shape":"EventSourceMappingMetricsConfig", - "documentation":"The metrics configuration for your event source. For more information, see Event source mapping metrics.
" - }, - "ProvisionedPollerConfig":{ - "shape":"ProvisionedPollerConfig", - "documentation":"(Amazon MSK and self-managed Apache Kafka only) The provisioned mode configuration for the event source. For more information, see provisioned mode.
" - } - } - }, - "CreateFunctionRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "Role", - "Code" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
" - }, - "Runtime":{ - "shape":"Runtime", - "documentation":"The identifier of the function's runtime. Runtime is required if the deployment package is a .zip file archive. Specifying a runtime results in an error if you're deploying a function using a container image.
The following list includes deprecated runtimes. Lambda blocks creating new functions and updating existing functions shortly after each runtime is deprecated. For more information, see Runtime use after deprecation.
For a list of all currently supported runtimes, see Supported runtimes.
" - }, - "Role":{ - "shape":"RoleArn", - "documentation":"The Amazon Resource Name (ARN) of the function's execution role.
" - }, - "Handler":{ - "shape":"Handler", - "documentation":"The name of the method within your code that Lambda calls to run your function. Handler is required if the deployment package is a .zip file archive. The format includes the file name. It can also include namespaces and other qualifiers, depending on the runtime. For more information, see Lambda programming model.
" - }, - "Code":{ - "shape":"FunctionCode", - "documentation":"The code for the function.
" - }, - "Description":{ - "shape":"Description", - "documentation":"A description of the function.
" - }, - "Timeout":{ - "shape":"Timeout", - "documentation":"The amount of time (in seconds) that Lambda allows a function to run before stopping it. The default is 3 seconds. The maximum allowed value is 900 seconds. For more information, see Lambda execution environment.
" - }, - "MemorySize":{ - "shape":"MemorySize", - "documentation":"The amount of memory available to the function at runtime. Increasing the function memory also increases its CPU allocation. The default value is 128 MB. The value can be any multiple of 1 MB.
" - }, - "Publish":{ - "shape":"Boolean", - "documentation":"Set to true to publish the first version of the function during creation.
" - }, - "VpcConfig":{ - "shape":"VpcConfig", - "documentation":"For network connectivity to Amazon Web Services resources in a VPC, specify a list of security groups and subnets in the VPC. When you connect a function to a VPC, it can access resources and the internet only through that VPC. For more information, see Configuring a Lambda function to access resources in a VPC.
" - }, - "PackageType":{ - "shape":"PackageType", - "documentation":"The type of deployment package. Set to Image for container image and set to Zip for .zip file archive.
A dead-letter queue configuration that specifies the queue or topic where Lambda sends asynchronous events when they fail processing. For more information, see Dead-letter queues.
" - }, - "Environment":{ - "shape":"Environment", - "documentation":"Environment variables that are accessible from function code during execution.
" - }, - "KMSKeyArn":{ - "shape":"KMSKeyArn", - "documentation":"The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt the following resources:
The function's environment variables.
The function's Lambda SnapStart snapshots.
When used with SourceKMSKeyArn, the unzipped version of the .zip deployment package that's used for function invocations. For more information, see Specifying a customer managed key for Lambda.
The optimized version of the container image that's used for function invocations. Note that this is not the same key that's used to protect your container image in the Amazon Elastic Container Registry (Amazon ECR). For more information, see Function lifecycle.
If you don't provide a customer managed key, Lambda uses an Amazon Web Services owned key or an Amazon Web Services managed key.
" - }, - "TracingConfig":{ - "shape":"TracingConfig", - "documentation":"Set Mode to Active to sample and trace a subset of incoming requests with X-Ray.
A list of tags to apply to the function.
" - }, - "Layers":{ - "shape":"LayerList", - "documentation":"A list of function layers to add to the function's execution environment. Specify each layer by its ARN, including the version.
" - }, - "FileSystemConfigs":{ - "shape":"FileSystemConfigList", - "documentation":"Connection settings for an Amazon EFS file system.
" - }, - "ImageConfig":{ - "shape":"ImageConfig", - "documentation":"Container image configuration values that override the values in the container image Dockerfile.
" - }, - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"To enable code signing for this function, specify the ARN of a code-signing configuration. A code-signing configuration includes a set of signing profiles, which define the trusted publishers for this function.
" - }, - "Architectures":{ - "shape":"ArchitecturesList", - "documentation":"The instruction set architecture that the function supports. Enter a string array with one of the valid values (arm64 or x86_64). The default value is x86_64.
The size of the function's /tmp directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).
The function's SnapStart setting.
" - }, - "LoggingConfig":{ - "shape":"LoggingConfig", - "documentation":"The function's Amazon CloudWatch Logs configuration settings.
" - }, - "DurableConfig":{"shape":"DurableConfig"} - } - }, - "CreateFunctionUrlConfigRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "AuthType" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"FunctionUrlQualifier", - "documentation":"The alias name.
", - "location":"querystring", - "locationName":"Qualifier" - }, - "AuthType":{ - "shape":"FunctionUrlAuthType", - "documentation":"The type of authentication that your function URL uses. Set to AWS_IAM if you want to restrict access to authenticated users only. Set to NONE if you want to bypass IAM authentication to create a public endpoint. For more information, see Security and auth model for Lambda function URLs.
The cross-origin resource sharing (CORS) settings for your function URL.
" - }, - "InvokeMode":{ - "shape":"InvokeMode", - "documentation":"Use one of the following options:
BUFFERED – This is the default option. Lambda invokes your function using the Invoke API operation. Invocation results are available when the payload is complete. The maximum payload size is 6 MB.
RESPONSE_STREAM – Your function streams payload results as they become available. Lambda invokes your function using the InvokeWithResponseStream API operation. The maximum response payload size is 20 MB; however, you can request a quota increase.
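The RESPONSE_STREAM path is also reachable directly through the API. A boto3 sketch that drains the event stream returned by InvokeWithResponseStream:

```python
import boto3

lam = boto3.client("lambda")

resp = lam.invoke_with_response_stream(
    FunctionName="my-streaming-function",
    Payload=b'{"query": "hello"}',
)

# The EventStream yields payload chunks as the function writes them.
for event in resp["EventStream"]:
    if "PayloadChunk" in event:
        print(event["PayloadChunk"]["Payload"].decode(), end="")
    elif "InvokeComplete" in event:
        break
```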
The HTTP URL endpoint for your function.
" - }, - "FunctionArn":{ - "shape":"FunctionArn", - "documentation":"The Amazon Resource Name (ARN) of your function.
" - }, - "AuthType":{ - "shape":"FunctionUrlAuthType", - "documentation":"The type of authentication that your function URL uses. Set to AWS_IAM if you want to restrict access to authenticated users only. Set to NONE if you want to bypass IAM authentication to create a public endpoint. For more information, see Security and auth model for Lambda function URLs.
The cross-origin resource sharing (CORS) settings for your function URL.
" - }, - "CreationTime":{ - "shape":"Timestamp", - "documentation":"When the function URL was created, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).
" - }, - "InvokeMode":{ - "shape":"InvokeMode", - "documentation":"Use one of the following options:
BUFFERED – This is the default option. Lambda invokes your function using the Invoke API operation. Invocation results are available when the payload is complete. The maximum payload size is 6 MB.
RESPONSE_STREAM – Your function streams payload results as they become available. Lambda invokes your function using the InvokeWithResponseStream API operation. The maximum response payload size is 20 MB; however, you can request a quota increase.
The Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic.
" - } - }, - "documentation":"The dead-letter queue for failed asynchronous invocations.
" - }, - "DeleteAliasRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "Name" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name - MyFunction.
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.
Partial ARN - 123456789012:function:MyFunction.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Name":{ - "shape":"Alias", - "documentation":"The name of the alias.
", - "location":"uri", - "locationName":"Name" - } - } - }, - "DeleteCodeSigningConfigRequest":{ - "type":"structure", - "required":["CodeSigningConfigArn"], - "members":{ - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"The The Amazon Resource Name (ARN) of the code signing configuration.
", - "location":"uri", - "locationName":"CodeSigningConfigArn" - } - } - }, - "DeleteCodeSigningConfigResponse":{ - "type":"structure", - "members":{} - }, - "DeleteEventSourceMappingRequest":{ - "type":"structure", - "required":["UUID"], - "members":{ - "UUID":{ - "shape":"String", - "documentation":"The identifier of the event source mapping.
", - "location":"uri", - "locationName":"UUID" - } - } - }, - "DeleteFunctionCodeSigningConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name - MyFunction.
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.
Partial ARN - 123456789012:function:MyFunction.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - } - } - }, - "DeleteFunctionConcurrencyRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - } - } - }, - "DeleteFunctionEventInvokeConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function, version, or alias.
Name formats
Function name - my-function (name-only), my-function:v1 (with alias).
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN - 123456789012:function:my-function.
You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"A version number or alias name.
", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "DeleteFunctionRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function or version.
Name formats
Function name – my-function (name-only), my-function:1 (with version).
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"Specify a version to delete. You can't delete a version that an alias references.
", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "DeleteFunctionUrlConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"FunctionUrlQualifier", - "documentation":"The alias name.
", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "DeleteLayerVersionRequest":{ - "type":"structure", - "required":[ - "LayerName", - "VersionNumber" - ], - "members":{ - "LayerName":{ - "shape":"LayerName", - "documentation":"The name or Amazon Resource Name (ARN) of the layer.
", - "location":"uri", - "locationName":"LayerName" - }, - "VersionNumber":{ - "shape":"LayerVersionNumber", - "documentation":"The version number.
", - "location":"uri", - "locationName":"VersionNumber" - } - } - }, - "DeleteProvisionedConcurrencyConfigRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "Qualifier" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"The version number or alias name.
", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "Description":{ - "type":"string", - "max":256, - "min":0 - }, - "DestinationArn":{ - "type":"string", - "max":350, - "min":0, - "pattern":"$|arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\\-])+:([a-z]{2}(-gov)?-[a-z]+-\\d{1})?:(\\d{12})?:(.*)" - }, - "DestinationConfig":{ - "type":"structure", - "members":{ - "OnSuccess":{ - "shape":"OnSuccess", - "documentation":"The destination configuration for successful invocations. Not supported in CreateEventSourceMapping or UpdateEventSourceMapping.
The destination configuration for failed invocations.
" - } - }, - "documentation":"A configuration object that specifies the destination of an event after Lambda processes it. For more information, see Adding a destination.
" - }, - "DocumentDBEventSourceConfig":{ - "type":"structure", - "members":{ - "DatabaseName":{ - "shape":"DatabaseName", - "documentation":"The name of the database to consume within the DocumentDB cluster.
" - }, - "CollectionName":{ - "shape":"CollectionName", - "documentation":"The name of the collection to consume within the database. If you do not specify a collection, Lambda consumes all collections.
" - }, - "FullDocument":{ - "shape":"FullDocument", - "documentation":"Determines what DocumentDB sends to your event stream during document update operations. If set to UpdateLookup, DocumentDB sends a delta describing the changes, along with a copy of the entire document. Otherwise, DocumentDB sends only a partial document that contains the changes.
" - } - }, - "documentation":"Specific configuration settings for a DocumentDB event source.
" - }, - "DurableConfig":{ - "type":"structure", - "members":{ - "RetentionPeriodInDays":{"shape":"RetentionPeriodInDays"}, - "ExecutionTimeout":{"shape":"ExecutionTimeout"} - } - }, - "DurableExecutionAlreadyStartedException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "error":{ - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - "DurableExecutionArn":{ - "type":"string", - "max":1024, - "min":1, - "pattern":"arn:([a-zA-Z0-9-]+):lambda:([a-zA-Z0-9-]+):(\\d{12}):function:([a-zA-Z0-9_-]+):(\\$LATEST(?:\\.PUBLISHED)?|[0-9]+)/durable-execution/([a-zA-Z0-9_-]+)/([a-zA-Z0-9_-]+)" - }, - "DurableExecutionName":{ - "type":"string", - "max":64, - "min":1, - "pattern":"[a-zA-Z0-9-_]+" - }, - "DurableExecutions":{ - "type":"list", - "member":{"shape":"Execution"} - }, - "DurationSeconds":{ - "type":"integer", - "box":true, - "min":0 - }, - "EC2AccessDeniedException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"Need additional permissions to configure VPC settings.
", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "EC2ThrottledException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"Amazon EC2 throttled Lambda during Lambda function initialization using the execution role provided for the function.
", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "EC2UnexpectedException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"}, - "EC2ErrorCode":{"shape":"String"} - }, - "documentation":"Lambda received an unexpected Amazon EC2 client exception while setting up for the Lambda function.
", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "EFSIOException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"An error occurred when reading from or writing to a connected file system.
", - "error":{ - "httpStatusCode":410, - "senderFault":true - }, - "exception":true - }, - "EFSMountConnectivityException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"The Lambda function couldn't make a network connection to the configured file system.
", - "error":{ - "httpStatusCode":408, - "senderFault":true - }, - "exception":true - }, - "EFSMountFailureException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"The Lambda function couldn't mount the configured file system due to a permission or configuration issue.
", - "error":{ - "httpStatusCode":403, - "senderFault":true - }, - "exception":true - }, - "EFSMountTimeoutException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"The Lambda function made a network connection to the configured file system, but the mount operation timed out.
", - "error":{ - "httpStatusCode":408, - "senderFault":true - }, - "exception":true - }, - "ENILimitReachedException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"Lambda couldn't create an elastic network interface in the VPC, specified as part of Lambda function configuration, because the limit for network interfaces has been reached. For more information, see Lambda quotas.
", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "Enabled":{ - "type":"boolean", - "box":true - }, - "EndPointType":{ - "type":"string", - "enum":["KAFKA_BOOTSTRAP_SERVERS"] - }, - "Endpoint":{ - "type":"string", - "max":300, - "min":1, - "pattern":"(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\\-]*[A-Za-z0-9]):[0-9]{1,5}" - }, - "EndpointLists":{ - "type":"list", - "member":{"shape":"Endpoint"}, - "max":10, - "min":1 - }, - "Endpoints":{ - "type":"map", - "key":{"shape":"EndPointType"}, - "value":{"shape":"EndpointLists"}, - "max":2, - "min":1 - }, - "Environment":{ - "type":"structure", - "members":{ - "Variables":{ - "shape":"EnvironmentVariables", - "documentation":"Environment variable key-value pairs. For more information, see Using Lambda environment variables.
" - } - }, - "documentation":"A function's environment variable settings. You can use environment variables to adjust your function's behavior without updating code. An environment variable is a pair of strings that are stored in a function's version-specific configuration.
" - }, - "EnvironmentError":{ - "type":"structure", - "members":{ - "ErrorCode":{ - "shape":"String", - "documentation":"The error code.
" - }, - "Message":{ - "shape":"SensitiveString", - "documentation":"The error message.
" - } - }, - "documentation":"Error messages for environment variables that couldn't be applied.
" - }, - "EnvironmentResponse":{ - "type":"structure", - "members":{ - "Variables":{ - "shape":"EnvironmentVariables", - "documentation":"Environment variable key-value pairs. Omitted from CloudTrail logs.
" - }, - "Error":{ - "shape":"EnvironmentError", - "documentation":"Error messages for environment variables that couldn't be applied.
" - } - }, - "documentation":"The results of an operation to update or read environment variables. If the operation succeeds, the response contains the environment variables. If it fails, the response contains details about the error.
" - }, - "EnvironmentVariableName":{ - "type":"string", - "pattern":"[a-zA-Z]([a-zA-Z0-9_])+", - "sensitive":true - }, - "EnvironmentVariableValue":{ - "type":"string", - "sensitive":true - }, - "EnvironmentVariables":{ - "type":"map", - "key":{"shape":"EnvironmentVariableName"}, - "value":{"shape":"EnvironmentVariableValue"}, - "sensitive":true - }, - "EphemeralStorage":{ - "type":"structure", - "required":["Size"], - "members":{ - "Size":{ - "shape":"EphemeralStorageSize", - "documentation":"The size of the function's /tmp directory.
The size of the function's /tmp directory in MB. The default value is 512, but it can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).
The identifier of the event source mapping.
" - }, - "StartingPosition":{ - "shape":"EventSourcePosition", - "documentation":"The position in a stream from which to start reading. Required for Amazon Kinesis and Amazon DynamoDB Stream event sources. AT_TIMESTAMP is supported only for Amazon Kinesis streams, Amazon DocumentDB, Amazon MSK, and self-managed Apache Kafka.
With StartingPosition set to AT_TIMESTAMP, the time from which to start reading. StartingPositionTimestamp cannot be in the future.
The maximum number of records in each batch that Lambda pulls from your stream or queue and sends to your function. Lambda passes all of the records in the batch to the function in a single call, up to the payload limit for synchronous invocation (6 MB).
Default value: Varies by service. For Amazon SQS, the default is 10. For all other services, the default is 100.
Related setting: When you set BatchSize to a value greater than 10, you must set MaximumBatchingWindowInSeconds to at least 1.
The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function. You can configure MaximumBatchingWindowInSeconds to any value from 0 seconds to 300 seconds in increments of seconds.
For streams and Amazon SQS event sources, the default batching window is 0 seconds. For Amazon MSK, self-managed Apache Kafka, Amazon MQ, and DocumentDB event sources, the default batching window is 500 ms. Note that because you can only change MaximumBatchingWindowInSeconds in increments of seconds, you cannot revert to the 500 ms default batching window after you have changed it. To restore the default batching window, you must create a new event source mapping.
Related setting: For streams and Amazon SQS event sources, when you set BatchSize to a value greater than 10, you must set MaximumBatchingWindowInSeconds to at least 1.
(Kinesis and DynamoDB Streams only) The number of batches to process concurrently from each shard. The default value is 1.
" - }, - "EventSourceArn":{ - "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the event source.
" - }, - "FilterCriteria":{ - "shape":"FilterCriteria", - "documentation":"An object that defines the filter criteria that determine whether Lambda should process an event. For more information, see Lambda event filtering.
If filter criteria is encrypted, this field shows up as null in the response of ListEventSourceMapping API calls. You can view this field in plaintext in the response of GetEventSourceMapping and DeleteEventSourceMapping calls if you have kms:Decrypt permissions for the correct KMS key.
The ARN of the Lambda function.
" - }, - "LastModified":{ - "shape":"Date", - "documentation":"The date that the event source mapping was last updated or that its state changed.
" - }, - "LastProcessingResult":{ - "shape":"String", - "documentation":"The result of the event source mapping's last processing attempt.
" - }, - "State":{ - "shape":"String", - "documentation":"The state of the event source mapping. It can be one of the following: Creating, Enabling, Enabled, Disabling, Disabled, Updating, or Deleting.
Indicates whether a user or Lambda made the last change to the event source mapping.
" - }, - "DestinationConfig":{ - "shape":"DestinationConfig", - "documentation":"(Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Apache Kafka event sources only) A configuration object that specifies the destination of an event after Lambda processes it.
" - }, - "Topics":{ - "shape":"Topics", - "documentation":"The name of the Kafka topic.
" - }, - "Queues":{ - "shape":"Queues", - "documentation":"(Amazon MQ) The name of the Amazon MQ broker destination queue to consume.
" - }, - "SourceAccessConfigurations":{ - "shape":"SourceAccessConfigurations", - "documentation":"An array of the authentication protocol, VPC components, or virtual host to secure and define your event source.
" - }, - "SelfManagedEventSource":{ - "shape":"SelfManagedEventSource", - "documentation":"The self-managed Apache Kafka cluster for your event source.
" - }, - "MaximumRecordAgeInSeconds":{ - "shape":"MaximumRecordAgeInSeconds", - "documentation":"(Kinesis and DynamoDB Streams only) Discard records older than the specified age. The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, Lambda never discards old records.
The minimum valid value for maximum record age is 60 seconds. Although values less than 60 and greater than -1 fall within the parameter's absolute range, they are not allowed.
(Kinesis and DynamoDB Streams only) If the function returns an error, split the batch in two and retry. The default value is false.
" - }, - "MaximumRetryAttempts":{ - "shape":"MaximumRetryAttemptsEventSourceMapping", - "documentation":"(Kinesis and DynamoDB Streams only) Discard records after the specified number of retries. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, Lambda retries failed records until the record expires in the event source.
" - }, - "TumblingWindowInSeconds":{ - "shape":"TumblingWindowInSeconds", - "documentation":"(Kinesis and DynamoDB Streams only) The duration in seconds of a processing window for DynamoDB and Kinesis Streams event sources. A value of 0 seconds indicates no tumbling window.
" - }, - "FunctionResponseTypes":{ - "shape":"FunctionResponseTypeList", - "documentation":"(Kinesis, DynamoDB Streams, and Amazon SQS) A list of current response type enums applied to the event source mapping.
" - }, - "AmazonManagedKafkaEventSourceConfig":{ - "shape":"AmazonManagedKafkaEventSourceConfig", - "documentation":"Specific configuration settings for an Amazon Managed Streaming for Apache Kafka (Amazon MSK) event source.
" - }, - "SelfManagedKafkaEventSourceConfig":{ - "shape":"SelfManagedKafkaEventSourceConfig", - "documentation":"Specific configuration settings for a self-managed Apache Kafka event source.
" - }, - "ScalingConfig":{ - "shape":"ScalingConfig", - "documentation":"(Amazon SQS only) The scaling configuration for the event source. For more information, see Configuring maximum concurrency for Amazon SQS event sources.
" - }, - "DocumentDBEventSourceConfig":{ - "shape":"DocumentDBEventSourceConfig", - "documentation":"Specific configuration settings for a DocumentDB event source.
" - }, - "KMSKeyArn":{ - "shape":"KMSKeyArn", - "documentation":"The ARN of the Key Management Service (KMS) customer managed key that Lambda uses to encrypt your function's filter criteria.
" - }, - "FilterCriteriaError":{ - "shape":"FilterCriteriaError", - "documentation":"An object that contains details about an error related to filter criteria encryption.
" - }, - "EventSourceMappingArn":{ - "shape":"EventSourceMappingArn", - "documentation":"The Amazon Resource Name (ARN) of the event source mapping.
" - }, - "MetricsConfig":{ - "shape":"EventSourceMappingMetricsConfig", - "documentation":"The metrics configuration for your event source. For more information, see Event source mapping metrics.
" - }, - "ProvisionedPollerConfig":{ - "shape":"ProvisionedPollerConfig", - "documentation":"(Amazon MSK and self-managed Apache Kafka only) The provisioned mode configuration for the event source. For more information, see provisioned mode.
" - } - }, - "documentation":"A mapping between an Amazon Web Services resource and a Lambda function. For details, see CreateEventSourceMapping.
" - }, - "EventSourceMappingMetric":{ - "type":"string", - "enum":["EventCount"] - }, - "EventSourceMappingMetricList":{ - "type":"list", - "member":{"shape":"EventSourceMappingMetric"}, - "max":1, - "min":0 - }, - "EventSourceMappingMetricsConfig":{ - "type":"structure", - "members":{ - "Metrics":{ - "shape":"EventSourceMappingMetricList", - "documentation":" The metrics you want your event source mapping to produce. Include EventCount to receive event source mapping metrics related to the number of events processed by your event source mapping. For more information about these metrics, see Event source mapping metrics.
The metrics configuration for your event source. Use this configuration object to define which metrics you want your event source mapping to produce.
" - }, - "EventSourceMappingsList":{ - "type":"list", - "member":{"shape":"EventSourceMappingConfiguration"} - }, - "EventSourcePosition":{ - "type":"string", - "enum":[ - "TRIM_HORIZON", - "LATEST", - "AT_TIMESTAMP" - ] - }, - "EventSourceToken":{ - "type":"string", - "max":256, - "min":0, - "pattern":"[a-zA-Z0-9._\\-]+" - }, - "EventType":{ - "type":"string", - "enum":[ - "ExecutionStarted", - "ExecutionSucceeded", - "ExecutionFailed", - "ExecutionTimedOut", - "ExecutionStopped", - "ContextStarted", - "ContextSucceeded", - "ContextFailed", - "WaitStarted", - "WaitSucceeded", - "WaitCancelled", - "StepStarted", - "StepSucceeded", - "StepFailed", - "ChainedInvokePending", - "ChainedInvokeStarted", - "ChainedInvokeSucceeded", - "ChainedInvokeFailed", - "ChainedInvokeTimedOut", - "ChainedInvokeCancelled", - "CallbackStarted", - "CallbackSucceeded", - "CallbackFailed", - "CallbackTimedOut", - "InvocationCompleted" - ] - }, - "Events":{ - "type":"list", - "member":{"shape":"Event"} - }, - "Execution":{ - "type":"structure", - "required":[ - "DurableExecutionArn", - "DurableExecutionName", - "FunctionArn", - "Status", - "StartTimestamp" - ], - "members":{ - "DurableExecutionArn":{"shape":"DurableExecutionArn"}, - "DurableExecutionName":{"shape":"DurableExecutionName"}, - "FunctionArn":{"shape":"FunctionArn"}, - "Status":{"shape":"ExecutionStatus"}, - "StartTimestamp":{"shape":"ExecutionTimestamp"}, - "EndTimestamp":{"shape":"ExecutionTimestamp"} - } - }, - "ExecutionDetails":{ - "type":"structure", - "members":{ - "InputPayload":{"shape":"InputPayload"} - } - }, - "ExecutionFailedDetails":{ - "type":"structure", - "required":["Error"], - "members":{ - "Error":{"shape":"EventError"} - } - }, - "ExecutionStartedDetails":{ - "type":"structure", - "required":[ - "Input", - "ExecutionTimeout" - ], - "members":{ - "Input":{"shape":"EventInput"}, - "ExecutionTimeout":{"shape":"DurationSeconds"} - } - }, - "ExecutionStatus":{ - "type":"string", - "enum":[ - "RUNNING", - "SUCCEEDED", - "FAILED", - "TIMED_OUT", - "STOPPED" - ] - }, - "ExecutionStatusList":{ - "type":"list", - "member":{"shape":"ExecutionStatus"} - }, - "ExecutionStoppedDetails":{ - "type":"structure", - "required":["Error"], - "members":{ - "Error":{"shape":"EventError"} - } - }, - "ExecutionSucceededDetails":{ - "type":"structure", - "required":["Result"], - "members":{ - "Result":{"shape":"EventResult"} - } - }, - "ExecutionTimedOutDetails":{ - "type":"structure", - "members":{ - "Error":{"shape":"EventError"} - } - }, - "ExecutionTimeout":{ - "type":"integer", - "box":true, - "max":31622400, - "min":1 - }, - "ExecutionTimestamp":{"type":"timestamp"}, - "FileSystemArn":{ - "type":"string", - "max":200, - "min":0, - "pattern":"arn:aws[a-zA-Z-]*:elasticfilesystem:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:access-point/fsap-[a-f0-9]{17}" - }, - "FileSystemConfig":{ - "type":"structure", - "required":[ - "Arn", - "LocalMountPath" - ], - "members":{ - "Arn":{ - "shape":"FileSystemArn", - "documentation":"The Amazon Resource Name (ARN) of the Amazon EFS access point that provides access to the file system.
" - }, - "LocalMountPath":{ - "shape":"LocalMountPath", - "documentation":"The path where the function can access the file system, starting with /mnt/.
Details about the connection between a Lambda function and an Amazon EFS file system.
" - }, - "FileSystemConfigList":{ - "type":"list", - "member":{"shape":"FileSystemConfig"}, - "max":1, - "min":0 - }, - "Filter":{ - "type":"structure", - "members":{ - "Pattern":{ - "shape":"Pattern", - "documentation":"A filter pattern. For more information on the syntax of a filter pattern, see Filter rule syntax.
" - } - }, - "documentation":" A structure within a FilterCriteria object that defines an event filtering pattern.
A list of filters.
" - } - }, - "documentation":"An object that contains the filters for an event source.
" - }, - "FilterCriteriaError":{ - "type":"structure", - "members":{ - "ErrorCode":{ - "shape":"FilterCriteriaErrorCode", - "documentation":"The KMS exception that resulted from filter criteria encryption or decryption.
" - }, - "Message":{ - "shape":"FilterCriteriaErrorMessage", - "documentation":"The error message.
" - } - }, - "documentation":"An object that contains details about an error related to filter criteria encryption.
" - }, - "FilterCriteriaErrorCode":{ - "type":"string", - "max":50, - "min":10, - "pattern":"[A-Za-z]+Exception" - }, - "FilterCriteriaErrorMessage":{ - "type":"string", - "max":2048, - "min":10, - "pattern":".*" - }, - "FilterList":{ - "type":"list", - "member":{"shape":"Filter"} - }, - "FullDocument":{ - "type":"string", - "enum":[ - "UpdateLookup", - "Default" - ] - }, - "FunctionArn":{ - "type":"string", - "max":10000, - "min":0, - "pattern":"arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?" - }, - "FunctionArnList":{ - "type":"list", - "member":{"shape":"FunctionArn"} - }, - "FunctionCode":{ - "type":"structure", - "members":{ - "ZipFile":{ - "shape":"Blob", - "documentation":"The base64-encoded contents of the deployment package. Amazon Web Services SDK and CLI clients handle the encoding for you.
" - }, - "S3Bucket":{ - "shape":"S3Bucket", - "documentation":"An Amazon S3 bucket in the same Amazon Web Services Region as your function. The bucket can be in a different Amazon Web Services account.
" - }, - "S3Key":{ - "shape":"S3Key", - "documentation":"The Amazon S3 key of the deployment package.
" - }, - "S3ObjectVersion":{ - "shape":"S3ObjectVersion", - "documentation":"For versioned objects, the version of the deployment package object to use.
" - }, - "ImageUri":{ - "shape":"String", - "documentation":"URI of a container image in the Amazon ECR registry.
" - }, - "SourceKMSKeyArn":{ - "shape":"KMSKeyArn", - "documentation":"The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt your function's .zip deployment package. If you don't provide a customer managed key, Lambda uses an Amazon Web Services owned key.
" - } - }, - "documentation":"The code for the Lambda function. You can either specify an object in Amazon S3, upload a .zip file archive deployment package directly, or specify the URI of a container image.
" - }, - "FunctionCodeLocation":{ - "type":"structure", - "members":{ - "RepositoryType":{ - "shape":"String", - "documentation":"The service that's hosting the file.
" - }, - "Location":{ - "shape":"String", - "documentation":"A presigned URL that you can use to download the deployment package.
" - }, - "ImageUri":{ - "shape":"String", - "documentation":"URI of a container image in the Amazon ECR registry.
" - }, - "ResolvedImageUri":{ - "shape":"String", - "documentation":"The resolved URI for the image.
" - }, - "SourceKMSKeyArn":{ - "shape":"String", - "documentation":"The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt your function's .zip deployment package. If you don't provide a customer managed key, Lambda uses an Amazon Web Services owned key.
" - } - }, - "documentation":"Details about a function's deployment package.
" - }, - "FunctionConfiguration":{ - "type":"structure", - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"The name of the function.
" - }, - "FunctionArn":{ - "shape":"NameSpacedFunctionArn", - "documentation":"The function's Amazon Resource Name (ARN).
" - }, - "Runtime":{ - "shape":"Runtime", - "documentation":"The identifier of the function's runtime. Runtime is required if the deployment package is a .zip file archive. Specifying a runtime results in an error if you're deploying a function using a container image.
The following list includes deprecated runtimes. Lambda blocks creating new functions and updating existing functions shortly after each runtime is deprecated. For more information, see Runtime use after deprecation.
For a list of all currently supported runtimes, see Supported runtimes.
" - }, - "Role":{ - "shape":"RoleArn", - "documentation":"The function's execution role.
" - }, - "Handler":{ - "shape":"Handler", - "documentation":"The function that Lambda calls to begin running your function.
" - }, - "CodeSize":{ - "shape":"Long", - "documentation":"The size of the function's deployment package, in bytes.
" - }, - "Description":{ - "shape":"Description", - "documentation":"The function's description.
" - }, - "Timeout":{ - "shape":"Timeout", - "documentation":"The amount of time in seconds that Lambda allows a function to run before stopping it.
" - }, - "MemorySize":{ - "shape":"MemorySize", - "documentation":"The amount of memory available to the function at runtime.
" - }, - "LastModified":{ - "shape":"Timestamp", - "documentation":"The date and time that the function was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).
" - }, - "CodeSha256":{ - "shape":"String", - "documentation":"The SHA256 hash of the function's deployment package.
" - }, - "Version":{ - "shape":"Version", - "documentation":"The version of the Lambda function.
" - }, - "VpcConfig":{ - "shape":"VpcConfigResponse", - "documentation":"The function's networking configuration.
" - }, - "DeadLetterConfig":{ - "shape":"DeadLetterConfig", - "documentation":"The function's dead letter queue.
" - }, - "Environment":{ - "shape":"EnvironmentResponse", - "documentation":"The function's environment variables. Omitted from CloudTrail logs.
" - }, - "KMSKeyArn":{ - "shape":"KMSKeyArn", - "documentation":"The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt the following resources:
The function's environment variables.
The function's Lambda SnapStart snapshots.
When used with SourceKMSKeyArn, the unzipped version of the .zip deployment package that's used for function invocations. For more information, see Specifying a customer managed key for Lambda.
The optimized version of the container image that's used for function invocations. Note that this is not the same key that's used to protect your container image in the Amazon Elastic Container Registry (Amazon ECR). For more information, see Function lifecycle.
If you don't provide a customer managed key, Lambda uses an Amazon Web Services owned key or an Amazon Web Services managed key.
" - }, - "TracingConfig":{ - "shape":"TracingConfigResponse", - "documentation":"The function's X-Ray tracing configuration.
" - }, - "MasterArn":{ - "shape":"FunctionArn", - "documentation":"For Lambda@Edge functions, the ARN of the main function.
" - }, - "RevisionId":{ - "shape":"String", - "documentation":"The latest updated revision of the function or alias.
" - }, - "Layers":{ - "shape":"LayersReferenceList", - "documentation":"The function's layers.
" - }, - "State":{ - "shape":"State", - "documentation":"The current state of the function. When the state is Inactive, you can reactivate the function by invoking it.
The reason for the function's current state.
" - }, - "StateReasonCode":{ - "shape":"StateReasonCode", - "documentation":"The reason code for the function's current state. When the code is Creating, you can't invoke or modify the function.
The status of the last update that was performed on the function. This is first set to Successful after function creation completes.
The reason for the last update that was performed on the function.
" - }, - "LastUpdateStatusReasonCode":{ - "shape":"LastUpdateStatusReasonCode", - "documentation":"The reason code for the last update that was performed on the function.
" - }, - "FileSystemConfigs":{ - "shape":"FileSystemConfigList", - "documentation":"Connection settings for an Amazon EFS file system.
" - }, - "PackageType":{ - "shape":"PackageType", - "documentation":"The type of deployment package. Set to Image for container image and set Zip for .zip file archive.
The function's image configuration values.
" - }, - "SigningProfileVersionArn":{ - "shape":"Arn", - "documentation":"The ARN of the signing profile version.
" - }, - "SigningJobArn":{ - "shape":"Arn", - "documentation":"The ARN of the signing job.
" - }, - "Architectures":{ - "shape":"ArchitecturesList", - "documentation":"The instruction set architecture that the function supports. Architecture is a string array with one of the valid values. The default architecture value is x86_64.
The size of the function's /tmp directory in MB. The default value is 512, but it can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).
Set ApplyOn to PublishedVersions to create a snapshot of the initialized execution environment when you publish a function version. For more information, see Improving startup performance with Lambda SnapStart.
The ARN of the runtime and any errors that occurred.
" - }, - "LoggingConfig":{ - "shape":"LoggingConfig", - "documentation":"The function's Amazon CloudWatch Logs configuration settings.
" - }, - "DurableConfig":{"shape":"DurableConfig"} - }, - "documentation":"Details about a function's configuration.
" - }, - "FunctionEventInvokeConfig":{ - "type":"structure", - "members":{ - "LastModified":{ - "shape":"Date", - "documentation":"The date and time that the configuration was last updated.
" - }, - "FunctionArn":{ - "shape":"FunctionArn", - "documentation":"The Amazon Resource Name (ARN) of the function.
" - }, - "MaximumRetryAttempts":{ - "shape":"MaximumRetryAttempts", - "documentation":"The maximum number of times to retry when the function returns an error.
" - }, - "MaximumEventAgeInSeconds":{ - "shape":"MaximumEventAgeInSeconds", - "documentation":"The maximum age of a request that Lambda sends to a function for processing.
" - }, - "DestinationConfig":{ - "shape":"DestinationConfig", - "documentation":"A destination for events after they have been sent to a function for processing.
Destinations
Function - The Amazon Resource Name (ARN) of a Lambda function.
Queue - The ARN of a standard SQS queue.
Bucket - The ARN of an Amazon S3 bucket.
Topic - The ARN of a standard SNS topic.
Event Bus - The ARN of an Amazon EventBridge event bus.
S3 buckets are supported only for on-failure destinations. To retain records of successful invocations, use another destination type.
The HTTP URL endpoint for your function.
" - }, - "FunctionArn":{ - "shape":"FunctionArn", - "documentation":"The Amazon Resource Name (ARN) of your function.
" - }, - "CreationTime":{ - "shape":"Timestamp", - "documentation":"When the function URL was created, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).
" - }, - "LastModifiedTime":{ - "shape":"Timestamp", - "documentation":"When the function URL configuration was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).
" - }, - "Cors":{ - "shape":"Cors", - "documentation":"The cross-origin resource sharing (CORS) settings for your function URL.
" - }, - "AuthType":{ - "shape":"FunctionUrlAuthType", - "documentation":"The type of authentication that your function URL uses. Set to AWS_IAM if you want to restrict access to authenticated users only. Set to NONE if you want to bypass IAM authentication to create a public endpoint. For more information, see Security and auth model for Lambda function URLs.
Use one of the following options:
BUFFERED – This is the default option. Lambda invokes your function using the Invoke API operation. Invocation results are available when the payload is complete. The maximum payload size is 6 MB.
RESPONSE_STREAM – Your function streams payload results as they become available. Lambda invokes your function using the InvokeWithResponseStream API operation. The maximum response payload size is 20 MB; however, you can request a quota increase.
Details about a Lambda function URL.
" - }, - "FunctionUrlConfigList":{ - "type":"list", - "member":{"shape":"FunctionUrlConfig"} - }, - "FunctionUrlQualifier":{ - "type":"string", - "max":128, - "min":1, - "pattern":"(^\\$LATEST$)|((?!^[0-9]+$)([a-zA-Z0-9-_]+))" - }, - "FunctionVersion":{ - "type":"string", - "enum":["ALL"] - }, - "GetAccountSettingsRequest":{ - "type":"structure", - "members":{} - }, - "GetAccountSettingsResponse":{ - "type":"structure", - "members":{ - "AccountLimit":{ - "shape":"AccountLimit", - "documentation":"Limits that are related to concurrency and code storage.
" - }, - "AccountUsage":{ - "shape":"AccountUsage", - "documentation":"The number of functions and amount of storage in use.
" - } - } - }, - "GetAliasRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "Name" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name - MyFunction.
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.
Partial ARN - 123456789012:function:MyFunction.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Name":{ - "shape":"Alias", - "documentation":"The name of the alias.
", - "location":"uri", - "locationName":"Name" - } - } - }, - "GetCodeSigningConfigRequest":{ - "type":"structure", - "required":["CodeSigningConfigArn"], - "members":{ - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"The The Amazon Resource Name (ARN) of the code signing configuration.
", - "location":"uri", - "locationName":"CodeSigningConfigArn" - } - } - }, - "GetCodeSigningConfigResponse":{ - "type":"structure", - "required":["CodeSigningConfig"], - "members":{ - "CodeSigningConfig":{ - "shape":"CodeSigningConfig", - "documentation":"The code signing configuration
" - } - } - }, - "GetDurableExecutionHistoryRequest":{ - "type":"structure", - "required":["DurableExecutionArn"], - "members":{ - "DurableExecutionArn":{ - "shape":"DurableExecutionArn", - "location":"uri", - "locationName":"DurableExecutionArn" - }, - "IncludeExecutionData":{ - "shape":"IncludeExecutionData", - "location":"querystring", - "locationName":"IncludeExecutionData" - }, - "MaxItems":{ - "shape":"ItemCount", - "location":"querystring", - "locationName":"MaxItems" - }, - "Marker":{ - "shape":"String", - "location":"querystring", - "locationName":"Marker" - }, - "ReverseOrder":{ - "shape":"ReverseOrder", - "location":"querystring", - "locationName":"ReverseOrder" - } - } - }, - "GetDurableExecutionHistoryResponse":{ - "type":"structure", - "required":["Events"], - "members":{ - "Events":{"shape":"Events"}, - "NextMarker":{"shape":"String"} - } - }, - "GetDurableExecutionRequest":{ - "type":"structure", - "required":["DurableExecutionArn"], - "members":{ - "DurableExecutionArn":{ - "shape":"DurableExecutionArn", - "location":"uri", - "locationName":"DurableExecutionArn" - } - } - }, - "GetDurableExecutionResponse":{ - "type":"structure", - "required":[ - "DurableExecutionArn", - "DurableExecutionName", - "FunctionArn", - "StartTimestamp", - "Status" - ], - "members":{ - "DurableExecutionArn":{"shape":"DurableExecutionArn"}, - "DurableExecutionName":{"shape":"DurableExecutionName"}, - "FunctionArn":{"shape":"FunctionArn"}, - "InputPayload":{"shape":"InputPayload"}, - "Result":{"shape":"OutputPayload"}, - "Error":{"shape":"ErrorObject"}, - "StartTimestamp":{"shape":"ExecutionTimestamp"}, - "Status":{"shape":"ExecutionStatus"}, - "EndTimestamp":{"shape":"ExecutionTimestamp"}, - "Version":{"shape":"Version"} - } - }, - "GetDurableExecutionStateRequest":{ - "type":"structure", - "required":[ - "DurableExecutionArn", - "CheckpointToken" - ], - "members":{ - "DurableExecutionArn":{ - "shape":"DurableExecutionArn", - "location":"uri", - "locationName":"DurableExecutionArn" - }, - "CheckpointToken":{ - "shape":"CheckpointToken", - "location":"querystring", - "locationName":"CheckpointToken" - }, - "Marker":{ - "shape":"String", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"ItemCount", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "GetDurableExecutionStateResponse":{ - "type":"structure", - "required":["Operations"], - "members":{ - "Operations":{"shape":"Operations"}, - "NextMarker":{"shape":"String"} - } - }, - "GetEventSourceMappingRequest":{ - "type":"structure", - "required":["UUID"], - "members":{ - "UUID":{ - "shape":"String", - "documentation":"The identifier of the event source mapping.
", - "location":"uri", - "locationName":"UUID" - } - } - }, - "GetFunctionCodeSigningConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name - MyFunction.
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.
Partial ARN - 123456789012:function:MyFunction.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - } - } - }, - "GetFunctionCodeSigningConfigResponse":{ - "type":"structure", - "required":[ - "CodeSigningConfigArn", - "FunctionName" - ], - "members":{ - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"The The Amazon Resource Name (ARN) of the code signing configuration.
" - }, - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name - MyFunction.
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.
Partial ARN - 123456789012:function:MyFunction.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
" - } - } - }, - "GetFunctionConcurrencyRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - } - } - }, - "GetFunctionConcurrencyResponse":{ - "type":"structure", - "members":{ - "ReservedConcurrentExecutions":{ - "shape":"ReservedConcurrentExecutions", - "documentation":"The number of simultaneous executions that are reserved for the function.
" - } - } - }, - "GetFunctionConfigurationRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"The name or ARN of the Lambda function, version, or alias.
Name formats
Function name – my-function (name-only), my-function:v1 (with alias).
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"Specify a version or alias to get details about a published version of the function.
", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "GetFunctionEventInvokeConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function, version, or alias.
Name formats
Function name - my-function (name-only), my-function:v1 (with alias).
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN - 123456789012:function:my-function.
You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"A version number or alias name.
", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "GetFunctionRecursionConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"UnqualifiedFunctionName", - "documentation":"", - "location":"uri", - "locationName":"FunctionName" - } - } - }, - "GetFunctionRecursionConfigResponse":{ - "type":"structure", - "members":{ - "RecursiveLoop":{ - "shape":"RecursiveLoop", - "documentation":"If your function's recursive loop detection configuration is Allow, Lambda doesn't take any action when it detects your function being invoked as part of a recursive loop.
If your function's recursive loop detection configuration is Terminate, Lambda stops invoking your function and notifies you when it detects the function being invoked as part of a recursive loop.
By default, Lambda sets your function's configuration to Terminate. You can update this configuration using the PutFunctionRecursionConfig action.
The name or ARN of the Lambda function, version, or alias.
Name formats
Function name – my-function (name-only), my-function:v1 (with alias).
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"Specify a version or alias to get details about a published version of the function.
", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "GetFunctionResponse":{ - "type":"structure", - "members":{ - "Configuration":{ - "shape":"FunctionConfiguration", - "documentation":"The configuration of the function or version.
" - }, - "Code":{ - "shape":"FunctionCodeLocation", - "documentation":"The deployment package of the function or version.
" - }, - "Tags":{ - "shape":"Tags", - "documentation":"The function's tags. Lambda returns tag data only if you have explicit allow permissions for lambda:ListTags.
" - }, - "TagsError":{ - "shape":"TagsError", - "documentation":"An object that contains details about an error related to retrieving tags.
" - }, - "Concurrency":{ - "shape":"Concurrency", - "documentation":"The function's reserved concurrency.
" - } - } - }, - "GetFunctionUrlConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"FunctionUrlQualifier", - "documentation":"The alias name.
", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "GetFunctionUrlConfigResponse":{ - "type":"structure", - "required":[ - "FunctionUrl", - "FunctionArn", - "AuthType", - "CreationTime", - "LastModifiedTime" - ], - "members":{ - "FunctionUrl":{ - "shape":"FunctionUrl", - "documentation":"The HTTP URL endpoint for your function.
" - }, - "FunctionArn":{ - "shape":"FunctionArn", - "documentation":"The Amazon Resource Name (ARN) of your function.
" - }, - "AuthType":{ - "shape":"FunctionUrlAuthType", - "documentation":"The type of authentication that your function URL uses. Set to AWS_IAM if you want to restrict access to authenticated users only. Set to NONE if you want to bypass IAM authentication to create a public endpoint. For more information, see Security and auth model for Lambda function URLs.
The cross-origin resource sharing (CORS) settings for your function URL.
" - }, - "CreationTime":{ - "shape":"Timestamp", - "documentation":"When the function URL was created, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).
" - }, - "LastModifiedTime":{ - "shape":"Timestamp", - "documentation":"When the function URL configuration was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).
" - }, - "InvokeMode":{ - "shape":"InvokeMode", - "documentation":"Use one of the following options:
BUFFERED – This is the default option. Lambda invokes your function using the Invoke API operation. Invocation results are available when the payload is complete. The maximum payload size is 6 MB.
RESPONSE_STREAM – Your function streams payload results as they become available. Lambda invokes your function using the InvokeWithResponseStream API operation. The maximum response payload size is 20 MB; however, you can request a quota increase.
The ARN of the layer version.
", - "location":"querystring", - "locationName":"Arn" - } - } - }, - "GetLayerVersionPolicyRequest":{ - "type":"structure", - "required":[ - "LayerName", - "VersionNumber" - ], - "members":{ - "LayerName":{ - "shape":"LayerName", - "documentation":"The name or Amazon Resource Name (ARN) of the layer.
", - "location":"uri", - "locationName":"LayerName" - }, - "VersionNumber":{ - "shape":"LayerVersionNumber", - "documentation":"The version number.
", - "location":"uri", - "locationName":"VersionNumber" - } - } - }, - "GetLayerVersionPolicyResponse":{ - "type":"structure", - "members":{ - "Policy":{ - "shape":"String", - "documentation":"The policy document.
" - }, - "RevisionId":{ - "shape":"String", - "documentation":"A unique identifier for the current revision of the policy.
" - } - } - }, - "GetLayerVersionRequest":{ - "type":"structure", - "required":[ - "LayerName", - "VersionNumber" - ], - "members":{ - "LayerName":{ - "shape":"LayerName", - "documentation":"The name or Amazon Resource Name (ARN) of the layer.
", - "location":"uri", - "locationName":"LayerName" - }, - "VersionNumber":{ - "shape":"LayerVersionNumber", - "documentation":"The version number.
", - "location":"uri", - "locationName":"VersionNumber" - } - } - }, - "GetLayerVersionResponse":{ - "type":"structure", - "members":{ - "Content":{ - "shape":"LayerVersionContentOutput", - "documentation":"Details about the layer version.
" - }, - "LayerArn":{ - "shape":"LayerArn", - "documentation":"The ARN of the layer.
" - }, - "LayerVersionArn":{ - "shape":"LayerVersionArn", - "documentation":"The ARN of the layer version.
" - }, - "Description":{ - "shape":"Description", - "documentation":"The description of the version.
" - }, - "CreatedDate":{ - "shape":"Timestamp", - "documentation":"The date that the layer version was created, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).
" - }, - "Version":{ - "shape":"LayerVersionNumber", - "documentation":"The version number.
" - }, - "CompatibleRuntimes":{ - "shape":"CompatibleRuntimes", - "documentation":"The layer's compatible runtimes.
The following list includes deprecated runtimes. For more information, see Runtime use after deprecation.
For a list of all currently supported runtimes, see Supported runtimes.
" - }, - "LicenseInfo":{ - "shape":"LicenseInfo", - "documentation":"The layer's software license.
" - }, - "CompatibleArchitectures":{ - "shape":"CompatibleArchitectures", - "documentation":"A list of compatible instruction set architectures.
" - } - } - }, - "GetPolicyRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"The name or ARN of the Lambda function, version, or alias.
Name formats
Function name – my-function (name-only), my-function:v1 (with alias).
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"Specify a version or alias to get the policy for that resource.
", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "GetPolicyResponse":{ - "type":"structure", - "members":{ - "Policy":{ - "shape":"String", - "documentation":"The resource-based policy.
" - }, - "RevisionId":{ - "shape":"String", - "documentation":"A unique identifier for the current revision of the policy.
" - } - } - }, - "GetProvisionedConcurrencyConfigRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "Qualifier" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"The version number or alias name.
", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "GetProvisionedConcurrencyConfigResponse":{ - "type":"structure", - "members":{ - "RequestedProvisionedConcurrentExecutions":{ - "shape":"PositiveInteger", - "documentation":"The amount of provisioned concurrency requested.
" - }, - "AvailableProvisionedConcurrentExecutions":{ - "shape":"NonNegativeInteger", - "documentation":"The amount of provisioned concurrency available.
" - }, - "AllocatedProvisionedConcurrentExecutions":{ - "shape":"NonNegativeInteger", - "documentation":"The amount of provisioned concurrency allocated. When a weighted alias is used during linear and canary deployments, this value fluctuates depending on the amount of concurrency that is provisioned for the function versions.
" - }, - "Status":{ - "shape":"ProvisionedConcurrencyStatusEnum", - "documentation":"The status of the allocation process.
" - }, - "StatusReason":{ - "shape":"String", - "documentation":"For failed allocations, the reason that provisioned concurrency could not be allocated.
" - }, - "LastModified":{ - "shape":"Timestamp", - "documentation":"The date and time that a user last updated the configuration, in ISO 8601 format.
" - } - } - }, - "GetRuntimeManagementConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"Specify a version of the function. This can be $LATEST or a published version number. If no value is specified, the configuration for the $LATEST version is returned.
The current runtime update mode of the function.
" - }, - "RuntimeVersionArn":{ - "shape":"RuntimeVersionArn", - "documentation":"The ARN of the runtime the function is configured to use. If the runtime update mode is Manual, the ARN is returned, otherwise null is returned.
The Amazon Resource Name (ARN) of your function.
" - } - } - }, - "Handler":{ - "type":"string", - "max":128, - "min":0, - "pattern":"[^\\s]+" - }, - "Header":{ - "type":"string", - "max":1024, - "min":0, - "pattern":".*" - }, - "HeadersList":{ - "type":"list", - "member":{"shape":"Header"}, - "max":100, - "min":0 - }, - "HttpStatus":{"type":"integer"}, - "ImageConfig":{ - "type":"structure", - "members":{ - "EntryPoint":{ - "shape":"StringList", - "documentation":"Specifies the entry point to their application, which is typically the location of the runtime executable.
" - }, - "Command":{ - "shape":"StringList", - "documentation":"Specifies parameters that you want to pass in with ENTRYPOINT.
" - }, - "WorkingDirectory":{ - "shape":"WorkingDirectory", - "documentation":"Specifies the working directory.
" - } - }, - "documentation":"Configuration values that override the container image Dockerfile settings. For more information, see Container image settings.
" - }, - "ImageConfigError":{ - "type":"structure", - "members":{ - "ErrorCode":{ - "shape":"String", - "documentation":"Error code.
" - }, - "Message":{ - "shape":"SensitiveString", - "documentation":"Error message.
" - } - }, - "documentation":"Error response to GetFunctionConfiguration.
Configuration values that override the container image Dockerfile.
" - }, - "Error":{ - "shape":"ImageConfigError", - "documentation":"Error response to GetFunctionConfiguration.
Response to a GetFunctionConfiguration request.
The code signature failed the integrity check. If the integrity check fails, then Lambda blocks deployment, even if the code signing policy is set to WARN.
", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "InvalidParameterValueException":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"String", - "documentation":"The exception type.
" - }, - "message":{ - "shape":"String", - "documentation":"The exception message.
" - } - }, - "documentation":"One of the parameters in the request is not valid.
", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "InvalidRequestContentException":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"String", - "documentation":"The exception type.
" - }, - "message":{ - "shape":"String", - "documentation":"The exception message.
" - } - }, - "documentation":"The request body could not be parsed as JSON, or a request header is invalid. For example, the 'x-amzn-RequestId' header is not a valid UUID string.
", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "InvalidRuntimeException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"The runtime or runtime version specified is not supported.
", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "InvalidSecurityGroupIDException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"The security group ID provided in the Lambda function VPC configuration is not valid.
", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "InvalidSubnetIDException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"The subnet ID provided in the Lambda function VPC configuration is not valid.
", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "InvalidZipFileException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"Lambda could not unzip the deployment package.
", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "InvocationCompletedDetails":{ - "type":"structure", - "required":[ - "StartTimestamp", - "EndTimestamp", - "RequestId" - ], - "members":{ - "StartTimestamp":{"shape":"ExecutionTimestamp"}, - "EndTimestamp":{"shape":"ExecutionTimestamp"}, - "RequestId":{"shape":"String"}, - "Error":{"shape":"EventError"} - } - }, - "InvocationRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"The name or ARN of the Lambda function, version, or alias.
Name formats
Function name – my-function (name-only), my-function:v1 (with alias).
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "InvocationType":{ - "shape":"InvocationType", - "documentation":"Choose from the following options.
RequestResponse (default) – Invoke the function synchronously. Keep the connection open until the function returns a response or times out. The API response includes the function response and additional data.
Event – Invoke the function asynchronously. Send events that fail multiple times to the function's dead-letter queue (if one is configured). The API response only includes a status code.
DryRun – Validate parameter values and verify that the user or role has permission to invoke the function.
Set to Tail to include the execution log in the response. Applies to synchronously invoked functions only.
Up to 3,583 bytes of base64-encoded data about the invoking client to pass to the function in the context object. Lambda passes the ClientContext object to your function for synchronous invocations only.
The JSON that you want to provide to your Lambda function as input.
You can enter the JSON directly. For example, --payload '{ \"key\": \"value\" }'. You can also specify a file path. For example, --payload file://payload.json.
Specify a version or alias to invoke a published version of the function.
", - "location":"querystring", - "locationName":"Qualifier" - } - }, - "payload":"Payload" - }, - "InvocationResponse":{ - "type":"structure", - "members":{ - "StatusCode":{ - "shape":"Integer", - "documentation":"The HTTP status code is in the 200 range for a successful request. For the RequestResponse invocation type, this status code is 200. For the Event invocation type, this status code is 202. For the DryRun invocation type, the status code is 204.
If present, indicates that an error occurred during function execution. Details about the error are included in the response payload.
", - "location":"header", - "locationName":"X-Amz-Function-Error" - }, - "LogResult":{ - "shape":"String", - "documentation":"The last 4 KB of the execution log, which is base64-encoded.
", - "location":"header", - "locationName":"X-Amz-Log-Result" - }, - "Payload":{ - "shape":"Blob", - "documentation":"The response from the function, or an error object.
" - }, - "ExecutedVersion":{ - "shape":"Version", - "documentation":"The version of the function that executed. When you invoke a function with an alias, this indicates which version the alias resolved to.
", - "location":"header", - "locationName":"X-Amz-Executed-Version" - }, - "DurableExecutionArn":{ - "shape":"DurableExecutionArn", - "location":"header", - "locationName":"X-Amz-Durable-Execution-Arn" - } - }, - "payload":"Payload" - }, - "InvocationType":{ - "type":"string", - "enum":[ - "Event", - "RequestResponse", - "DryRun" - ] - }, - "InvokeAsyncRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "InvokeArgs" - ], - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "InvokeArgs":{ - "shape":"BlobStream", - "documentation":"The JSON that you want to provide to your Lambda function as input.
" - } - }, - "deprecated":true, - "payload":"InvokeArgs" - }, - "InvokeAsyncResponse":{ - "type":"structure", - "members":{ - "Status":{ - "shape":"HttpStatus", - "documentation":"The status code.
", - "location":"statusCode" - } - }, - "deprecated":true, - "payload":"Body", - "documentation":"A success response (202 Accepted) indicates that the request is queued for invocation.
Data returned by your Lambda function.
", - "eventpayload":true - } - }, - "documentation":"A chunk of the streamed response payload.
", - "event":true - }, - "InvokeWithResponseStreamCompleteEvent":{ - "type":"structure", - "members":{ - "ErrorCode":{ - "shape":"String", - "documentation":"An error code.
" - }, - "ErrorDetails":{ - "shape":"String", - "documentation":"The details of any returned error.
" - }, - "LogResult":{ - "shape":"String", - "documentation":"The last 4 KB of the execution log, which is base64-encoded.
" - } - }, - "documentation":"A response confirming that the event stream is complete.
", - "event":true - }, - "InvokeWithResponseStreamRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "InvocationType":{ - "shape":"ResponseStreamingInvocationType", - "documentation":"Use one of the following options:
RequestResponse (default) – Invoke the function synchronously. Keep the connection open until the function returns a response or times out. The API operation response includes the function response and additional data.
DryRun – Validate parameter values and verify that the IAM user or role has permission to invoke the function.
Set to Tail to include the execution log in the response. Applies to synchronously invoked functions only.
Up to 3,583 bytes of base64-encoded data about the invoking client to pass to the function in the context object.
", - "location":"header", - "locationName":"X-Amz-Client-Context" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"The alias name.
", - "location":"querystring", - "locationName":"Qualifier" - }, - "Payload":{ - "shape":"Blob", - "documentation":"The JSON that you want to provide to your Lambda function as input.
You can enter the JSON directly. For example, --payload '{ \"key\": \"value\" }'. You can also specify a file path. For example, --payload file://payload.json.
For a successful request, the HTTP status code is in the 200 range. For the RequestResponse invocation type, this status code is 200. For the DryRun invocation type, this status code is 204.
The version of the function that executed. When you invoke a function with an alias, this indicates which version the alias resolved to.
", - "location":"header", - "locationName":"X-Amz-Executed-Version" - }, - "EventStream":{ - "shape":"InvokeWithResponseStreamResponseEvent", - "documentation":"The stream of response payloads.
" - }, - "ResponseStreamContentType":{ - "shape":"String", - "documentation":"The type of data the stream is returning.
", - "location":"header", - "locationName":"Content-Type" - } - }, - "payload":"EventStream" - }, - "InvokeWithResponseStreamResponseEvent":{ - "type":"structure", - "members":{ - "PayloadChunk":{ - "shape":"InvokeResponseStreamUpdate", - "documentation":"A chunk of the streamed response payload.
" - }, - "InvokeComplete":{ - "shape":"InvokeWithResponseStreamCompleteEvent", - "documentation":"An object that's returned when the stream has ended and all the payload chunks have been returned.
" - } - }, - "documentation":"An object that includes a chunk of the response payload. When the stream has ended, Lambda includes a InvokeComplete object.
Lambda couldn't decrypt the environment variables because KMS access was denied. Check the Lambda function's KMS permissions.
", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "KMSDisabledException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"Lambda couldn't decrypt the environment variables because the KMS key used is disabled. Check the Lambda function's KMS key settings.
", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "KMSInvalidStateException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"Lambda couldn't decrypt the environment variables because the state of the KMS key used is not valid for Decrypt. Check the function's KMS key settings.
", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "KMSKeyArn":{ - "type":"string", - "pattern":"(arn:(aws[a-zA-Z-]*)?:[a-z0-9-.]+:.*)|()" - }, - "KMSNotFoundException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"Lambda couldn't decrypt the environment variables because the KMS key was not found. Check the function's KMS key settings.
", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "KafkaSchemaRegistryAccessConfig":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"KafkaSchemaRegistryAuthType", - "documentation":"The type of authentication Lambda uses to access your schema registry.
" - }, - "URI":{ - "shape":"Arn", - "documentation":"The URI of the secret (Secrets Manager secret ARN) to authenticate with your schema registry.
" - } - }, - "documentation":"Specific access configuration settings that tell Lambda how to authenticate with your schema registry.
If you're working with a Glue schema registry, don't provide authentication details in this object. Instead, ensure that your execution role has the required permissions for Lambda to access your cluster.
If you're working with a Confluent schema registry, choose the authentication method in the Type field, and provide the Secrets Manager secret ARN in the URI field.
The URI for your schema registry. The correct URI format depends on the type of schema registry you're using.
For Glue schema registries, use the ARN of the registry.
For Confluent schema registries, use the URL of the registry.
The record format that Lambda delivers to your function after schema validation.
Choose JSON to have Lambda deliver the record to your function as a standard JSON object.
Choose SOURCE to have Lambda deliver the record to your function in its original source format. Lambda removes all schema metadata, such as the schema ID, before sending the record to your function.
An array of access configuration objects that tell Lambda how to authenticate with your schema registry.
" - }, - "SchemaValidationConfigs":{ - "shape":"KafkaSchemaValidationConfigList", - "documentation":"An array of schema validation configuration objects, which tell Lambda the message attributes you want to validate and filter using your schema registry.
" - } - }, - "documentation":"Specific configuration settings for a Kafka schema registry.
" - }, - "KafkaSchemaValidationAttribute":{ - "type":"string", - "enum":[ - "KEY", - "VALUE" - ] - }, - "KafkaSchemaValidationConfig":{ - "type":"structure", - "members":{ - "Attribute":{ - "shape":"KafkaSchemaValidationAttribute", - "documentation":" The attributes you want your schema registry to validate and filter for. If you selected JSON as the EventRecordFormat, Lambda also deserializes the selected message attributes.
Specific schema validation configuration settings that tell Lambda the message attributes you want to validate and filter using your schema registry.
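Tying the schema registry shapes above together, here is a minimal sketch of attaching a Confluent schema registry to a self-managed Kafka event source. It assumes a recent boto3 release that exposes SchemaRegistryConfig; all ARNs, endpoints, and topic names are placeholders.

```python
import boto3

lambda_client = boto3.client("lambda")

lambda_client.create_event_source_mapping(
    FunctionName="my-function",
    Topics=["orders"],
    StartingPosition="LATEST",
    SelfManagedEventSource={
        "Endpoints": {"KAFKA_BOOTSTRAP_SERVERS": ["broker-1.example.com:9092"]}
    },
    SourceAccessConfigurations=[
        {"Type": "BASIC_AUTH",
         "URI": "arn:aws:secretsmanager:us-east-1:123456789012:secret:broker-creds"}
    ],
    SelfManagedKafkaEventSourceConfig={
        "SchemaRegistryConfig": {
            "SchemaRegistryURI": "https://registry.example.com",
            "EventRecordFormat": "JSON",   # or "SOURCE" to keep the original format
            "AccessConfigs": [
                {"Type": "BASIC_AUTH",
                 "URI": "arn:aws:secretsmanager:us-east-1:123456789012:secret:registry-creds"}
            ],
            # Validate (and, for JSON, deserialize) the record value.
            "SchemaValidationConfigs": [{"Attribute": "VALUE"}],
        }
    },
)
```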
" - }, - "KafkaSchemaValidationConfigList":{ - "type":"list", - "member":{"shape":"KafkaSchemaValidationConfig"} - }, - "LastUpdateStatus":{ - "type":"string", - "enum":[ - "Successful", - "Failed", - "InProgress" - ] - }, - "LastUpdateStatusReason":{"type":"string"}, - "LastUpdateStatusReasonCode":{ - "type":"string", - "enum":[ - "EniLimitExceeded", - "InsufficientRolePermissions", - "InvalidConfiguration", - "InternalError", - "SubnetOutOfIPAddresses", - "InvalidSubnet", - "InvalidSecurityGroup", - "ImageDeleted", - "ImageAccessDenied", - "InvalidImage", - "KMSKeyAccessDenied", - "KMSKeyNotFound", - "InvalidStateKMSKey", - "DisabledKMSKey", - "EFSIOError", - "EFSMountConnectivityError", - "EFSMountFailure", - "EFSMountTimeout", - "InvalidRuntime", - "InvalidZipFileException", - "FunctionError" - ] - }, - "Layer":{ - "type":"structure", - "members":{ - "Arn":{ - "shape":"LayerVersionArn", - "documentation":"The Amazon Resource Name (ARN) of the function layer.
" - }, - "CodeSize":{ - "shape":"Long", - "documentation":"The size of the layer archive in bytes.
" - }, - "SigningProfileVersionArn":{ - "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) for a signing profile version.
" - }, - "SigningJobArn":{ - "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of a signing job.
" - } - }, - "documentation":"An Lambda layer.
" - }, - "LayerArn":{ - "type":"string", - "max":140, - "min":1, - "pattern":"arn:[a-zA-Z0-9-]+:lambda:[a-zA-Z0-9-]+:\\d{12}:layer:[a-zA-Z0-9-_]+" - }, - "LayerList":{ - "type":"list", - "member":{"shape":"LayerVersionArn"} - }, - "LayerName":{ - "type":"string", - "max":140, - "min":1, - "pattern":"(arn:[a-zA-Z0-9-]+:lambda:[a-zA-Z0-9-]+:\\d{12}:layer:[a-zA-Z0-9-_]+)|[a-zA-Z0-9-_]+" - }, - "LayerPermissionAllowedAction":{ - "type":"string", - "max":22, - "min":0, - "pattern":"lambda:GetLayerVersion" - }, - "LayerPermissionAllowedPrincipal":{ - "type":"string", - "pattern":"\\d{12}|\\*|arn:(aws[a-zA-Z-]*):iam::\\d{12}:root" - }, - "LayerVersionArn":{ - "type":"string", - "max":140, - "min":1, - "pattern":"arn:[a-zA-Z0-9-]+:lambda:[a-zA-Z0-9-]+:\\d{12}:layer:[a-zA-Z0-9-_]+:[0-9]+" - }, - "LayerVersionContentInput":{ - "type":"structure", - "members":{ - "S3Bucket":{ - "shape":"S3Bucket", - "documentation":"The Amazon S3 bucket of the layer archive.
" - }, - "S3Key":{ - "shape":"S3Key", - "documentation":"The Amazon S3 key of the layer archive.
" - }, - "S3ObjectVersion":{ - "shape":"S3ObjectVersion", - "documentation":"For versioned objects, the version of the layer archive object to use.
" - }, - "ZipFile":{ - "shape":"Blob", - "documentation":"The base64-encoded contents of the layer archive. Amazon Web Services SDK and Amazon Web Services CLI clients handle the encoding for you.
" - } - }, - "documentation":"A ZIP archive that contains the contents of an Lambda layer. You can specify either an Amazon S3 location, or upload a layer archive directly.
" - }, - "LayerVersionContentOutput":{ - "type":"structure", - "members":{ - "Location":{ - "shape":"String", - "documentation":"A link to the layer archive in Amazon S3 that is valid for 10 minutes.
" - }, - "CodeSha256":{ - "shape":"String", - "documentation":"The SHA-256 hash of the layer archive.
" - }, - "CodeSize":{ - "shape":"Long", - "documentation":"The size of the layer archive in bytes.
" - }, - "SigningProfileVersionArn":{ - "shape":"String", - "documentation":"The Amazon Resource Name (ARN) for a signing profile version.
" - }, - "SigningJobArn":{ - "shape":"String", - "documentation":"The Amazon Resource Name (ARN) of a signing job.
" - } - }, - "documentation":"Details about a version of an Lambda layer.
" - }, - "LayerVersionNumber":{"type":"long"}, - "LayerVersionsList":{ - "type":"list", - "member":{"shape":"LayerVersionsListItem"} - }, - "LayerVersionsListItem":{ - "type":"structure", - "members":{ - "LayerVersionArn":{ - "shape":"LayerVersionArn", - "documentation":"The ARN of the layer version.
" - }, - "Version":{ - "shape":"LayerVersionNumber", - "documentation":"The version number.
" - }, - "Description":{ - "shape":"Description", - "documentation":"The description of the version.
" - }, - "CreatedDate":{ - "shape":"Timestamp", - "documentation":"The date that the version was created, in ISO 8601 format. For example, 2018-11-27T15:10:45.123+0000.
The layer's compatible runtimes.
The following list includes deprecated runtimes. For more information, see Runtime use after deprecation.
For a list of all currently supported runtimes, see Supported runtimes.
" - }, - "LicenseInfo":{ - "shape":"LicenseInfo", - "documentation":"The layer's open-source license.
" - }, - "CompatibleArchitectures":{ - "shape":"CompatibleArchitectures", - "documentation":"A list of compatible instruction set architectures.
" - } - }, - "documentation":"Details about a version of an Lambda layer.
" - }, - "LayersList":{ - "type":"list", - "member":{"shape":"LayersListItem"} - }, - "LayersListItem":{ - "type":"structure", - "members":{ - "LayerName":{ - "shape":"LayerName", - "documentation":"The name of the layer.
" - }, - "LayerArn":{ - "shape":"LayerArn", - "documentation":"The Amazon Resource Name (ARN) of the function layer.
" - }, - "LatestMatchingVersion":{ - "shape":"LayerVersionsListItem", - "documentation":"The newest version of the layer.
" - } - }, - "documentation":"Details about an Lambda layer.
" - }, - "LayersReferenceList":{ - "type":"list", - "member":{"shape":"Layer"} - }, - "LicenseInfo":{ - "type":"string", - "max":512, - "min":0 - }, - "ListAliasesRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name - MyFunction.
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.
Partial ARN - 123456789012:function:MyFunction.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "FunctionVersion":{ - "shape":"Version", - "documentation":"Specify a function version to only list aliases that invoke that version.
", - "location":"querystring", - "locationName":"FunctionVersion" - }, - "Marker":{ - "shape":"String", - "documentation":"Specify the pagination token that's returned by a previous request to retrieve the next page of results.
", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxListItems", - "documentation":"Limit the number of aliases returned.
", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListAliasesResponse":{ - "type":"structure", - "members":{ - "NextMarker":{ - "shape":"String", - "documentation":"The pagination token that's included if more results are available.
" - }, - "Aliases":{ - "shape":"AliasList", - "documentation":"A list of aliases.
" - } - } - }, - "ListCodeSigningConfigsRequest":{ - "type":"structure", - "members":{ - "Marker":{ - "shape":"String", - "documentation":"Specify the pagination token that's returned by a previous request to retrieve the next page of results.
", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxListItems", - "documentation":"Maximum number of items to return.
", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListCodeSigningConfigsResponse":{ - "type":"structure", - "members":{ - "NextMarker":{ - "shape":"String", - "documentation":"The pagination token that's included if more results are available.
" - }, - "CodeSigningConfigs":{ - "shape":"CodeSigningConfigList", - "documentation":"The code signing configurations
" - } - } - }, - "ListDurableExecutionsByFunctionRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "location":"querystring", - "locationName":"Qualifier" - }, - "DurableExecutionName":{ - "shape":"DurableExecutionName", - "location":"querystring", - "locationName":"DurableExecutionName" - }, - "Statuses":{ - "shape":"ExecutionStatusList", - "location":"querystring", - "locationName":"Statuses" - }, - "StartedAfter":{ - "shape":"ExecutionTimestamp", - "location":"querystring", - "locationName":"StartedAfter" - }, - "StartedBefore":{ - "shape":"ExecutionTimestamp", - "location":"querystring", - "locationName":"StartedBefore" - }, - "ReverseOrder":{ - "shape":"ReverseOrder", - "location":"querystring", - "locationName":"ReverseOrder" - }, - "Marker":{ - "shape":"String", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"ItemCount", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListDurableExecutionsByFunctionResponse":{ - "type":"structure", - "members":{ - "DurableExecutions":{"shape":"DurableExecutions"}, - "NextMarker":{"shape":"String"} - } - }, - "ListEventSourceMappingsRequest":{ - "type":"structure", - "members":{ - "EventSourceArn":{ - "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the event source.
Amazon Kinesis – The ARN of the data stream or a stream consumer.
Amazon DynamoDB Streams – The ARN of the stream.
Amazon Simple Queue Service – The ARN of the queue.
Amazon Managed Streaming for Apache Kafka – The ARN of the cluster or the ARN of the VPC connection (for cross-account event source mappings).
Amazon MQ – The ARN of the broker.
Amazon DocumentDB – The ARN of the DocumentDB change stream.
The name or ARN of the Lambda function.
Name formats
Function name – MyFunction.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:MyFunction.
Version or Alias ARN – arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD.
Partial ARN – 123456789012:function:MyFunction.
The length constraint applies only to the full ARN. If you specify only the function name, it's limited to 64 characters in length.
", - "location":"querystring", - "locationName":"FunctionName" - }, - "Marker":{ - "shape":"String", - "documentation":"A pagination token returned by a previous call.
", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxListItems", - "documentation":"The maximum number of event source mappings to return. Note that ListEventSourceMappings returns a maximum of 100 items in each response, even if you set the number higher.
", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListEventSourceMappingsResponse":{ - "type":"structure", - "members":{ - "NextMarker":{ - "shape":"String", - "documentation":"A pagination token that's returned when the response doesn't contain all event source mappings.
" - }, - "EventSourceMappings":{ - "shape":"EventSourceMappingsList", - "documentation":"A list of event source mappings.
" - } - } - }, - "ListFunctionEventInvokeConfigsRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name - my-function.
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN - 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Marker":{ - "shape":"String", - "documentation":"Specify the pagination token that's returned by a previous request to retrieve the next page of results.
", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxFunctionEventInvokeConfigListItems", - "documentation":"The maximum number of configurations to return.
", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListFunctionEventInvokeConfigsResponse":{ - "type":"structure", - "members":{ - "FunctionEventInvokeConfigs":{ - "shape":"FunctionEventInvokeConfigList", - "documentation":"A list of configurations.
" - }, - "NextMarker":{ - "shape":"String", - "documentation":"The pagination token that's included if more results are available.
" - } - } - }, - "ListFunctionUrlConfigsRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Marker":{ - "shape":"String", - "documentation":"Specify the pagination token that's returned by a previous request to retrieve the next page of results.
", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxItems", - "documentation":"The maximum number of function URLs to return in the response. Note that ListFunctionUrlConfigs returns a maximum of 50 items in each response, even if you set the number higher.
A list of function URL configurations.
" - }, - "NextMarker":{ - "shape":"String", - "documentation":"The pagination token that's included if more results are available.
" - } - } - }, - "ListFunctionsByCodeSigningConfigRequest":{ - "type":"structure", - "required":["CodeSigningConfigArn"], - "members":{ - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"The The Amazon Resource Name (ARN) of the code signing configuration.
", - "location":"uri", - "locationName":"CodeSigningConfigArn" - }, - "Marker":{ - "shape":"String", - "documentation":"Specify the pagination token that's returned by a previous request to retrieve the next page of results.
", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxListItems", - "documentation":"Maximum number of items to return.
", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListFunctionsByCodeSigningConfigResponse":{ - "type":"structure", - "members":{ - "NextMarker":{ - "shape":"String", - "documentation":"The pagination token that's included if more results are available.
" - }, - "FunctionArns":{ - "shape":"FunctionArnList", - "documentation":"The function ARNs.
" - } - } - }, - "ListFunctionsRequest":{ - "type":"structure", - "members":{ - "MasterRegion":{ - "shape":"MasterRegion", - "documentation":"For Lambda@Edge functions, the Amazon Web Services Region of the master function. For example, us-east-1 filters the list of functions to include only Lambda@Edge functions replicated from a master function in US East (N. Virginia). If specified, you must set FunctionVersion to ALL.
Set to ALL to include entries for all published versions of each function.
Specify the pagination token that's returned by a previous request to retrieve the next page of results.
", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxListItems", - "documentation":"The maximum number of functions to return in the response. Note that ListFunctions returns a maximum of 50 items in each response, even if you set the number higher.
The pagination token that's included if more results are available.
" - }, - "Functions":{ - "shape":"FunctionList", - "documentation":"A list of Lambda functions.
" - } - }, - "documentation":"A list of Lambda functions.
" - }, - "ListLayerVersionsRequest":{ - "type":"structure", - "required":["LayerName"], - "members":{ - "CompatibleRuntime":{ - "shape":"Runtime", - "documentation":"A runtime identifier.
The following list includes deprecated runtimes. For more information, see Runtime use after deprecation.
For a list of all currently supported runtimes, see Supported runtimes.
", - "location":"querystring", - "locationName":"CompatibleRuntime" - }, - "LayerName":{ - "shape":"LayerName", - "documentation":"The name or Amazon Resource Name (ARN) of the layer.
", - "location":"uri", - "locationName":"LayerName" - }, - "Marker":{ - "shape":"String", - "documentation":"A pagination token returned by a previous call.
", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxLayerListItems", - "documentation":"The maximum number of versions to return.
", - "location":"querystring", - "locationName":"MaxItems" - }, - "CompatibleArchitecture":{ - "shape":"Architecture", - "documentation":"The compatible instruction set architecture.
", - "location":"querystring", - "locationName":"CompatibleArchitecture" - } - } - }, - "ListLayerVersionsResponse":{ - "type":"structure", - "members":{ - "NextMarker":{ - "shape":"String", - "documentation":"A pagination token returned when the response doesn't contain all versions.
" - }, - "LayerVersions":{ - "shape":"LayerVersionsList", - "documentation":"A list of versions.
" - } - } - }, - "ListLayersRequest":{ - "type":"structure", - "members":{ - "CompatibleRuntime":{ - "shape":"Runtime", - "documentation":"A runtime identifier.
The following list includes deprecated runtimes. For more information, see Runtime use after deprecation.
For a list of all currently supported runtimes, see Supported runtimes.
", - "location":"querystring", - "locationName":"CompatibleRuntime" - }, - "Marker":{ - "shape":"String", - "documentation":"A pagination token returned by a previous call.
", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxLayerListItems", - "documentation":"The maximum number of layers to return.
", - "location":"querystring", - "locationName":"MaxItems" - }, - "CompatibleArchitecture":{ - "shape":"Architecture", - "documentation":"The compatible instruction set architecture.
", - "location":"querystring", - "locationName":"CompatibleArchitecture" - } - } - }, - "ListLayersResponse":{ - "type":"structure", - "members":{ - "NextMarker":{ - "shape":"String", - "documentation":"A pagination token returned when the response doesn't contain all layers.
" - }, - "Layers":{ - "shape":"LayersList", - "documentation":"A list of function layers.
" - } - } - }, - "ListProvisionedConcurrencyConfigsRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Marker":{ - "shape":"String", - "documentation":"Specify the pagination token that's returned by a previous request to retrieve the next page of results.
", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxProvisionedConcurrencyConfigListItems", - "documentation":"Specify a number to limit the number of configurations returned.
", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListProvisionedConcurrencyConfigsResponse":{ - "type":"structure", - "members":{ - "ProvisionedConcurrencyConfigs":{ - "shape":"ProvisionedConcurrencyConfigList", - "documentation":"A list of provisioned concurrency configurations.
" - }, - "NextMarker":{ - "shape":"String", - "documentation":"The pagination token that's included if more results are available.
" - } - } - }, - "ListTagsRequest":{ - "type":"structure", - "required":["Resource"], - "members":{ - "Resource":{ - "shape":"TaggableResource", - "documentation":"The resource's Amazon Resource Name (ARN). Note: Lambda does not support adding tags to function aliases or versions.
", - "location":"uri", - "locationName":"Resource" - } - } - }, - "ListTagsResponse":{ - "type":"structure", - "members":{ - "Tags":{ - "shape":"Tags", - "documentation":"The function's tags.
" - } - } - }, - "ListVersionsByFunctionRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name - MyFunction.
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.
Partial ARN - 123456789012:function:MyFunction.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Marker":{ - "shape":"String", - "documentation":"Specify the pagination token that's returned by a previous request to retrieve the next page of results.
", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxListItems", - "documentation":"The maximum number of versions to return. Note that ListVersionsByFunction returns a maximum of 50 items in each response, even if you set the number higher.
The pagination token that's included if more results are available.
" - }, - "Versions":{ - "shape":"FunctionList", - "documentation":"A list of Lambda function versions.
" - } - } - }, - "LocalMountPath":{ - "type":"string", - "max":160, - "min":0, - "pattern":"/mnt/[a-zA-Z0-9-_.]+" - }, - "LogFormat":{ - "type":"string", - "enum":[ - "JSON", - "Text" - ] - }, - "LogGroup":{ - "type":"string", - "max":512, - "min":1, - "pattern":"[\\.\\-_/#A-Za-z0-9]+" - }, - "LogType":{ - "type":"string", - "enum":[ - "None", - "Tail" - ] - }, - "LoggingConfig":{ - "type":"structure", - "members":{ - "LogFormat":{ - "shape":"LogFormat", - "documentation":"The format in which Lambda sends your function's application and system logs to CloudWatch. Select between plain text and structured JSON.
" - }, - "ApplicationLogLevel":{ - "shape":"ApplicationLogLevel", - "documentation":"Set this property to filter the application logs for your function that Lambda sends to CloudWatch. Lambda only sends application logs at the selected level of detail and lower, where TRACE is the highest level and FATAL is the lowest.
Set this property to filter the system logs for your function that Lambda sends to CloudWatch. Lambda only sends system logs at the selected level of detail and lower, where DEBUG is the highest level and WARN is the lowest.
The name of the Amazon CloudWatch log group the function sends logs to. By default, Lambda functions send logs to a default log group named /aws/lambda/<function name>. To use a different log group, enter an existing log group or enter a new log group name.
The function's Amazon CloudWatch Logs configuration settings.
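Putting the LoggingConfig members above together, a sketch of switching a function to structured JSON logs with filtered levels and a custom log group (function and log group names are placeholders):

```python
import boto3

boto3.client("lambda").update_function_configuration(
    FunctionName="my-function",
    LoggingConfig={
        "LogFormat": "JSON",
        "ApplicationLogLevel": "INFO",   # drop TRACE/DEBUG application logs
        "SystemLogLevel": "WARN",        # drop DEBUG/INFO system logs
        "LogGroup": "/custom/my-function",
    },
)
```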
" - }, - "Long":{"type":"long"}, - "MasterRegion":{ - "type":"string", - "pattern":"ALL|[a-z]{2}(-gov)?-[a-z]+-\\d{1}" - }, - "MaxAge":{ - "type":"integer", - "box":true, - "max":86400, - "min":0 - }, - "MaxFunctionEventInvokeConfigListItems":{ - "type":"integer", - "box":true, - "max":50, - "min":1 - }, - "MaxItems":{ - "type":"integer", - "box":true, - "max":50, - "min":1 - }, - "MaxLayerListItems":{ - "type":"integer", - "box":true, - "max":50, - "min":1 - }, - "MaxListItems":{ - "type":"integer", - "box":true, - "max":10000, - "min":1 - }, - "MaxProvisionedConcurrencyConfigListItems":{ - "type":"integer", - "box":true, - "max":50, - "min":1 - }, - "MaximumBatchingWindowInSeconds":{ - "type":"integer", - "box":true, - "max":300, - "min":0 - }, - "MaximumConcurrency":{ - "type":"integer", - "box":true, - "max":1000, - "min":2 - }, - "MaximumEventAgeInSeconds":{ - "type":"integer", - "box":true, - "max":21600, - "min":60 - }, - "MaximumNumberOfPollers":{ - "type":"integer", - "box":true, - "max":2000, - "min":1 - }, - "MaximumRecordAgeInSeconds":{ - "type":"integer", - "box":true, - "max":604800, - "min":-1 - }, - "MaximumRetryAttempts":{ - "type":"integer", - "box":true, - "max":2, - "min":0 - }, - "MaximumRetryAttemptsEventSourceMapping":{ - "type":"integer", - "box":true, - "max":10000, - "min":-1 - }, - "MemorySize":{ - "type":"integer", - "box":true, - "max":10240, - "min":128 - }, - "Method":{ - "type":"string", - "max":6, - "min":0, - "pattern":".*" - }, - "MinimumNumberOfPollers":{ - "type":"integer", - "box":true, - "max":200, - "min":1 - }, - "NameSpacedFunctionArn":{ - "type":"string", - "pattern":"arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_\\.]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?" - }, - "NamespacedFunctionName":{ - "type":"string", - "max":170, - "min":1, - "pattern":"(arn:(aws[a-zA-Z-]*)?:lambda:)?([a-z]{2}(-gov)?-[a-z]+-\\d{1}:)?(\\d{12}:)?(function:)?([a-zA-Z0-9-_\\.]+)(:(\\$LATEST|[a-zA-Z0-9-_]+))?" - }, - "NamespacedStatementId":{ - "type":"string", - "max":100, - "min":1, - "pattern":"([a-zA-Z0-9-_.]+)" - }, - "NonNegativeInteger":{ - "type":"integer", - "box":true, - "min":0 - }, - "NullableBoolean":{ - "type":"boolean", - "box":true - }, - "OnFailure":{ - "type":"structure", - "members":{ - "Destination":{ - "shape":"DestinationArn", - "documentation":"The Amazon Resource Name (ARN) of the destination resource.
To retain records of unsuccessful asynchronous invocations, you can configure an Amazon SNS topic, Amazon SQS queue, Amazon S3 bucket, Lambda function, or Amazon EventBridge event bus as the destination.
To retain records of failed invocations from Kinesis, DynamoDB, self-managed Kafka or Amazon MSK, you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination.
" - } - }, - "documentation":"A destination for events that failed processing. For more information, see Adding a destination.
" - }, - "OnSuccess":{ - "type":"structure", - "members":{ - "Destination":{ - "shape":"DestinationArn", - "documentation":"The Amazon Resource Name (ARN) of the destination resource.
" - } - }, - "documentation":"A destination for events that were processed successfully.
To retain records of successful asynchronous invocations, you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.
OnSuccess is not supported in CreateEventSourceMapping or UpdateEventSourceMapping requests.
The permissions policy for the resource is too large. For more information, see Lambda quotas.
", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "PositiveInteger":{ - "type":"integer", - "box":true, - "min":1 - }, - "PreconditionFailedException":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"String", - "documentation":"The exception type.
" - }, - "message":{ - "shape":"String", - "documentation":"The exception message.
" - } - }, - "documentation":"The RevisionId provided does not match the latest RevisionId for the Lambda function or alias.
For AddPermission and RemovePermission API operations: Call GetPolicy to retrieve the latest RevisionId for your resource.
For all other API operations: Call GetFunction or GetAlias to retrieve the latest RevisionId for your resource.
The Amazon Resource Name (ARN) of the alias or version.
" - }, - "RequestedProvisionedConcurrentExecutions":{ - "shape":"PositiveInteger", - "documentation":"The amount of provisioned concurrency requested.
" - }, - "AvailableProvisionedConcurrentExecutions":{ - "shape":"NonNegativeInteger", - "documentation":"The amount of provisioned concurrency available.
" - }, - "AllocatedProvisionedConcurrentExecutions":{ - "shape":"NonNegativeInteger", - "documentation":"The amount of provisioned concurrency allocated. When a weighted alias is used during linear and canary deployments, this value fluctuates depending on the amount of concurrency that is provisioned for the function versions.
" - }, - "Status":{ - "shape":"ProvisionedConcurrencyStatusEnum", - "documentation":"The status of the allocation process.
" - }, - "StatusReason":{ - "shape":"String", - "documentation":"For failed allocations, the reason that provisioned concurrency could not be allocated.
" - }, - "LastModified":{ - "shape":"Timestamp", - "documentation":"The date and time that a user last updated the configuration, in ISO 8601 format.
" - } - }, - "documentation":"Details about the provisioned concurrency configuration for a function alias or version.
" - }, - "ProvisionedConcurrencyConfigNotFoundException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "message":{"shape":"String"} - }, - "documentation":"The specified configuration does not exist.
", - "error":{ - "httpStatusCode":404, - "senderFault":true - }, - "exception":true - }, - "ProvisionedConcurrencyStatusEnum":{ - "type":"string", - "enum":[ - "IN_PROGRESS", - "READY", - "FAILED" - ] - }, - "ProvisionedPollerConfig":{ - "type":"structure", - "members":{ - "MinimumPollers":{ - "shape":"MinimumNumberOfPollers", - "documentation":"The minimum number of event pollers this event source can scale down to.
" - }, - "MaximumPollers":{ - "shape":"MaximumNumberOfPollers", - "documentation":"The maximum number of event pollers this event source can scale up to.
" - } - }, - "documentation":"The provisioned mode configuration for the event source. Use Provisioned Mode to customize the minimum and maximum number of event pollers for your event source. An event poller is a compute unit that provides approximately 5 MBps of throughput.
" - }, - "PublishLayerVersionRequest":{ - "type":"structure", - "required":[ - "LayerName", - "Content" - ], - "members":{ - "LayerName":{ - "shape":"LayerName", - "documentation":"The name or Amazon Resource Name (ARN) of the layer.
", - "location":"uri", - "locationName":"LayerName" - }, - "Description":{ - "shape":"Description", - "documentation":"The description of the version.
" - }, - "Content":{ - "shape":"LayerVersionContentInput", - "documentation":"The function layer archive.
" - }, - "CompatibleRuntimes":{ - "shape":"CompatibleRuntimes", - "documentation":"A list of compatible function runtimes. Used for filtering with ListLayers and ListLayerVersions.
The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.
" - }, - "LicenseInfo":{ - "shape":"LicenseInfo", - "documentation":"The layer's software license. It can be any of the following:
An SPDX license identifier. For example, MIT.
The URL of a license hosted on the internet. For example, https://opensource.org/licenses/MIT.
The full text of the license.
A list of compatible instruction set architectures.
" - } - } - }, - "PublishLayerVersionResponse":{ - "type":"structure", - "members":{ - "Content":{ - "shape":"LayerVersionContentOutput", - "documentation":"Details about the layer version.
" - }, - "LayerArn":{ - "shape":"LayerArn", - "documentation":"The ARN of the layer.
" - }, - "LayerVersionArn":{ - "shape":"LayerVersionArn", - "documentation":"The ARN of the layer version.
" - }, - "Description":{ - "shape":"Description", - "documentation":"The description of the version.
" - }, - "CreatedDate":{ - "shape":"Timestamp", - "documentation":"The date that the layer version was created, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).
" - }, - "Version":{ - "shape":"LayerVersionNumber", - "documentation":"The version number.
" - }, - "CompatibleRuntimes":{ - "shape":"CompatibleRuntimes", - "documentation":"The layer's compatible runtimes.
The following list includes deprecated runtimes. For more information, see Runtime use after deprecation.
For a list of all currently supported runtimes, see Supported runtimes.
" - }, - "LicenseInfo":{ - "shape":"LicenseInfo", - "documentation":"The layer's software license.
" - }, - "CompatibleArchitectures":{ - "shape":"CompatibleArchitectures", - "documentation":"A list of compatible instruction set architectures.
" - } - } - }, - "PublishVersionRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name - MyFunction.
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.
Partial ARN - 123456789012:function:MyFunction.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "CodeSha256":{ - "shape":"String", - "documentation":"Only publish a version if the hash value matches the value that's specified. Use this option to avoid publishing a version if the function code has changed since you last updated it. You can get the hash for the version that you uploaded from the output of UpdateFunctionCode.
" - }, - "Description":{ - "shape":"Description", - "documentation":"A description for the version to override the description in the function configuration.
" - }, - "RevisionId":{ - "shape":"String", - "documentation":"Only update the function if the revision ID matches the ID that's specified. Use this option to avoid publishing a version if the function configuration has changed since you last updated it.
" - } - } - }, - "PutFunctionCodeSigningConfigRequest":{ - "type":"structure", - "required":[ - "CodeSigningConfigArn", - "FunctionName" - ], - "members":{ - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"The The Amazon Resource Name (ARN) of the code signing configuration.
" - }, - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name - MyFunction.
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.
Partial ARN - 123456789012:function:MyFunction.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - } - } - }, - "PutFunctionCodeSigningConfigResponse":{ - "type":"structure", - "required":[ - "CodeSigningConfigArn", - "FunctionName" - ], - "members":{ - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"The The Amazon Resource Name (ARN) of the code signing configuration.
" - }, - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name - MyFunction.
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.
Partial ARN - 123456789012:function:MyFunction.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
" - } - } - }, - "PutFunctionConcurrencyRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "ReservedConcurrentExecutions" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "ReservedConcurrentExecutions":{ - "shape":"ReservedConcurrentExecutions", - "documentation":"The number of simultaneous executions to reserve for the function.
" - } - } - }, - "PutFunctionEventInvokeConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function, version, or alias.
Name formats
Function name - my-function (name-only), my-function:v1 (with alias).
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN - 123456789012:function:my-function.
You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"A version number or alias name.
", - "location":"querystring", - "locationName":"Qualifier" - }, - "MaximumRetryAttempts":{ - "shape":"MaximumRetryAttempts", - "documentation":"The maximum number of times to retry when the function returns an error.
" - }, - "MaximumEventAgeInSeconds":{ - "shape":"MaximumEventAgeInSeconds", - "documentation":"The maximum age of a request that Lambda sends to a function for processing.
" - }, - "DestinationConfig":{ - "shape":"DestinationConfig", - "documentation":"A destination for events after they have been sent to a function for processing.
Destinations
Function - The Amazon Resource Name (ARN) of a Lambda function.
Queue - The ARN of a standard SQS queue.
Bucket - The ARN of an Amazon S3 bucket.
Topic - The ARN of a standard SNS topic.
Event Bus - The ARN of an Amazon EventBridge event bus.
S3 buckets are supported only for on-failure destinations. To retain records of successful invocations, use another destination type.
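Following the destination list above, a sketch of configuring retries, event age, and on-failure/on-success destinations for asynchronous invocation; the queue and event bus ARNs are placeholders.

```python
import boto3

boto3.client("lambda").put_function_event_invoke_config(
    FunctionName="my-function",
    Qualifier="live",
    MaximumRetryAttempts=1,           # 0-2 retries after the initial attempt
    MaximumEventAgeInSeconds=3600,    # 60-21,600 seconds
    DestinationConfig={
        "OnFailure": {"Destination": "arn:aws:sqs:us-west-2:123456789012:failed-events"},
        "OnSuccess": {"Destination": "arn:aws:events:us-west-2:123456789012:event-bus/deliveries"},
    },
)
```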
The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "RecursiveLoop":{ - "shape":"RecursiveLoop", - "documentation":"If you set your function's recursive loop detection configuration to Allow, Lambda doesn't take any action when it detects your function being invoked as part of a recursive loop. We recommend that you only use this setting if your design intentionally uses a Lambda function to write data back to the same Amazon Web Services resource that invokes it.
If you set your function's recursive loop detection configuration to Terminate, Lambda stops invoking your function and notifies you when it detects your function being invoked as part of a recursive loop.
By default, Lambda sets your function's configuration to Terminate.
If your design intentionally uses a Lambda function to write data back to the same Amazon Web Services resource that invokes the function, then use caution and implement suitable guard rails to prevent unexpected charges being billed to your Amazon Web Services account. To learn more about best practices for using recursive invocation patterns, see Recursive patterns that cause run-away Lambda functions in Serverless Land.
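A sketch of opting a function out of recursive-loop termination with put_function_recursion_config (available in recent boto3 releases); only do this when the recursion is intentional and guarded. The function name is a placeholder.

```python
import boto3

boto3.client("lambda").put_function_recursion_config(
    FunctionName="my-function",
    RecursiveLoop="Allow",   # the default is "Terminate"
)
```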
The status of your function's recursive loop detection configuration.
When this value is set to Allow and Lambda detects your function being invoked as part of a recursive loop, it doesn't take any action.
When this value is set to Terminate and Lambda detects your function being invoked as part of a recursive loop, it stops invoking your function and notifies you.
The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"The version number or alias name.
", - "location":"querystring", - "locationName":"Qualifier" - }, - "ProvisionedConcurrentExecutions":{ - "shape":"PositiveInteger", - "documentation":"The amount of provisioned concurrency to allocate for the version or alias.
" - } - } - }, - "PutProvisionedConcurrencyConfigResponse":{ - "type":"structure", - "members":{ - "RequestedProvisionedConcurrentExecutions":{ - "shape":"PositiveInteger", - "documentation":"The amount of provisioned concurrency requested.
" - }, - "AvailableProvisionedConcurrentExecutions":{ - "shape":"NonNegativeInteger", - "documentation":"The amount of provisioned concurrency available.
" - }, - "AllocatedProvisionedConcurrentExecutions":{ - "shape":"NonNegativeInteger", - "documentation":"The amount of provisioned concurrency allocated. When a weighted alias is used during linear and canary deployments, this value fluctuates depending on the amount of concurrency that is provisioned for the function versions.
" - }, - "Status":{ - "shape":"ProvisionedConcurrencyStatusEnum", - "documentation":"The status of the allocation process.
" - }, - "StatusReason":{ - "shape":"String", - "documentation":"For failed allocations, the reason that provisioned concurrency could not be allocated.
" - }, - "LastModified":{ - "shape":"Timestamp", - "documentation":"The date and time that a user last updated the configuration, in ISO 8601 format.
" - } - } - }, - "PutRuntimeManagementConfigRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "UpdateRuntimeOn" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"Specify a version of the function. This can be $LATEST or a published version number. If no value is specified, the configuration for the $LATEST version is returned.
Specify the runtime update mode.
Auto (default) - Automatically update to the most recent and secure runtime version using a Two-phase runtime version rollout. This is the best choice for most customers to ensure they always benefit from runtime updates.
Function update - Lambda updates the runtime of your function to the most recent and secure runtime version when you update your function. This approach synchronizes runtime updates with function deployments, giving you control over when runtime updates are applied and allowing you to detect and mitigate rare runtime update incompatibilities early. When using this setting, you need to regularly update your functions to keep their runtime up-to-date.
Manual - You specify a runtime version in your function configuration. The function will use this runtime version indefinitely. In the rare case where a new runtime version is incompatible with an existing function, this allows you to roll back your function to an earlier runtime version. For more information, see Roll back a runtime version.
The ARN of the runtime version you want the function to use.
This is only required if you're using the Manual runtime update mode.
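A sketch of the two common moves with put_runtime_management_config: pinning a function to a specific runtime version, then reverting to automatic updates. The runtime version ARN is a placeholder.

```python
import boto3

lambda_client = boto3.client("lambda")

# Pin the function to one runtime version (Manual mode requires the ARN).
lambda_client.put_runtime_management_config(
    FunctionName="my-function",
    Qualifier="$LATEST",
    UpdateRuntimeOn="Manual",
    RuntimeVersionArn="arn:aws:lambda:us-west-2::runtime:abc123",
)

# Revert to automatic two-phase runtime updates.
lambda_client.put_runtime_management_config(
    FunctionName="my-function",
    UpdateRuntimeOn="Auto",
)
```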
The runtime update mode.
" - }, - "FunctionArn":{ - "shape":"FunctionArn", - "documentation":"The ARN of the function
" - }, - "RuntimeVersionArn":{ - "shape":"RuntimeVersionArn", - "documentation":"The ARN of the runtime the function is configured to use. If the runtime update mode is manual, the ARN is returned, otherwise null is returned.
The exception type.
" - }, - "Message":{ - "shape":"String", - "documentation":"The exception message.
" - } - }, - "documentation":"Lambda has detected your function being invoked in a recursive loop with other Amazon Web Services resources and stopped your function's invocation.
", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "RecursiveLoop":{ - "type":"string", - "enum":[ - "Allow", - "Terminate" - ] - }, - "RemoveLayerVersionPermissionRequest":{ - "type":"structure", - "required":[ - "LayerName", - "VersionNumber", - "StatementId" - ], - "members":{ - "LayerName":{ - "shape":"LayerName", - "documentation":"The name or Amazon Resource Name (ARN) of the layer.
", - "location":"uri", - "locationName":"LayerName" - }, - "VersionNumber":{ - "shape":"LayerVersionNumber", - "documentation":"The version number.
", - "location":"uri", - "locationName":"VersionNumber" - }, - "StatementId":{ - "shape":"StatementId", - "documentation":"The identifier that was specified when the statement was added.
", - "location":"uri", - "locationName":"StatementId" - }, - "RevisionId":{ - "shape":"String", - "documentation":"Only update the policy if the revision ID matches the ID specified. Use this option to avoid modifying a policy that has changed since you last read it.
", - "location":"querystring", - "locationName":"RevisionId" - } - } - }, - "RemovePermissionRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "StatementId" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function, version, or alias.
Name formats
Function name – my-function (name-only), my-function:v1 (with alias).
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "StatementId":{ - "shape":"NamespacedStatementId", - "documentation":"Statement ID of the permission to remove.
", - "location":"uri", - "locationName":"StatementId" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"Specify a version or alias to remove permissions from a published version of the function.
", - "location":"querystring", - "locationName":"Qualifier" - }, - "RevisionId":{ - "shape":"String", - "documentation":"Update the policy only if the revision ID matches the ID that's specified. Use this option to avoid modifying a policy that has changed since you last read it.
", - "location":"querystring", - "locationName":"RevisionId" - } - } - }, - "ReplayChildren":{ - "type":"boolean", - "box":true - }, - "RequestTooLargeException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "message":{"shape":"String"} - }, - "documentation":"The request payload exceeded the Invoke request body JSON input quota. For more information, see Lambda quotas.
The exception type.
" - }, - "message":{ - "shape":"String", - "documentation":"The exception message.
" - } - }, - "documentation":"The resource already exists, or another operation is in progress.
", - "error":{ - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - "ResourceInUseException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"The operation conflicts with the resource's availability. For example, you tried to update an event source mapping in the CREATING state, or you tried to delete an event source mapping currently UPDATING.
", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "ResourceNotFoundException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"The resource specified in the request does not exist.
", - "error":{ - "httpStatusCode":404, - "senderFault":true - }, - "exception":true - }, - "ResourceNotReadyException":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"String", - "documentation":"The exception type.
" - }, - "message":{ - "shape":"String", - "documentation":"The exception message.
" - } - }, - "documentation":"The function is inactive and its VPC connection is no longer available. Wait for the VPC connection to reestablish and try again.
", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "ResponseStreamingInvocationType":{ - "type":"string", - "enum":[ - "RequestResponse", - "DryRun" - ] - }, - "RetentionPeriodInDays":{ - "type":"integer", - "box":true, - "max":90, - "min":1 - }, - "RetryDetails":{ - "type":"structure", - "members":{ - "CurrentAttempt":{"shape":"AttemptCount"}, - "NextAttemptDelaySeconds":{"shape":"DurationSeconds"} - } - }, - "ReverseOrder":{ - "type":"boolean", - "box":true - }, - "RoleArn":{ - "type":"string", - "pattern":"arn:(aws[a-zA-Z-]*)?:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+" - }, - "Runtime":{ - "type":"string", - "enum":[ - "nodejs", - "nodejs4.3", - "nodejs6.10", - "nodejs8.10", - "nodejs10.x", - "nodejs12.x", - "nodejs14.x", - "nodejs16.x", - "java8", - "java8.al2", - "java11", - "python2.7", - "python3.6", - "python3.7", - "python3.8", - "python3.9", - "dotnetcore1.0", - "dotnetcore2.0", - "dotnetcore2.1", - "dotnetcore3.1", - "dotnet6", - "dotnet8", - "nodejs4.3-edge", - "go1.x", - "ruby2.5", - "ruby2.7", - "provided", - "provided.al2", - "nodejs18.x", - "python3.10", - "java17", - "ruby3.2", - "ruby3.3", - "ruby3.4", - "python3.11", - "nodejs20.x", - "provided.al2023", - "python3.12", - "java21", - "python3.13", - "nodejs22.x" - ] - }, - "RuntimeVersionArn":{ - "type":"string", - "max":2048, - "min":26, - "pattern":"arn:(aws[a-zA-Z-]*):lambda:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}::runtime:.+" - }, - "RuntimeVersionConfig":{ - "type":"structure", - "members":{ - "RuntimeVersionArn":{ - "shape":"RuntimeVersionArn", - "documentation":"The ARN of the runtime version you want the function to use.
" - }, - "Error":{ - "shape":"RuntimeVersionError", - "documentation":"Error response when Lambda is unable to retrieve the runtime version for a function.
" - } - }, - "documentation":"The ARN of the runtime and any errors that occured.
" - }, - "RuntimeVersionError":{ - "type":"structure", - "members":{ - "ErrorCode":{ - "shape":"String", - "documentation":"The error code.
" - }, - "Message":{ - "shape":"SensitiveString", - "documentation":"The error message.
" - } - }, - "documentation":"Any error returned when the runtime version information for the function could not be retrieved.
" - }, - "S3Bucket":{ - "type":"string", - "max":63, - "min":3, - "pattern":"[0-9A-Za-z\\.\\-_]*(?Limits the number of concurrent instances that the Amazon SQS event source can invoke." - } - }, - "documentation":"(Amazon SQS only) The scaling configuration for the event source. To remove the configuration, pass an empty value.
" - }, - "SchemaRegistryEventRecordFormat":{ - "type":"string", - "enum":[ - "JSON", - "SOURCE" - ] - }, - "SchemaRegistryUri":{ - "type":"string", - "max":10000, - "min":1, - "pattern":"[a-zA-Z0-9-\\/*:_+=.@-]*" - }, - "SecurityGroupId":{"type":"string"}, - "SecurityGroupIds":{ - "type":"list", - "member":{"shape":"SecurityGroupId"}, - "max":5, - "min":0 - }, - "SelfManagedEventSource":{ - "type":"structure", - "members":{ - "Endpoints":{ - "shape":"Endpoints", - "documentation":"The list of bootstrap servers for your Kafka brokers in the following format: \"KAFKA_BOOTSTRAP_SERVERS\": [\"abc.xyz.com:xxxx\",\"abc2.xyz.com:xxxx\"].
The self-managed Apache Kafka cluster for your event source.
" - }, - "SelfManagedKafkaEventSourceConfig":{ - "type":"structure", - "members":{ - "ConsumerGroupId":{ - "shape":"URI", - "documentation":"The identifier for the Kafka consumer group to join. The consumer group ID must be unique among all your Kafka event sources. After creating a Kafka event source mapping with the consumer group ID specified, you cannot update this value. For more information, see Customizable consumer group ID.
" - }, - "SchemaRegistryConfig":{ - "shape":"KafkaSchemaRegistryConfig", - "documentation":"Specific configuration settings for a Kafka schema registry.
" - } - }, - "documentation":"Specific configuration settings for a self-managed Apache Kafka event source.
" - }, - "SendDurableExecutionCallbackFailureRequest":{ - "type":"structure", - "required":["CallbackId"], - "members":{ - "CallbackId":{ - "shape":"CallbackId", - "location":"uri", - "locationName":"CallbackId" - }, - "Error":{"shape":"ErrorObject"} - }, - "payload":"Error" - }, - "SendDurableExecutionCallbackFailureResponse":{ - "type":"structure", - "members":{} - }, - "SendDurableExecutionCallbackHeartbeatRequest":{ - "type":"structure", - "required":["CallbackId"], - "members":{ - "CallbackId":{ - "shape":"CallbackId", - "location":"uri", - "locationName":"CallbackId" - } - } - }, - "SendDurableExecutionCallbackHeartbeatResponse":{ - "type":"structure", - "members":{} - }, - "SendDurableExecutionCallbackSuccessRequest":{ - "type":"structure", - "required":["CallbackId"], - "members":{ - "CallbackId":{ - "shape":"CallbackId", - "location":"uri", - "locationName":"CallbackId" - }, - "Result":{"shape":"BinaryOperationPayload"} - }, - "payload":"Result" - }, - "SendDurableExecutionCallbackSuccessResponse":{ - "type":"structure", - "members":{} - }, - "SensitiveString":{ - "type":"string", - "sensitive":true - }, - "ServiceException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"The Lambda service encountered an internal error.
", - "error":{"httpStatusCode":500}, - "exception":true, - "fault":true - }, - "SigningProfileVersionArns":{ - "type":"list", - "member":{"shape":"Arn"}, - "max":20, - "min":1 - }, - "SnapStart":{ - "type":"structure", - "members":{ - "ApplyOn":{ - "shape":"SnapStartApplyOn", - "documentation":"Set to PublishedVersions to create a snapshot of the initialized execution environment when you publish a function version.
The function's Lambda SnapStart setting. Set ApplyOn to PublishedVersions to create a snapshot of the initialized execution environment when you publish a function version.
The afterRestore() runtime hook encountered an error. For more information, check the Amazon CloudWatch logs.
Lambda is initializing your function. You can invoke the function when the function state becomes Active.
When set to PublishedVersions, Lambda creates a snapshot of the execution environment when you publish a function version.
When you provide a qualified Amazon Resource Name (ARN), this response element indicates whether SnapStart is activated for the specified function version.
" - } - }, - "documentation":"The function's SnapStart setting.
" - }, - "SnapStartTimeoutException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"Lambda couldn't restore the snapshot within the timeout limit.
", - "error":{ - "httpStatusCode":408, - "senderFault":true - }, - "exception":true - }, - "SourceAccessConfiguration":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"SourceAccessType", - "documentation":"The type of authentication protocol, VPC components, or virtual host for your event source. For example: \"Type\":\"SASL_SCRAM_512_AUTH\".
BASIC_AUTH – (Amazon MQ) The Secrets Manager secret that stores your broker credentials.
BASIC_AUTH – (Self-managed Apache Kafka) The Secrets Manager ARN of your secret key used for SASL/PLAIN authentication of your Apache Kafka brokers.
VPC_SUBNET – (Self-managed Apache Kafka) The subnets associated with your VPC. Lambda connects to these subnets to fetch data from your self-managed Apache Kafka cluster.
VPC_SECURITY_GROUP – (Self-managed Apache Kafka) The VPC security group used to manage access to your self-managed Apache Kafka brokers.
SASL_SCRAM_256_AUTH – (Self-managed Apache Kafka) The Secrets Manager ARN of your secret key used for SASL SCRAM-256 authentication of your self-managed Apache Kafka brokers.
SASL_SCRAM_512_AUTH – (Amazon MSK, Self-managed Apache Kafka) The Secrets Manager ARN of your secret key used for SASL SCRAM-512 authentication of your self-managed Apache Kafka brokers.
VIRTUAL_HOST –- (RabbitMQ) The name of the virtual host in your RabbitMQ broker. Lambda uses this RabbitMQ host as the event source. This property cannot be specified in an UpdateEventSourceMapping API call.
CLIENT_CERTIFICATE_TLS_AUTH – (Amazon MSK, self-managed Apache Kafka) The Secrets Manager ARN of your secret key containing the certificate chain (X.509 PEM), private key (PKCS#8 PEM), and private key password (optional) used for mutual TLS authentication of your MSK/Apache Kafka brokers.
SERVER_ROOT_CA_CERTIFICATE – (Self-managed Apache Kafka) The Secrets Manager ARN of your secret key containing the root CA certificate (X.509 PEM) used for TLS encryption of your Apache Kafka brokers.
The value for your chosen configuration in Type. For example: \"URI\": \"arn:aws:secretsmanager:us-east-1:01234567890:secret:MyBrokerSecretName\".
To secure and define access to your event source, you can specify the authentication protocol, VPC components, or virtual host.
" - }, - "SourceAccessConfigurations":{ - "type":"list", - "member":{"shape":"SourceAccessConfiguration"}, - "max":22, - "min":0 - }, - "SourceAccessType":{ - "type":"string", - "enum":[ - "BASIC_AUTH", - "VPC_SUBNET", - "VPC_SECURITY_GROUP", - "SASL_SCRAM_512_AUTH", - "SASL_SCRAM_256_AUTH", - "VIRTUAL_HOST", - "CLIENT_CERTIFICATE_TLS_AUTH", - "SERVER_ROOT_CA_CERTIFICATE" - ] - }, - "SourceOwner":{ - "type":"string", - "max":12, - "min":0, - "pattern":"\\d{12}" - }, - "StackTraceEntries":{ - "type":"list", - "member":{"shape":"StackTraceEntry"} - }, - "StackTraceEntry":{ - "type":"string", - "sensitive":true - }, - "State":{ - "type":"string", - "enum":[ - "Pending", - "Active", - "Inactive", - "Failed" - ] - }, - "StateReason":{"type":"string"}, - "StateReasonCode":{ - "type":"string", - "enum":[ - "Idle", - "Creating", - "Restoring", - "EniLimitExceeded", - "InsufficientRolePermissions", - "InvalidConfiguration", - "InternalError", - "SubnetOutOfIPAddresses", - "InvalidSubnet", - "InvalidSecurityGroup", - "ImageDeleted", - "ImageAccessDenied", - "InvalidImage", - "KMSKeyAccessDenied", - "KMSKeyNotFound", - "InvalidStateKMSKey", - "DisabledKMSKey", - "EFSIOError", - "EFSMountConnectivityError", - "EFSMountFailure", - "EFSMountTimeout", - "InvalidRuntime", - "InvalidZipFileException", - "FunctionError", - "DrainingDurableExecutions" - ] - }, - "StatementId":{ - "type":"string", - "max":100, - "min":1, - "pattern":"([a-zA-Z0-9-_]+)" - }, - "StepDetails":{ - "type":"structure", - "members":{ - "Attempt":{"shape":"AttemptCount"}, - "NextAttemptTimestamp":{"shape":"ExecutionTimestamp"}, - "Result":{"shape":"OperationPayload"}, - "Error":{"shape":"ErrorObject"} - } - }, - "StepFailedDetails":{ - "type":"structure", - "required":[ - "Error", - "RetryDetails" - ], - "members":{ - "Error":{"shape":"EventError"}, - "RetryDetails":{"shape":"RetryDetails"} - } - }, - "StepOptions":{ - "type":"structure", - "members":{ - "NextAttemptDelaySeconds":{"shape":"StepOptionsNextAttemptDelaySecondsInteger"} - } - }, - "StepOptionsNextAttemptDelaySecondsInteger":{ - "type":"integer", - "box":true, - "max":31622400, - "min":1 - }, - "StepStartedDetails":{ - "type":"structure", - "members":{} - }, - "StepSucceededDetails":{ - "type":"structure", - "required":[ - "Result", - "RetryDetails" - ], - "members":{ - "Result":{"shape":"EventResult"}, - "RetryDetails":{"shape":"RetryDetails"} - } - }, - "StopDurableExecutionRequest":{ - "type":"structure", - "required":["DurableExecutionArn"], - "members":{ - "DurableExecutionArn":{ - "shape":"DurableExecutionArn", - "location":"uri", - "locationName":"DurableExecutionArn" - }, - "Error":{"shape":"ErrorObject"} - }, - "payload":"Error" - }, - "StopDurableExecutionResponse":{ - "type":"structure", - "required":["StopTimestamp"], - "members":{ - "StopTimestamp":{"shape":"ExecutionTimestamp"} - } - }, - "String":{"type":"string"}, - "StringList":{ - "type":"list", - "member":{"shape":"String"}, - "max":1500, - "min":0 - }, - "SubnetIPAddressLimitReachedException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"Lambda couldn't set up VPC access for the Lambda function because one or more configured subnets has no available IP addresses.
", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "SubnetId":{"type":"string"}, - "SubnetIds":{ - "type":"list", - "member":{"shape":"SubnetId"}, - "max":16, - "min":0 - }, - "SystemLogLevel":{ - "type":"string", - "enum":[ - "DEBUG", - "INFO", - "WARN" - ] - }, - "TagKey":{"type":"string"}, - "TagKeyList":{ - "type":"list", - "member":{"shape":"TagKey"} - }, - "TagResourceRequest":{ - "type":"structure", - "required":[ - "Resource", - "Tags" - ], - "members":{ - "Resource":{ - "shape":"TaggableResource", - "documentation":"The resource's Amazon Resource Name (ARN).
", - "location":"uri", - "locationName":"Resource" - }, - "Tags":{ - "shape":"Tags", - "documentation":"A list of tags to apply to the resource.
" - } - } - }, - "TagValue":{"type":"string"}, - "TaggableResource":{ - "type":"string", - "max":256, - "min":1, - "pattern":"arn:(aws[a-zA-Z-]*):lambda:[a-z]{2}((-gov)|(-iso([a-z]?)))?-[a-z]+-\\d{1}:\\d{12}:(function:[a-zA-Z0-9-_]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?|code-signing-config:csc-[a-z0-9]{17}|event-source-mapping:[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})" - }, - "Tags":{ - "type":"map", - "key":{"shape":"TagKey"}, - "value":{"shape":"TagValue"} - }, - "TagsError":{ - "type":"structure", - "required":[ - "ErrorCode", - "Message" - ], - "members":{ - "ErrorCode":{ - "shape":"TagsErrorCode", - "documentation":"The error code.
" - }, - "Message":{ - "shape":"TagsErrorMessage", - "documentation":"The error message.
" - } - }, - "documentation":"An object that contains details about an error related to retrieving tags.
" - }, - "TagsErrorCode":{ - "type":"string", - "max":21, - "min":10, - "pattern":"[A-Za-z]+Exception" - }, - "TagsErrorMessage":{ - "type":"string", - "max":1000, - "min":84, - "pattern":".*" - }, - "ThrottleReason":{ - "type":"string", - "enum":[ - "ConcurrentInvocationLimitExceeded", - "FunctionInvocationRateLimitExceeded", - "ReservedFunctionConcurrentInvocationLimitExceeded", - "ReservedFunctionInvocationRateLimitExceeded", - "CallerRateLimitExceeded", - "ConcurrentSnapshotCreateLimitExceeded" - ] - }, - "Timeout":{ - "type":"integer", - "box":true, - "min":1 - }, - "Timestamp":{"type":"string"}, - "TooManyRequestsException":{ - "type":"structure", - "members":{ - "retryAfterSeconds":{ - "shape":"String", - "documentation":"The number of seconds the caller should wait before retrying.
", - "location":"header", - "locationName":"Retry-After" - }, - "Type":{"shape":"String"}, - "message":{"shape":"String"}, - "Reason":{"shape":"ThrottleReason"} - }, - "documentation":"The request throughput limit was exceeded. For more information, see Lambda quotas.
", - "error":{ - "httpStatusCode":429, - "senderFault":true - }, - "exception":true - }, - "Topic":{ - "type":"string", - "max":249, - "min":1, - "pattern":"[^.]([a-zA-Z0-9\\-_.]+)" - }, - "Topics":{ - "type":"list", - "member":{"shape":"Topic"}, - "max":1, - "min":1 - }, - "TracingConfig":{ - "type":"structure", - "members":{ - "Mode":{ - "shape":"TracingMode", - "documentation":"The tracing mode.
" - } - }, - "documentation":"The function's X-Ray tracing configuration. To sample and record incoming requests, set Mode to Active.
The tracing mode.
" - } - }, - "documentation":"The function's X-Ray tracing configuration.
" - }, - "TracingMode":{ - "type":"string", - "enum":[ - "Active", - "PassThrough" - ] - }, - "Truncated":{ - "type":"boolean", - "box":true - }, - "TumblingWindowInSeconds":{ - "type":"integer", - "box":true, - "max":900, - "min":0 - }, - "URI":{ - "type":"string", - "max":200, - "min":1, - "pattern":"[a-zA-Z0-9-\\/*:_+=.@-]*" - }, - "UnqualifiedFunctionName":{ - "type":"string", - "max":140, - "min":1, - "pattern":"(arn:(aws[a-zA-Z-]*)?:lambda:)?([a-z]{2}((-gov)|(-iso([a-z]?)))?-[a-z]+-\\d{1}:)?(\\d{12}:)?(function:)?([a-zA-Z0-9-_]+)" - }, - "UnreservedConcurrentExecutions":{ - "type":"integer", - "box":true, - "min":0 - }, - "UnsupportedMediaTypeException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "message":{"shape":"String"} - }, - "documentation":"The content type of the Invoke request body is not JSON.
The resource's Amazon Resource Name (ARN).
", - "location":"uri", - "locationName":"Resource" - }, - "TagKeys":{ - "shape":"TagKeyList", - "documentation":"A list of tag keys to remove from the resource.
", - "location":"querystring", - "locationName":"tagKeys" - } - } - }, - "UpdateAliasRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "Name" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name - MyFunction.
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.
Partial ARN - 123456789012:function:MyFunction.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Name":{ - "shape":"Alias", - "documentation":"The name of the alias.
", - "location":"uri", - "locationName":"Name" - }, - "FunctionVersion":{ - "shape":"Version", - "documentation":"The function version that the alias invokes.
" - }, - "Description":{ - "shape":"Description", - "documentation":"A description of the alias.
" - }, - "RoutingConfig":{ - "shape":"AliasRoutingConfiguration", - "documentation":"The routing configuration of the alias.
" - }, - "RevisionId":{ - "shape":"String", - "documentation":"Only update the alias if the revision ID matches the ID that's specified. Use this option to avoid modifying an alias that has changed since you last read it.
" - } - } - }, - "UpdateCodeSigningConfigRequest":{ - "type":"structure", - "required":["CodeSigningConfigArn"], - "members":{ - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"The The Amazon Resource Name (ARN) of the code signing configuration.
", - "location":"uri", - "locationName":"CodeSigningConfigArn" - }, - "Description":{ - "shape":"Description", - "documentation":"Descriptive name for this code signing configuration.
" - }, - "AllowedPublishers":{ - "shape":"AllowedPublishers", - "documentation":"Signing profiles for this code signing configuration.
" - }, - "CodeSigningPolicies":{ - "shape":"CodeSigningPolicies", - "documentation":"The code signing policy.
" - } - } - }, - "UpdateCodeSigningConfigResponse":{ - "type":"structure", - "required":["CodeSigningConfig"], - "members":{ - "CodeSigningConfig":{ - "shape":"CodeSigningConfig", - "documentation":"The code signing configuration
" - } - } - }, - "UpdateEventSourceMappingRequest":{ - "type":"structure", - "required":["UUID"], - "members":{ - "UUID":{ - "shape":"String", - "documentation":"The identifier of the event source mapping.
", - "location":"uri", - "locationName":"UUID" - }, - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – MyFunction.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:MyFunction.
Version or Alias ARN – arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD.
Partial ARN – 123456789012:function:MyFunction.
The length constraint applies only to the full ARN. If you specify only the function name, it's limited to 64 characters in length.
" - }, - "Enabled":{ - "shape":"Enabled", - "documentation":"When true, the event source mapping is active. When false, Lambda pauses polling and invocation.
Default: True
" - }, - "BatchSize":{ - "shape":"BatchSize", - "documentation":"The maximum number of records in each batch that Lambda pulls from your stream or queue and sends to your function. Lambda passes all of the records in the batch to the function in a single call, up to the payload limit for synchronous invocation (6 MB).
Amazon Kinesis – Default 100. Max 10,000.
Amazon DynamoDB Streams – Default 100. Max 10,000.
Amazon Simple Queue Service – Default 10. For standard queues the max is 10,000. For FIFO queues the max is 10.
Amazon Managed Streaming for Apache Kafka – Default 100. Max 10,000.
Self-managed Apache Kafka – Default 100. Max 10,000.
Amazon MQ (ActiveMQ and RabbitMQ) – Default 100. Max 10,000.
DocumentDB – Default 100. Max 10,000.
An object that defines the filter criteria that determine whether Lambda should process an event. For more information, see Lambda event filtering.
" - }, - "MaximumBatchingWindowInSeconds":{ - "shape":"MaximumBatchingWindowInSeconds", - "documentation":"The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function. You can configure MaximumBatchingWindowInSeconds to any value from 0 seconds to 300 seconds in increments of seconds.
For Kinesis, DynamoDB, and Amazon SQS event sources, the default batching window is 0 seconds. For Amazon MSK, Self-managed Apache Kafka, Amazon MQ, and DocumentDB event sources, the default batching window is 500 ms. Note that because you can only change MaximumBatchingWindowInSeconds in increments of seconds, you cannot revert back to the 500 ms default batching window after you have changed it. To restore the default batching window, you must create a new event source mapping.
Related setting: For Kinesis, DynamoDB, and Amazon SQS event sources, when you set BatchSize to a value greater than 10, you must set MaximumBatchingWindowInSeconds to at least 1.
(Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Kafka only) A configuration object that specifies the destination of an event after Lambda processes it.
" - }, - "MaximumRecordAgeInSeconds":{ - "shape":"MaximumRecordAgeInSeconds", - "documentation":"(Kinesis and DynamoDB Streams only) Discard records older than the specified age. The default value is infinite (-1).
" - }, - "BisectBatchOnFunctionError":{ - "shape":"BisectBatchOnFunctionError", - "documentation":"(Kinesis and DynamoDB Streams only) If the function returns an error, split the batch in two and retry.
" - }, - "MaximumRetryAttempts":{ - "shape":"MaximumRetryAttemptsEventSourceMapping", - "documentation":"(Kinesis and DynamoDB Streams only) Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.
" - }, - "ParallelizationFactor":{ - "shape":"ParallelizationFactor", - "documentation":"(Kinesis and DynamoDB Streams only) The number of batches to process from each shard concurrently.
" - }, - "SourceAccessConfigurations":{ - "shape":"SourceAccessConfigurations", - "documentation":"An array of authentication protocols or VPC components required to secure your event source.
" - }, - "TumblingWindowInSeconds":{ - "shape":"TumblingWindowInSeconds", - "documentation":"(Kinesis and DynamoDB Streams only) The duration in seconds of a processing window for DynamoDB and Kinesis Streams event sources. A value of 0 seconds indicates no tumbling window.
" - }, - "FunctionResponseTypes":{ - "shape":"FunctionResponseTypeList", - "documentation":"(Kinesis, DynamoDB Streams, and Amazon SQS) A list of current response type enums applied to the event source mapping.
" - }, - "ScalingConfig":{ - "shape":"ScalingConfig", - "documentation":"(Amazon SQS only) The scaling configuration for the event source. For more information, see Configuring maximum concurrency for Amazon SQS event sources.
" - }, - "AmazonManagedKafkaEventSourceConfig":{"shape":"AmazonManagedKafkaEventSourceConfig"}, - "SelfManagedKafkaEventSourceConfig":{"shape":"SelfManagedKafkaEventSourceConfig"}, - "DocumentDBEventSourceConfig":{ - "shape":"DocumentDBEventSourceConfig", - "documentation":"Specific configuration settings for a DocumentDB event source.
" - }, - "KMSKeyArn":{ - "shape":"KMSKeyArn", - "documentation":"The ARN of the Key Management Service (KMS) customer managed key that Lambda uses to encrypt your function's filter criteria. By default, Lambda does not encrypt your filter criteria object. Specify this property to encrypt data using your own customer managed key.
" - }, - "MetricsConfig":{ - "shape":"EventSourceMappingMetricsConfig", - "documentation":"The metrics configuration for your event source. For more information, see Event source mapping metrics.
" - }, - "ProvisionedPollerConfig":{ - "shape":"ProvisionedPollerConfig", - "documentation":"(Amazon MSK and self-managed Apache Kafka only) The provisioned mode configuration for the event source. For more information, see provisioned mode.
" - } - } - }, - "UpdateFunctionCodeRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "ZipFile":{ - "shape":"Blob", - "documentation":"The base64-encoded contents of the deployment package. Amazon Web Services SDK and CLI clients handle the encoding for you. Use only with a function defined with a .zip file archive deployment package.
" - }, - "S3Bucket":{ - "shape":"S3Bucket", - "documentation":"An Amazon S3 bucket in the same Amazon Web Services Region as your function. The bucket can be in a different Amazon Web Services account. Use only with a function defined with a .zip file archive deployment package.
" - }, - "S3Key":{ - "shape":"S3Key", - "documentation":"The Amazon S3 key of the deployment package. Use only with a function defined with a .zip file archive deployment package.
" - }, - "S3ObjectVersion":{ - "shape":"S3ObjectVersion", - "documentation":"For versioned objects, the version of the deployment package object to use.
" - }, - "ImageUri":{ - "shape":"String", - "documentation":"URI of a container image in the Amazon ECR registry. Do not use for a function defined with a .zip file archive.
" - }, - "Publish":{ - "shape":"Boolean", - "documentation":"Set to true to publish a new version of the function after updating the code. This has the same effect as calling PublishVersion separately.
" - }, - "DryRun":{ - "shape":"Boolean", - "documentation":"Set to true to validate the request parameters and access permissions without modifying the function code.
" - }, - "RevisionId":{ - "shape":"String", - "documentation":"Update the function only if the revision ID matches the ID that's specified. Use this option to avoid modifying a function that has changed since you last read it.
" - }, - "Architectures":{ - "shape":"ArchitecturesList", - "documentation":"The instruction set architecture that the function supports. Enter a string array with one of the valid values (arm64 or x86_64). The default value is x86_64.
The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt your function's .zip deployment package. If you don't provide a customer managed key, Lambda uses an Amazon Web Services managed key.
" - } - } - }, - "UpdateFunctionConfigurationRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Role":{ - "shape":"RoleArn", - "documentation":"The Amazon Resource Name (ARN) of the function's execution role.
" - }, - "Handler":{ - "shape":"Handler", - "documentation":"The name of the method within your code that Lambda calls to run your function. Handler is required if the deployment package is a .zip file archive. The format includes the file name. It can also include namespaces and other qualifiers, depending on the runtime. For more information, see Lambda programming model.
" - }, - "Description":{ - "shape":"Description", - "documentation":"A description of the function.
" - }, - "Timeout":{ - "shape":"Timeout", - "documentation":"The amount of time (in seconds) that Lambda allows a function to run before stopping it. The default is 3 seconds. The maximum allowed value is 900 seconds. For more information, see Lambda execution environment.
" - }, - "MemorySize":{ - "shape":"MemorySize", - "documentation":"The amount of memory available to the function at runtime. Increasing the function memory also increases its CPU allocation. The default value is 128 MB. The value can be any multiple of 1 MB.
" - }, - "VpcConfig":{ - "shape":"VpcConfig", - "documentation":"For network connectivity to Amazon Web Services resources in a VPC, specify a list of security groups and subnets in the VPC. When you connect a function to a VPC, it can access resources and the internet only through that VPC. For more information, see Configuring a Lambda function to access resources in a VPC.
" - }, - "Environment":{ - "shape":"Environment", - "documentation":"Environment variables that are accessible from function code during execution.
" - }, - "Runtime":{ - "shape":"Runtime", - "documentation":"The identifier of the function's runtime. Runtime is required if the deployment package is a .zip file archive. Specifying a runtime results in an error if you're deploying a function using a container image.
The following list includes deprecated runtimes. Lambda blocks creating new functions and updating existing functions shortly after each runtime is deprecated. For more information, see Runtime use after deprecation.
For a list of all currently supported runtimes, see Supported runtimes.
" - }, - "DeadLetterConfig":{ - "shape":"DeadLetterConfig", - "documentation":"A dead-letter queue configuration that specifies the queue or topic where Lambda sends asynchronous events when they fail processing. For more information, see Dead-letter queues.
" - }, - "KMSKeyArn":{ - "shape":"KMSKeyArn", - "documentation":"The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt the following resources:
The function's environment variables.
The function's Lambda SnapStart snapshots.
When used with SourceKMSKeyArn, the unzipped version of the .zip deployment package that's used for function invocations. For more information, see Specifying a customer managed key for Lambda.
The optimized version of the container image that's used for function invocations. Note that this is not the same key that's used to protect your container image in the Amazon Elastic Container Registry (Amazon ECR). For more information, see Function lifecycle.
If you don't provide a customer managed key, Lambda uses an Amazon Web Services owned key or an Amazon Web Services managed key.
" - }, - "TracingConfig":{ - "shape":"TracingConfig", - "documentation":"Set Mode to Active to sample and trace a subset of incoming requests with X-Ray.
Update the function only if the revision ID matches the ID that's specified. Use this option to avoid modifying a function that has changed since you last read it.
" - }, - "Layers":{ - "shape":"LayerList", - "documentation":"A list of function layers to add to the function's execution environment. Specify each layer by its ARN, including the version.
" - }, - "FileSystemConfigs":{ - "shape":"FileSystemConfigList", - "documentation":"Connection settings for an Amazon EFS file system.
" - }, - "ImageConfig":{ - "shape":"ImageConfig", - "documentation":"Container image configuration values that override the values in the container image Docker file.
" - }, - "EphemeralStorage":{ - "shape":"EphemeralStorage", - "documentation":"The size of the function's /tmp directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).
The function's SnapStart setting.
" - }, - "LoggingConfig":{ - "shape":"LoggingConfig", - "documentation":"The function's Amazon CloudWatch Logs configuration settings.
" - }, - "DurableConfig":{"shape":"DurableConfig"} - } - }, - "UpdateFunctionEventInvokeConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function, version, or alias.
Name formats
Function name - my-function (name-only), my-function:v1 (with alias).
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN - 123456789012:function:my-function.
You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"A version number or alias name.
", - "location":"querystring", - "locationName":"Qualifier" - }, - "MaximumRetryAttempts":{ - "shape":"MaximumRetryAttempts", - "documentation":"The maximum number of times to retry when the function returns an error.
" - }, - "MaximumEventAgeInSeconds":{ - "shape":"MaximumEventAgeInSeconds", - "documentation":"The maximum age of a request that Lambda sends to a function for processing.
" - }, - "DestinationConfig":{ - "shape":"DestinationConfig", - "documentation":"A destination for events after they have been sent to a function for processing.
Destinations
Function - The Amazon Resource Name (ARN) of a Lambda function.
Queue - The ARN of a standard SQS queue.
Bucket - The ARN of an Amazon S3 bucket.
Topic - The ARN of a standard SNS topic.
Event Bus - The ARN of an Amazon EventBridge event bus.
S3 buckets are supported only for on-failure destinations. To retain records of successful invocations, use another destination type.
The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"FunctionUrlQualifier", - "documentation":"The alias name.
", - "location":"querystring", - "locationName":"Qualifier" - }, - "AuthType":{ - "shape":"FunctionUrlAuthType", - "documentation":"The type of authentication that your function URL uses. Set to AWS_IAM if you want to restrict access to authenticated users only. Set to NONE if you want to bypass IAM authentication to create a public endpoint. For more information, see Security and auth model for Lambda function URLs.
The cross-origin resource sharing (CORS) settings for your function URL.
" - }, - "InvokeMode":{ - "shape":"InvokeMode", - "documentation":"Use one of the following options:
BUFFERED – This is the default option. Lambda invokes your function using the Invoke API operation. Invocation results are available when the payload is complete. The maximum payload size is 6 MB.
RESPONSE_STREAM – Your function streams payload results as they become available. Lambda invokes your function using the InvokeWithResponseStream API operation. The maximum response payload size is 20 MB, however, you can request a quota increase.
The HTTP URL endpoint for your function.
" - }, - "FunctionArn":{ - "shape":"FunctionArn", - "documentation":"The Amazon Resource Name (ARN) of your function.
" - }, - "AuthType":{ - "shape":"FunctionUrlAuthType", - "documentation":"The type of authentication that your function URL uses. Set to AWS_IAM if you want to restrict access to authenticated users only. Set to NONE if you want to bypass IAM authentication to create a public endpoint. For more information, see Security and auth model for Lambda function URLs.
The cross-origin resource sharing (CORS) settings for your function URL.
" - }, - "CreationTime":{ - "shape":"Timestamp", - "documentation":"When the function URL was created, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).
" - }, - "LastModifiedTime":{ - "shape":"Timestamp", - "documentation":"When the function URL configuration was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).
" - }, - "InvokeMode":{ - "shape":"InvokeMode", - "documentation":"Use one of the following options:
BUFFERED – This is the default option. Lambda invokes your function using the Invoke API operation. Invocation results are available when the payload is complete. The maximum payload size is 6 MB.
RESPONSE_STREAM – Your function streams payload results as they become available. Lambda invokes your function using the InvokeWithResponseStream API operation. The maximum response payload size is 20 MB, however, you can request a quota increase.
A list of VPC subnet IDs.
" - }, - "SecurityGroupIds":{ - "shape":"SecurityGroupIds", - "documentation":"A list of VPC security group IDs.
" - }, - "Ipv6AllowedForDualStack":{ - "shape":"NullableBoolean", - "documentation":"Allows outbound IPv6 traffic on VPC functions that are connected to dual-stack subnets.
" - } - }, - "documentation":"The VPC security groups and subnets that are attached to a Lambda function. For more information, see Configuring a Lambda function to access resources in a VPC.
" - }, - "VpcConfigResponse":{ - "type":"structure", - "members":{ - "SubnetIds":{ - "shape":"SubnetIds", - "documentation":"A list of VPC subnet IDs.
" - }, - "SecurityGroupIds":{ - "shape":"SecurityGroupIds", - "documentation":"A list of VPC security group IDs.
" - }, - "VpcId":{ - "shape":"VpcId", - "documentation":"The ID of the VPC.
" - }, - "Ipv6AllowedForDualStack":{ - "shape":"NullableBoolean", - "documentation":"Allows outbound IPv6 traffic on VPC functions that are connected to dual-stack subnets.
" - } - }, - "documentation":"The VPC security groups and subnets that are attached to a Lambda function.
" - }, - "VpcId":{"type":"string"}, - "WaitCancelledDetails":{ - "type":"structure", - "members":{ - "Error":{"shape":"EventError"} - } - }, - "WaitDetails":{ - "type":"structure", - "members":{ - "ScheduledEndTimestamp":{"shape":"ExecutionTimestamp"} - } - }, - "WaitOptions":{ - "type":"structure", - "members":{ - "WaitSeconds":{"shape":"WaitOptionsWaitSecondsInteger"} - } - }, - "WaitOptionsWaitSecondsInteger":{ - "type":"integer", - "box":true, - "max":31622400, - "min":1 - }, - "WaitStartedDetails":{ - "type":"structure", - "required":[ - "Duration", - "ScheduledEndTimestamp" - ], - "members":{ - "Duration":{"shape":"DurationSeconds"}, - "ScheduledEndTimestamp":{"shape":"ExecutionTimestamp"} - } - }, - "WaitSucceededDetails":{ - "type":"structure", - "members":{ - "Duration":{"shape":"DurationSeconds"} - } - }, - "Weight":{ - "type":"double", - "max":1.0, - "min":0.0 - }, - "WorkingDirectory":{ - "type":"string", - "max":1000, - "min":0 - } - }, - "documentation":"Overview
Lambda is a compute service that lets you run code without provisioning or managing servers. Lambda runs your code on a high-availability compute infrastructure and performs all of the administration of the compute resources, including server and operating system maintenance, capacity provisioning and automatic scaling, code monitoring and logging. With Lambda, you can run code for virtually any type of application or backend service. For more information about the Lambda service, see What is Lambda in the Lambda Developer Guide.
The Lambda API Reference provides information about each of the API methods, including details about the parameters in each API request and response.
You can use Software Development Kits (SDKs), Integrated Development Environment (IDE) Toolkits, and command line tools to access the API. For installation instructions, see Tools for Amazon Web Services.
For a list of Region-specific endpoints that Lambda supports, see Lambda endpoints and quotas in the Amazon Web Services General Reference..
When making the API calls, you will need to authenticate your request by providing a signature. Lambda supports signature version 4. For more information, see Signature Version 4 signing process in the Amazon Web Services General Reference..
CA certificates
Because Amazon Web Services SDKs use the CA certificates from your computer, changes to the certificates on the Amazon Web Services servers can cause connection failures when you attempt to use an SDK. You can prevent these failures by keeping your computer's CA certificates and operating system up-to-date. If you encounter this issue in a corporate environment and do not manage your own computer, you might need to ask an administrator to assist with the update process. The following list shows minimum operating system and Java versions:
Microsoft Windows versions that have updates from January 2005 or later installed contain at least one of the required CAs in their trust list.
Mac OS X 10.4 with Java for Mac OS X 10.4 Release 5 (February 2007), Mac OS X 10.5 (October 2007), and later versions contain at least one of the required CAs in their trust list.
Red Hat Enterprise Linux 5 (March 2007), 6, and 7 and CentOS 5, 6, and 7 all contain at least one of the required CAs in their default trusted CA list.
Java 1.4.2_12 (May 2006), 5 Update 2 (March 2005), and all later versions, including Java 6 (December 2006), 7, and 8, contain at least one of the required CAs in their default trusted CA list.
When accessing the Lambda management console or Lambda API endpoints, whether through browsers or programmatically, you will need to ensure your client machines support any of the following CAs:
Amazon Root CA 1
Starfield Services Root Certificate Authority - G2
Starfield Class 2 Certification Authority
Root certificates from the first two authorities are available from Amazon trust services, but keeping your computer up-to-date is the more straightforward solution. To learn more about ACM-provided certificates, see Amazon Web Services Certificate Manager FAQs.
" -} diff --git a/src/aws_durable_execution_sdk_python/botocore/data/lambdainternal/2015-03-31/service-2.json b/src/aws_durable_execution_sdk_python/botocore/data/lambdainternal/2015-03-31/service-2.json deleted file mode 100644 index a96f3e3..0000000 --- a/src/aws_durable_execution_sdk_python/botocore/data/lambdainternal/2015-03-31/service-2.json +++ /dev/null @@ -1,7855 +0,0 @@ -{ - "version":"2.0", - "metadata":{ - "apiVersion":"2015-03-31", - "endpointPrefix":"lambda", - "protocol":"rest-json", - "serviceFullName":"AWS Lambda", - "serviceId":"Lambda", - "signatureVersion":"v4", - "uid":"lambda-2015-03-31" - }, - "operations":{ - "AddLayerVersionPermission":{ - "name":"AddLayerVersionPermission", - "http":{ - "method":"POST", - "requestUri":"/2018-10-31/layers/{LayerName}/versions/{VersionNumber}/policy", - "responseCode":201 - }, - "input":{"shape":"AddLayerVersionPermissionRequest"}, - "output":{"shape":"AddLayerVersionPermissionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"PolicyLengthExceededException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"PreconditionFailedException"} - ], - "documentation":"Adds permissions to the resource-based policy of a version of an Lambda layer. Use this action to grant layer usage permission to other accounts. You can grant permission to a single account, all accounts in an organization, or all Amazon Web Services accounts.
To revoke permission, call RemoveLayerVersionPermission with the statement ID that you specified when you added it.
" - }, - "AddPermission":{ - "name":"AddPermission", - "http":{ - "method":"POST", - "requestUri":"/2015-03-31/functions/{FunctionName}/policy", - "responseCode":201 - }, - "input":{"shape":"AddPermissionRequest"}, - "output":{"shape":"AddPermissionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"PolicyLengthExceededException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"PreconditionFailedException"} - ], - "documentation":"Grants a principal permission to use a function. You can apply the policy at the function level, or specify a qualifier to restrict access to a single version or alias. If you use a qualifier, the invoker must use the full Amazon Resource Name (ARN) of that version or alias to invoke the function. Note: Lambda does not support adding policies to version $LATEST.
To grant permission to another account, specify the account ID as the Principal. To grant permission to an organization defined in Organizations, specify the organization ID as the PrincipalOrgID. For Amazon Web Services services, the principal is a domain-style identifier that the service defines, such as s3.amazonaws.com or sns.amazonaws.com. For Amazon Web Services services, you can also specify the ARN of the associated resource as the SourceArn. If you grant permission to a service principal without specifying the source, other accounts could potentially configure resources in their account to invoke your Lambda function.
This operation adds a statement to a resource-based permissions policy for the function. For more information about function policies, see Using resource-based policies for Lambda.
" - }, - "CheckpointDurableExecution":{ - "name":"CheckpointDurableExecution", - "http":{ - "method":"POST", - "requestUri":"/2025-12-01/durable-executions/{DurableExecutionArn}/checkpoint", - "responseCode":200 - }, - "input":{"shape":"CheckpointDurableExecutionRequest"}, - "output":{"shape":"CheckpointDurableExecutionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ServiceException"} - ], - "idempotent":true - }, - "CreateAlias":{ - "name":"CreateAlias", - "http":{ - "method":"POST", - "requestUri":"/2015-03-31/functions/{FunctionName}/aliases", - "responseCode":201 - }, - "input":{"shape":"CreateAliasRequest"}, - "output":{"shape":"AliasConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Creates an alias for a Lambda function version. Use aliases to provide clients with a function identifier that you can update to invoke a different version.
You can also map an alias to split invocation requests between two versions. Use the RoutingConfig parameter to specify a second version and the percentage of invocation requests that it receives.
Creates a code signing configuration. A code signing configuration defines a list of allowed signing profiles and defines the code-signing validation policy (action to be taken if deployment validation checks fail).
" - }, - "CreateEventSourceMapping":{ - "name":"CreateEventSourceMapping", - "http":{ - "method":"POST", - "requestUri":"/2015-03-31/event-source-mappings", - "responseCode":202 - }, - "input":{"shape":"CreateEventSourceMappingRequest"}, - "output":{"shape":"EventSourceMappingConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Creates a mapping between an event source and an Lambda function. Lambda reads items from the event source and invokes the function.
For details about how to configure different event sources, see the following topics.
The following error handling options are available only for DynamoDB and Kinesis event sources:
BisectBatchOnFunctionError – If the function returns an error, split the batch in two and retry.
MaximumRecordAgeInSeconds – Discard records older than the specified age. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires
MaximumRetryAttempts – Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.
ParallelizationFactor – Process multiple batches from each shard concurrently.
For stream sources (DynamoDB, Kinesis, Amazon MSK, and self-managed Apache Kafka), the following option is also available:
OnFailure – Send discarded records to an Amazon SQS queue, Amazon SNS topic, or Amazon S3 bucket. For more information, see Adding a destination.
For information about which configuration parameters apply to each event source, see the following topics.
Creates a Lambda function. To create a function, you need a deployment package and an execution role. The deployment package is a .zip file archive or container image that contains your function code. The execution role grants the function permission to use Amazon Web Services services, such as Amazon CloudWatch Logs for log streaming and X-Ray for request tracing.
If the deployment package is a container image, then you set the package type to Image. For a container image, the code property must include the URI of a container image in the Amazon ECR registry. You do not need to specify the handler and runtime properties.
If the deployment package is a .zip file archive, then you set the package type to Zip. For a .zip file archive, the code property specifies the location of the .zip file. You must also specify the handler and runtime properties. The code in the deployment package must be compatible with the target instruction set architecture of the function (x86-64 or arm64). If you do not specify the architecture, then the default value is x86-64.
When you create a function, Lambda provisions an instance of the function and its supporting resources. If your function connects to a VPC, this process can take a minute or so. During this time, you can't invoke or modify the function. The State, StateReason, and StateReasonCode fields in the response from GetFunctionConfiguration indicate when the function is ready to invoke. For more information, see Lambda function states.
A function has an unpublished version, and can have published versions and aliases. The unpublished version changes when you update your function's code and configuration. A published version is a snapshot of your function code and configuration that can't be changed. An alias is a named resource that maps to a version, and can be changed to map to a different version. Use the Publish parameter to create version 1 of your function from its initial configuration.
The other parameters let you configure version-specific and function-level settings. You can modify version-specific settings later with UpdateFunctionConfiguration. Function-level settings apply to both the unpublished and published versions of the function, and include tags (TagResource) and per-function concurrency limits (PutFunctionConcurrency).
You can use code signing if your deployment package is a .zip file archive. To enable code signing for this function, specify the ARN of a code-signing configuration. When a user attempts to deploy a code package with UpdateFunctionCode, Lambda checks that the code package has a valid signature from a trusted publisher. The code-signing configuration includes set of signing profiles, which define the trusted publishers for this function.
If another Amazon Web Services account or an Amazon Web Services service invokes your function, use AddPermission to grant permission by creating a resource-based Identity and Access Management (IAM) policy. You can grant permissions at the function level, on a version, or on an alias.
To invoke your function directly, use Invoke. To invoke your function in response to events in other Amazon Web Services services, create an event source mapping (CreateEventSourceMapping), or configure a function trigger in the other service. For more information, see Invoking Lambda functions.
", - "idempotent":true - }, - "CreateFunctionUrlConfig":{ - "name":"CreateFunctionUrlConfig", - "http":{ - "method":"POST", - "requestUri":"/2021-10-31/functions/{FunctionName}/url", - "responseCode":201 - }, - "input":{"shape":"CreateFunctionUrlConfigRequest"}, - "output":{"shape":"CreateFunctionUrlConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Creates a Lambda function URL with the specified configuration parameters. A function URL is a dedicated HTTP(S) endpoint that you can use to invoke your function.
" - }, - "DeleteAlias":{ - "name":"DeleteAlias", - "http":{ - "method":"DELETE", - "requestUri":"/2015-03-31/functions/{FunctionName}/aliases/{Name}", - "responseCode":204 - }, - "input":{"shape":"DeleteAliasRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"} - ], - "documentation":"Deletes a Lambda function alias.
", - "idempotent":true - }, - "DeleteCodeSigningConfig":{ - "name":"DeleteCodeSigningConfig", - "http":{ - "method":"DELETE", - "requestUri":"/2020-04-22/code-signing-configs/{CodeSigningConfigArn}", - "responseCode":204 - }, - "input":{"shape":"DeleteCodeSigningConfigRequest"}, - "output":{"shape":"DeleteCodeSigningConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Deletes the code signing configuration. You can delete the code signing configuration only if no function is using it.
", - "idempotent":true - }, - "DeleteEventSourceMapping":{ - "name":"DeleteEventSourceMapping", - "http":{ - "method":"DELETE", - "requestUri":"/2015-03-31/event-source-mappings/{UUID}", - "responseCode":202 - }, - "input":{"shape":"DeleteEventSourceMappingRequest"}, - "output":{"shape":"EventSourceMappingConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceInUseException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Deletes an event source mapping. You can get the identifier of a mapping from the output of ListEventSourceMappings.
When you delete an event source mapping, it enters a Deleting state and might not be completely deleted for several seconds.
Deletes a Lambda function. To delete a specific function version, use the Qualifier parameter. Otherwise, all versions and aliases are deleted. This doesn't require the user to have explicit permissions for DeleteAlias.
To delete Lambda event source mappings that invoke a function, use DeleteEventSourceMapping. For Amazon Web Services services and resources that invoke your function directly, delete the trigger in the service where you originally configured it.
", - "idempotent":true - }, - "DeleteFunctionCodeSigningConfig":{ - "name":"DeleteFunctionCodeSigningConfig", - "http":{ - "method":"DELETE", - "requestUri":"/2020-06-30/functions/{FunctionName}/code-signing-config", - "responseCode":204 - }, - "input":{"shape":"DeleteFunctionCodeSigningConfigRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"CodeSigningConfigNotFoundException"} - ], - "documentation":"Removes the code signing configuration from the function.
" - }, - "DeleteFunctionConcurrency":{ - "name":"DeleteFunctionConcurrency", - "http":{ - "method":"DELETE", - "requestUri":"/2017-10-31/functions/{FunctionName}/concurrency", - "responseCode":204 - }, - "input":{"shape":"DeleteFunctionConcurrencyRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Removes a concurrent execution limit from a function.
" - }, - "DeleteFunctionEventInvokeConfig":{ - "name":"DeleteFunctionEventInvokeConfig", - "http":{ - "method":"DELETE", - "requestUri":"/2019-09-25/functions/{FunctionName}/event-invoke-config", - "responseCode":204 - }, - "input":{"shape":"DeleteFunctionEventInvokeConfigRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Deletes the configuration for asynchronous invocation for a function, version, or alias.
To configure options for asynchronous invocation, use PutFunctionEventInvokeConfig.
" - }, - "DeleteFunctionUrlConfig":{ - "name":"DeleteFunctionUrlConfig", - "http":{ - "method":"DELETE", - "requestUri":"/2021-10-31/functions/{FunctionName}/url", - "responseCode":204 - }, - "input":{"shape":"DeleteFunctionUrlConfigRequest"}, - "errors":[ - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Deletes a Lambda function URL. When you delete a function URL, you can't recover it. Creating a new function URL results in a different URL address.
" - }, - "DeleteLayerVersion":{ - "name":"DeleteLayerVersion", - "http":{ - "method":"DELETE", - "requestUri":"/2018-10-31/layers/{LayerName}/versions/{VersionNumber}", - "responseCode":204 - }, - "input":{"shape":"DeleteLayerVersionRequest"}, - "errors":[ - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"} - ], - "documentation":"Deletes a version of an Lambda layer. Deleted versions can no longer be viewed or added to functions. To avoid breaking functions, a copy of the version remains in Lambda until no functions refer to it.
", - "idempotent":true - }, - "DeleteProvisionedConcurrencyConfig":{ - "name":"DeleteProvisionedConcurrencyConfig", - "http":{ - "method":"DELETE", - "requestUri":"/2019-09-30/functions/{FunctionName}/provisioned-concurrency", - "responseCode":204 - }, - "input":{"shape":"DeleteProvisionedConcurrencyConfigRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Deletes the provisioned concurrency configuration for a function.
", - "idempotent":true - }, - "GetAccountSettings":{ - "name":"GetAccountSettings", - "http":{ - "method":"GET", - "requestUri":"/2016-08-19/account-settings", - "responseCode":200 - }, - "input":{"shape":"GetAccountSettingsRequest"}, - "output":{"shape":"GetAccountSettingsResponse"}, - "errors":[ - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"} - ], - "documentation":"Retrieves details about your account's limits and usage in an Amazon Web Services Region.
", - "readonly":true - }, - "GetAlias":{ - "name":"GetAlias", - "http":{ - "method":"GET", - "requestUri":"/2015-03-31/functions/{FunctionName}/aliases/{Name}", - "responseCode":200 - }, - "input":{"shape":"GetAliasRequest"}, - "output":{"shape":"AliasConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Returns details about a Lambda function alias.
", - "readonly":true - }, - "GetCodeSigningConfig":{ - "name":"GetCodeSigningConfig", - "http":{ - "method":"GET", - "requestUri":"/2020-04-22/code-signing-configs/{CodeSigningConfigArn}", - "responseCode":200 - }, - "input":{"shape":"GetCodeSigningConfigRequest"}, - "output":{"shape":"GetCodeSigningConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Returns information about the specified code signing configuration.
", - "readonly":true - }, - "GetDurableExecution":{ - "name":"GetDurableExecution", - "http":{ - "method":"GET", - "requestUri":"/2025-12-01/durable-executions/{DurableExecutionArn}", - "responseCode":200 - }, - "input":{"shape":"GetDurableExecutionRequest"}, - "output":{"shape":"GetDurableExecutionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ServiceException"}, - {"shape":"ResourceNotFoundException"} - ], - "readonly":true - }, - "GetDurableExecutionHistory":{ - "name":"GetDurableExecutionHistory", - "http":{ - "method":"GET", - "requestUri":"/2025-12-01/durable-executions/{DurableExecutionArn}/history", - "responseCode":200 - }, - "input":{"shape":"GetDurableExecutionHistoryRequest"}, - "output":{"shape":"GetDurableExecutionHistoryResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ServiceException"}, - {"shape":"ResourceNotFoundException"} - ], - "readonly":true - }, - "GetDurableExecutionState":{ - "name":"GetDurableExecutionState", - "http":{ - "method":"GET", - "requestUri":"/2025-12-01/durable-executions/{DurableExecutionArn}/state", - "responseCode":200 - }, - "input":{"shape":"GetDurableExecutionStateRequest"}, - "output":{"shape":"GetDurableExecutionStateResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ServiceException"} - ], - "readonly":true - }, - "GetEventSourceMapping":{ - "name":"GetEventSourceMapping", - "http":{ - "method":"GET", - "requestUri":"/2015-03-31/event-source-mappings/{UUID}", - "responseCode":200 - }, - "input":{"shape":"GetEventSourceMappingRequest"}, - "output":{"shape":"EventSourceMappingConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Returns details about an event source mapping. You can get the identifier of a mapping from the output of ListEventSourceMappings.
", - "readonly":true - }, - "GetFunction":{ - "name":"GetFunction", - "http":{ - "method":"GET", - "requestUri":"/2015-03-31/functions/{FunctionName}", - "responseCode":200 - }, - "input":{"shape":"GetFunctionRequest"}, - "output":{"shape":"GetFunctionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Returns information about the function or function version, with a link to download the deployment package that's valid for 10 minutes. If you specify a function version, only details that are specific to that version are returned.
", - "readonly":true - }, - "GetFunctionCodeSigningConfig":{ - "name":"GetFunctionCodeSigningConfig", - "http":{ - "method":"GET", - "requestUri":"/2020-06-30/functions/{FunctionName}/code-signing-config", - "responseCode":200 - }, - "input":{"shape":"GetFunctionCodeSigningConfigRequest"}, - "output":{"shape":"GetFunctionCodeSigningConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Returns the code signing configuration for the specified function.
", - "readonly":true - }, - "GetFunctionConcurrency":{ - "name":"GetFunctionConcurrency", - "http":{ - "method":"GET", - "requestUri":"/2019-09-30/functions/{FunctionName}/concurrency", - "responseCode":200 - }, - "input":{"shape":"GetFunctionConcurrencyRequest"}, - "output":{"shape":"GetFunctionConcurrencyResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Returns details about the reserved concurrency configuration for a function. To set a concurrency limit for a function, use PutFunctionConcurrency.
", - "readonly":true - }, - "GetFunctionConfiguration":{ - "name":"GetFunctionConfiguration", - "http":{ - "method":"GET", - "requestUri":"/2015-03-31/functions/{FunctionName}/configuration", - "responseCode":200 - }, - "input":{"shape":"GetFunctionConfigurationRequest"}, - "output":{"shape":"FunctionConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Returns the version-specific settings of a Lambda function or version. The output includes only options that can vary between versions of a function. To modify these settings, use UpdateFunctionConfiguration.
To get all of a function's details, including function-level settings, use GetFunction.
", - "readonly":true - }, - "GetFunctionEventInvokeConfig":{ - "name":"GetFunctionEventInvokeConfig", - "http":{ - "method":"GET", - "requestUri":"/2019-09-25/functions/{FunctionName}/event-invoke-config", - "responseCode":200 - }, - "input":{"shape":"GetFunctionEventInvokeConfigRequest"}, - "output":{"shape":"FunctionEventInvokeConfig"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Retrieves the configuration for asynchronous invocation for a function, version, or alias.
To configure options for asynchronous invocation, use PutFunctionEventInvokeConfig.
", - "readonly":true - }, - "GetFunctionRecursionConfig":{ - "name":"GetFunctionRecursionConfig", - "http":{ - "method":"GET", - "requestUri":"/2024-08-31/functions/{FunctionName}/recursion-config", - "responseCode":200 - }, - "input":{"shape":"GetFunctionRecursionConfigRequest"}, - "output":{"shape":"GetFunctionRecursionConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Returns your function's recursive loop detection configuration.
", - "readonly":true - }, - "GetFunctionUrlConfig":{ - "name":"GetFunctionUrlConfig", - "http":{ - "method":"GET", - "requestUri":"/2021-10-31/functions/{FunctionName}/url", - "responseCode":200 - }, - "input":{"shape":"GetFunctionUrlConfigRequest"}, - "output":{"shape":"GetFunctionUrlConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Returns details about a Lambda function URL.
", - "readonly":true - }, - "GetLayerVersion":{ - "name":"GetLayerVersion", - "http":{ - "method":"GET", - "requestUri":"/2018-10-31/layers/{LayerName}/versions/{VersionNumber}", - "responseCode":200 - }, - "input":{"shape":"GetLayerVersionRequest"}, - "output":{"shape":"GetLayerVersionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Returns information about a version of an Lambda layer, with a link to download the layer archive that's valid for 10 minutes.
", - "readonly":true - }, - "GetLayerVersionByArn":{ - "name":"GetLayerVersionByArn", - "http":{ - "method":"GET", - "requestUri":"/2018-10-31/layers?find=LayerVersion", - "responseCode":200 - }, - "input":{"shape":"GetLayerVersionByArnRequest"}, - "output":{"shape":"GetLayerVersionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Returns information about a version of an Lambda layer, with a link to download the layer archive that's valid for 10 minutes.
", - "readonly":true - }, - "GetLayerVersionPolicy":{ - "name":"GetLayerVersionPolicy", - "http":{ - "method":"GET", - "requestUri":"/2018-10-31/layers/{LayerName}/versions/{VersionNumber}/policy", - "responseCode":200 - }, - "input":{"shape":"GetLayerVersionPolicyRequest"}, - "output":{"shape":"GetLayerVersionPolicyResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Returns the permission policy for a version of an Lambda layer. For more information, see AddLayerVersionPermission.
", - "readonly":true - }, - "GetPolicy":{ - "name":"GetPolicy", - "http":{ - "method":"GET", - "requestUri":"/2015-03-31/functions/{FunctionName}/policy", - "responseCode":200 - }, - "input":{"shape":"GetPolicyRequest"}, - "output":{"shape":"GetPolicyResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Returns the resource-based IAM policy for a function, version, or alias.
", - "readonly":true - }, - "GetProvisionedConcurrencyConfig":{ - "name":"GetProvisionedConcurrencyConfig", - "http":{ - "method":"GET", - "requestUri":"/2019-09-30/functions/{FunctionName}/provisioned-concurrency", - "responseCode":200 - }, - "input":{"shape":"GetProvisionedConcurrencyConfigRequest"}, - "output":{"shape":"GetProvisionedConcurrencyConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ProvisionedConcurrencyConfigNotFoundException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Retrieves the provisioned concurrency configuration for a function's alias or version.
", - "readonly":true - }, - "GetRuntimeManagementConfig":{ - "name":"GetRuntimeManagementConfig", - "http":{ - "method":"GET", - "requestUri":"/2021-07-20/functions/{FunctionName}/runtime-management-config", - "responseCode":200 - }, - "input":{"shape":"GetRuntimeManagementConfigRequest"}, - "output":{"shape":"GetRuntimeManagementConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Retrieves the runtime management configuration for a function's version. If the runtime update mode is Manual, this includes the ARN of the runtime version and the runtime update mode. If the runtime update mode is Auto or Function update, this includes the runtime update mode and null is returned for the ARN. For more information, see Runtime updates.
Invokes a Lambda function. You can invoke a function synchronously (and wait for the response), or asynchronously. By default, Lambda invokes your function synchronously (i.e. the InvocationType is RequestResponse). To invoke a function asynchronously, set InvocationType to Event. Lambda passes the ClientContext object to your function for synchronous invocations only.
For synchronous invocation, details about the function response, including errors, are included in the response body and headers. For either invocation type, you can find more information in the execution log and trace.
When an error occurs, your function may be invoked multiple times. Retry behavior varies by error type, client, event source, and invocation type. For example, if you invoke a function asynchronously and it returns an error, Lambda executes the function up to two more times. For more information, see Error handling and automatic retries in Lambda.
For asynchronous invocation, Lambda adds events to a queue before sending them to your function. If your function does not have enough capacity to keep up with the queue, events may be lost. Occasionally, your function may receive the same event multiple times, even if no error occurs. To retain events that were not processed, configure your function with a dead-letter queue.
The status code in the API response doesn't reflect function errors. Error codes are reserved for errors that prevent your function from executing, such as permissions errors, quota errors, or issues with your function's code and configuration. For example, Lambda returns TooManyRequestsException if running the function would cause you to exceed a concurrency limit at either the account level (ConcurrentInvocationLimitExceeded) or function level (ReservedFunctionConcurrentInvocationLimitExceeded).
For functions with a long timeout, your client might disconnect during synchronous invocation while it waits for a response. Configure your HTTP client, SDK, firewall, proxy, or operating system to allow for long connections with timeout or keep-alive settings.
This operation requires permission for the lambda:InvokeFunction action. For details on how to set up permissions for cross-account invocations, see Granting function access to other accounts.
" - }, - "InvokeAsync":{ - "name":"InvokeAsync", - "http":{ - "method":"POST", - "requestUri":"/2014-11-13/functions/{FunctionName}/invoke-async", - "responseCode":202 - }, - "input":{"shape":"InvokeAsyncRequest"}, - "output":{"shape":"InvokeAsyncResponse"}, - "errors":[ - {"shape":"InvalidRuntimeException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"InvalidRequestContentException"} - ], - "documentation":"For asynchronous function invocation, use Invoke.
Invokes a function asynchronously.
If you do use the InvokeAsync action, note that it doesn't support X-Ray active tracing. The trace ID is not propagated to the function, even if X-Ray active tracing is turned on.
Configure your Lambda functions to stream response payloads back to clients. For more information, see Configuring a Lambda function to stream responses.
This operation requires permission for the lambda:InvokeFunction action. For details on how to set up permissions for cross-account invocations, see Granting function access to other accounts.
" - }, - "ListAliases":{ - "name":"ListAliases", - "http":{ - "method":"GET", - "requestUri":"/2015-03-31/functions/{FunctionName}/aliases", - "responseCode":200 - }, - "input":{"shape":"ListAliasesRequest"}, - "output":{"shape":"ListAliasesResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Returns a list of aliases for a Lambda function.
", - "readonly":true - }, - "ListCodeSigningConfigs":{ - "name":"ListCodeSigningConfigs", - "http":{ - "method":"GET", - "requestUri":"/2020-04-22/code-signing-configs", - "responseCode":200 - }, - "input":{"shape":"ListCodeSigningConfigsRequest"}, - "output":{"shape":"ListCodeSigningConfigsResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"} - ], - "documentation":"Returns a list of code signing configurations. A request returns up to 10,000 configurations per call. You can use the MaxItems parameter to return fewer configurations per call.
Lists event source mappings. Specify an EventSourceArn to show only event source mappings for a single event source.
Retrieves a list of configurations for asynchronous invocation for a function.
To configure options for asynchronous invocation, use PutFunctionEventInvokeConfig.
", - "readonly":true - }, - "ListFunctionUrlConfigs":{ - "name":"ListFunctionUrlConfigs", - "http":{ - "method":"GET", - "requestUri":"/2021-10-31/functions/{FunctionName}/urls", - "responseCode":200 - }, - "input":{"shape":"ListFunctionUrlConfigsRequest"}, - "output":{"shape":"ListFunctionUrlConfigsResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Returns a list of Lambda function URLs for the specified function.
", - "readonly":true - }, - "ListFunctions":{ - "name":"ListFunctions", - "http":{ - "method":"GET", - "requestUri":"/2015-03-31/functions", - "responseCode":200 - }, - "input":{"shape":"ListFunctionsRequest"}, - "output":{"shape":"ListFunctionsResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"} - ], - "documentation":"Returns a list of Lambda functions, with the version-specific configuration of each. Lambda returns up to 50 functions per call.
Set FunctionVersion to ALL to include all published versions of each function in addition to the unpublished version.
The ListFunctions operation returns a subset of the FunctionConfiguration fields. To get the additional fields (State, StateReasonCode, StateReason, LastUpdateStatus, LastUpdateStatusReason, LastUpdateStatusReasonCode, RuntimeVersionConfig) for a function or version, use GetFunction.
List the functions that use the specified code signing configuration. You can use this method prior to deleting a code signing configuration, to verify that no functions are using it.
", - "readonly":true - }, - "ListLayerVersions":{ - "name":"ListLayerVersions", - "http":{ - "method":"GET", - "requestUri":"/2018-10-31/layers/{LayerName}/versions", - "responseCode":200 - }, - "input":{"shape":"ListLayerVersionsRequest"}, - "output":{"shape":"ListLayerVersionsResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Lists the versions of an Lambda layer. Versions that have been deleted aren't listed. Specify a runtime identifier to list only versions that indicate that they're compatible with that runtime. Specify a compatible architecture to include only layer versions that are compatible with that architecture.
", - "readonly":true - }, - "ListLayers":{ - "name":"ListLayers", - "http":{ - "method":"GET", - "requestUri":"/2018-10-31/layers", - "responseCode":200 - }, - "input":{"shape":"ListLayersRequest"}, - "output":{"shape":"ListLayersResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"} - ], - "documentation":"Lists Lambda layers and shows information about the latest version of each. Specify a runtime identifier to list only layers that indicate that they're compatible with that runtime. Specify a compatible architecture to include only layers that are compatible with that instruction set architecture.
", - "readonly":true - }, - "ListProvisionedConcurrencyConfigs":{ - "name":"ListProvisionedConcurrencyConfigs", - "http":{ - "method":"GET", - "requestUri":"/2019-09-30/functions/{FunctionName}/provisioned-concurrency?List=ALL", - "responseCode":200 - }, - "input":{"shape":"ListProvisionedConcurrencyConfigsRequest"}, - "output":{"shape":"ListProvisionedConcurrencyConfigsResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Retrieves a list of provisioned concurrency configurations for a function.
", - "readonly":true - }, - "ListTags":{ - "name":"ListTags", - "http":{ - "method":"GET", - "requestUri":"/2017-03-31/tags/{Resource}", - "responseCode":200 - }, - "input":{"shape":"ListTagsRequest"}, - "output":{"shape":"ListTagsResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Returns a function, event source mapping, or code signing configuration's tags. You can also view function tags with GetFunction.
", - "readonly":true - }, - "ListVersionsByFunction":{ - "name":"ListVersionsByFunction", - "http":{ - "method":"GET", - "requestUri":"/2015-03-31/functions/{FunctionName}/versions", - "responseCode":200 - }, - "input":{"shape":"ListVersionsByFunctionRequest"}, - "output":{"shape":"ListVersionsByFunctionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Returns a list of versions, with the version-specific configuration of each. Lambda returns up to 50 versions per call.
", - "readonly":true - }, - "PublishLayerVersion":{ - "name":"PublishLayerVersion", - "http":{ - "method":"POST", - "requestUri":"/2018-10-31/layers/{LayerName}/versions", - "responseCode":201 - }, - "input":{"shape":"PublishLayerVersionRequest"}, - "output":{"shape":"PublishLayerVersionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"CodeStorageExceededException"} - ], - "documentation":"Creates an Lambda layer from a ZIP archive. Each time you call PublishLayerVersion with the same layer name, a new version is created.
Add layers to your function with CreateFunction or UpdateFunctionConfiguration.
" - }, - "PublishVersion":{ - "name":"PublishVersion", - "http":{ - "method":"POST", - "requestUri":"/2015-03-31/functions/{FunctionName}/versions", - "responseCode":201 - }, - "input":{"shape":"PublishVersionRequest"}, - "output":{"shape":"FunctionConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"CodeStorageExceededException"}, - {"shape":"PreconditionFailedException"} - ], - "documentation":"Creates a version from the current code and configuration of a function. Use versions to create a snapshot of your function code and configuration that doesn't change.
Lambda doesn't publish a version if the function's configuration and code haven't changed since the last version. Use UpdateFunctionCode or UpdateFunctionConfiguration to update the function before publishing a version.
Clients can invoke versions directly or with an alias. To create an alias, use CreateAlias.
" - }, - "PutFunctionCodeSigningConfig":{ - "name":"PutFunctionCodeSigningConfig", - "http":{ - "method":"PUT", - "requestUri":"/2020-06-30/functions/{FunctionName}/code-signing-config", - "responseCode":200 - }, - "input":{"shape":"PutFunctionCodeSigningConfigRequest"}, - "output":{"shape":"PutFunctionCodeSigningConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"CodeSigningConfigNotFoundException"} - ], - "documentation":"Update the code signing configuration for the function. Changes to the code signing configuration take effect the next time a user tries to deploy a code package to the function.
" - }, - "PutFunctionConcurrency":{ - "name":"PutFunctionConcurrency", - "http":{ - "method":"PUT", - "requestUri":"/2017-10-31/functions/{FunctionName}/concurrency", - "responseCode":200 - }, - "input":{"shape":"PutFunctionConcurrencyRequest"}, - "output":{"shape":"Concurrency"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Sets the maximum number of simultaneous executions for a function, and reserves capacity for that concurrency level.
Concurrency settings apply to the function as a whole, including all published versions and the unpublished version. Reserving concurrency both ensures that your function has capacity to process the specified number of events simultaneously, and prevents it from scaling beyond that level. Use GetFunction to see the current setting for a function.
Use GetAccountSettings to see your Regional concurrency limit. You can reserve concurrency for as many functions as you like, as long as you leave at least 100 simultaneous executions unreserved for functions that aren't configured with a per-function limit. For more information, see Lambda function scaling.
" - }, - "PutFunctionEventInvokeConfig":{ - "name":"PutFunctionEventInvokeConfig", - "http":{ - "method":"PUT", - "requestUri":"/2019-09-25/functions/{FunctionName}/event-invoke-config", - "responseCode":200 - }, - "input":{"shape":"PutFunctionEventInvokeConfigRequest"}, - "output":{"shape":"FunctionEventInvokeConfig"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Configures options for asynchronous invocation on a function, version, or alias. If a configuration already exists for a function, version, or alias, this operation overwrites it. If you exclude any settings, they are removed. To set one option without affecting existing settings for other options, use UpdateFunctionEventInvokeConfig.
By default, Lambda retries an asynchronous invocation twice if the function returns an error. It retains events in a queue for up to six hours. When an event fails all processing attempts or stays in the asynchronous invocation queue for too long, Lambda discards it. To retain discarded events, configure a dead-letter queue with UpdateFunctionConfiguration.
To send an invocation record to a queue, topic, S3 bucket, function, or event bus, specify a destination. You can configure separate destinations for successful invocations (on-success) and events that fail all processing attempts (on-failure). You can configure destinations in addition to or instead of a dead-letter queue.
S3 buckets are supported only for on-failure destinations. To retain records of successful invocations, use another destination type.
Sets your function's recursive loop detection configuration.
When you configure a Lambda function to output to the same service or resource that invokes the function, it's possible to create an infinite recursive loop. For example, a Lambda function might write a message to an Amazon Simple Queue Service (Amazon SQS) queue, which then invokes the same function. This invocation causes the function to write another message to the queue, which in turn invokes the function again.
Lambda can detect certain types of recursive loops shortly after they occur. When Lambda detects a recursive loop and your function's recursive loop detection configuration is set to Terminate, it stops your function from being invoked and notifies you.
Adds a provisioned concurrency configuration to a function's alias or version.
", - "idempotent":true - }, - "PutRuntimeManagementConfig":{ - "name":"PutRuntimeManagementConfig", - "http":{ - "method":"PUT", - "requestUri":"/2021-07-20/functions/{FunctionName}/runtime-management-config", - "responseCode":200 - }, - "input":{"shape":"PutRuntimeManagementConfigRequest"}, - "output":{"shape":"PutRuntimeManagementConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Sets the runtime management configuration for a function's version. For more information, see Runtime updates.
" - }, - "RemoveLayerVersionPermission":{ - "name":"RemoveLayerVersionPermission", - "http":{ - "method":"DELETE", - "requestUri":"/2018-10-31/layers/{LayerName}/versions/{VersionNumber}/policy/{StatementId}", - "responseCode":204 - }, - "input":{"shape":"RemoveLayerVersionPermissionRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"PreconditionFailedException"} - ], - "documentation":"Removes a statement from the permissions policy for a version of an Lambda layer. For more information, see AddLayerVersionPermission.
" - }, - "RemovePermission":{ - "name":"RemovePermission", - "http":{ - "method":"DELETE", - "requestUri":"/2015-03-31/functions/{FunctionName}/policy/{StatementId}", - "responseCode":204 - }, - "input":{"shape":"RemovePermissionRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"PreconditionFailedException"} - ], - "documentation":"Revokes function-use permission from an Amazon Web Services service or another Amazon Web Services account. You can get the ID of the statement from the output of GetPolicy.
" - }, - "SendDurableExecutionCallbackFailure":{ - "name":"SendDurableExecutionCallbackFailure", - "http":{ - "method":"POST", - "requestUri":"/2025-12-01/durable-execution-callbacks/{CallbackId}/fail", - "responseCode":200 - }, - "input":{"shape":"SendDurableExecutionCallbackFailureRequest"}, - "output":{"shape":"SendDurableExecutionCallbackFailureResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ServiceException"}, - {"shape":"CallbackTimeoutException"} - ] - }, - "SendDurableExecutionCallbackHeartbeat":{ - "name":"SendDurableExecutionCallbackHeartbeat", - "http":{ - "method":"POST", - "requestUri":"/2025-12-01/durable-execution-callbacks/{CallbackId}/heartbeat", - "responseCode":200 - }, - "input":{"shape":"SendDurableExecutionCallbackHeartbeatRequest"}, - "output":{"shape":"SendDurableExecutionCallbackHeartbeatResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ServiceException"}, - {"shape":"CallbackTimeoutException"} - ] - }, - "SendDurableExecutionCallbackSuccess":{ - "name":"SendDurableExecutionCallbackSuccess", - "http":{ - "method":"POST", - "requestUri":"/2025-12-01/durable-execution-callbacks/{CallbackId}/succeed", - "responseCode":200 - }, - "input":{"shape":"SendDurableExecutionCallbackSuccessRequest"}, - "output":{"shape":"SendDurableExecutionCallbackSuccessResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ServiceException"}, - {"shape":"CallbackTimeoutException"} - ] - }, - "StopDurableExecution":{ - "name":"StopDurableExecution", - "http":{ - "method":"POST", - "requestUri":"/2025-12-01/durable-executions/{DurableExecutionArn}/stop", - "responseCode":200 - }, - "input":{"shape":"StopDurableExecutionRequest"}, - "output":{"shape":"StopDurableExecutionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ServiceException"}, - {"shape":"ResourceNotFoundException"} - ] - }, - "TagResource":{ - "name":"TagResource", - "http":{ - "method":"POST", - "requestUri":"/2017-03-31/tags/{Resource}", - "responseCode":204 - }, - "input":{"shape":"TagResourceRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Adds tags to a function, event source mapping, or code signing configuration.
" - }, - "UntagResource":{ - "name":"UntagResource", - "http":{ - "method":"DELETE", - "requestUri":"/2017-03-31/tags/{Resource}", - "responseCode":204 - }, - "input":{"shape":"UntagResourceRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Removes tags from a function, event source mapping, or code signing configuration.
" - }, - "UpdateAlias":{ - "name":"UpdateAlias", - "http":{ - "method":"PUT", - "requestUri":"/2015-03-31/functions/{FunctionName}/aliases/{Name}", - "responseCode":200 - }, - "input":{"shape":"UpdateAliasRequest"}, - "output":{"shape":"AliasConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"PreconditionFailedException"} - ], - "documentation":"Updates the configuration of a Lambda function alias.
" - }, - "UpdateCodeSigningConfig":{ - "name":"UpdateCodeSigningConfig", - "http":{ - "method":"PUT", - "requestUri":"/2020-04-22/code-signing-configs/{CodeSigningConfigArn}", - "responseCode":200 - }, - "input":{"shape":"UpdateCodeSigningConfigRequest"}, - "output":{"shape":"UpdateCodeSigningConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Update the code signing configuration. Changes to the code signing configuration take effect the next time a user tries to deploy a code package to the function.
" - }, - "UpdateEventSourceMapping":{ - "name":"UpdateEventSourceMapping", - "http":{ - "method":"PUT", - "requestUri":"/2015-03-31/event-source-mappings/{UUID}", - "responseCode":202 - }, - "input":{"shape":"UpdateEventSourceMappingRequest"}, - "output":{"shape":"EventSourceMappingConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceInUseException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Updates an event source mapping. You can change the function that Lambda invokes, or pause invocation and resume later from the same location.
For details about how to configure different event sources, see the following topics.
The following error handling options are available only for DynamoDB and Kinesis event sources:
BisectBatchOnFunctionError – If the function returns an error, split the batch in two and retry.
MaximumRecordAgeInSeconds – Discard records older than the specified age. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.
MaximumRetryAttempts – Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.
ParallelizationFactor – Process multiple batches from each shard concurrently.
For stream sources (DynamoDB, Kinesis, Amazon MSK, and self-managed Apache Kafka), the following option is also available:
OnFailure – Send discarded records to an Amazon SQS queue, Amazon SNS topic, or Amazon S3 bucket. For more information, see Adding a destination.
For information about which configuration parameters apply to each event source, see the following topics.
Updates a Lambda function's code. If code signing is enabled for the function, the code package must be signed by a trusted publisher. For more information, see Configuring code signing for Lambda.
If the function's package type is Image, then you must specify the code package in ImageUri as the URI of a container image in the Amazon ECR registry.
If the function's package type is Zip, then you must specify the deployment package as a .zip file archive. Enter the Amazon S3 bucket and key of the code .zip file location. You can also provide the function code inline using the ZipFile field.
The code in the deployment package must be compatible with the target instruction set architecture of the function (x86-64 or arm64).
The function's code is locked when you publish a version. You can't modify the code of a published version, only the unpublished version.
For a function defined as a container image, Lambda resolves the image tag to an image digest. In Amazon ECR, if you update the image tag to a new image, Lambda does not automatically update the function.
Modify the version-specific settings of a Lambda function.
When you update a function, Lambda provisions an instance of the function and its supporting resources. If your function connects to a VPC, this process can take a minute. During this time, you can't modify the function, but you can still invoke it. The LastUpdateStatus, LastUpdateStatusReason, and LastUpdateStatusReasonCode fields in the response from GetFunctionConfiguration indicate when the update is complete and the function is processing events with the new configuration. For more information, see Lambda function states.
These settings can vary between versions of a function and are locked when you publish a version. You can't modify the configuration of a published version, only the unpublished version.
To configure function concurrency, use PutFunctionConcurrency. To grant invoke permissions to an Amazon Web Services account or Amazon Web Services service, use AddPermission.
" - }, - "UpdateFunctionEventInvokeConfig":{ - "name":"UpdateFunctionEventInvokeConfig", - "http":{ - "method":"POST", - "requestUri":"/2019-09-25/functions/{FunctionName}/event-invoke-config", - "responseCode":200 - }, - "input":{"shape":"UpdateFunctionEventInvokeConfigRequest"}, - "output":{"shape":"FunctionEventInvokeConfig"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Updates the configuration for asynchronous invocation for a function, version, or alias.
To configure options for asynchronous invocation, use PutFunctionEventInvokeConfig.
" - }, - "UpdateFunctionUrlConfig":{ - "name":"UpdateFunctionUrlConfig", - "http":{ - "method":"PUT", - "requestUri":"/2021-10-31/functions/{FunctionName}/url", - "responseCode":200 - }, - "input":{"shape":"UpdateFunctionUrlConfigRequest"}, - "output":{"shape":"UpdateFunctionUrlConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"Updates the configuration for a Lambda function URL.
" - } - }, - "shapes":{ - "AccountLimit":{ - "type":"structure", - "members":{ - "TotalCodeSize":{ - "shape":"Long", - "documentation":"The amount of storage space that you can use for all deployment packages and layer archives.
" - }, - "CodeSizeUnzipped":{ - "shape":"Long", - "documentation":"The maximum size of a function's deployment package and layers when they're extracted.
" - }, - "CodeSizeZipped":{ - "shape":"Long", - "documentation":"The maximum size of a deployment package when it's uploaded directly to Lambda. Use Amazon S3 for larger files.
" - }, - "ConcurrentExecutions":{ - "shape":"Integer", - "documentation":"The maximum number of simultaneous function executions.
" - }, - "UnreservedConcurrentExecutions":{ - "shape":"UnreservedConcurrentExecutions", - "documentation":"The maximum number of simultaneous function executions, minus the capacity that's reserved for individual functions with PutFunctionConcurrency.
" - } - }, - "documentation":"Limits that are related to concurrency and storage. All file and storage sizes are in bytes.
" - }, - "AccountUsage":{ - "type":"structure", - "members":{ - "TotalCodeSize":{ - "shape":"Long", - "documentation":"The amount of storage space, in bytes, that's being used by deployment packages and layer archives.
" - }, - "FunctionCount":{ - "shape":"Long", - "documentation":"The number of Lambda functions.
" - } - }, - "documentation":"The number of functions and amount of storage in use.
" - }, - "Action":{ - "type":"string", - "pattern":"(lambda:[*]|lambda:[a-zA-Z]+|[*])" - }, - "AddLayerVersionPermissionRequest":{ - "type":"structure", - "required":[ - "LayerName", - "VersionNumber", - "StatementId", - "Action", - "Principal" - ], - "members":{ - "LayerName":{ - "shape":"LayerName", - "documentation":"The name or Amazon Resource Name (ARN) of the layer.
", - "location":"uri", - "locationName":"LayerName" - }, - "VersionNumber":{ - "shape":"LayerVersionNumber", - "documentation":"The version number.
", - "location":"uri", - "locationName":"VersionNumber" - }, - "StatementId":{ - "shape":"StatementId", - "documentation":"An identifier that distinguishes the policy from others on the same layer version.
" - }, - "Action":{ - "shape":"LayerPermissionAllowedAction", - "documentation":"The API action that grants access to the layer. For example, lambda:GetLayerVersion.
An account ID, or * to grant layer usage permission to all accounts in an organization, or all Amazon Web Services accounts (if organizationId is not specified). For the last case, make sure that you really do want all Amazon Web Services accounts to have usage permission to this layer.
With the principal set to *, grant permission to all accounts in the specified organization.
Only update the policy if the revision ID matches the ID specified. Use this option to avoid modifying a policy that has changed since you last read it.
", - "location":"querystring", - "locationName":"RevisionId" - } - } - }, - "AddLayerVersionPermissionResponse":{ - "type":"structure", - "members":{ - "Statement":{ - "shape":"String", - "documentation":"The permission statement.
" - }, - "RevisionId":{ - "shape":"String", - "documentation":"A unique identifier for the current revision of the policy.
" - } - } - }, - "AddPermissionRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "StatementId", - "Action", - "Principal" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function, version, or alias.
Name formats
Function name – my-function (name-only), my-function:v1 (with alias).
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "StatementId":{ - "shape":"StatementId", - "documentation":"A statement identifier that differentiates the statement from others in the same policy.
" - }, - "Action":{ - "shape":"Action", - "documentation":"The action that the principal can use on the function. For example, lambda:InvokeFunction or lambda:GetFunction.
The Amazon Web Services service, Amazon Web Services account, IAM user, or IAM role that invokes the function. If you specify a service, use SourceArn or SourceAccount to limit who can invoke the function through that service.
For Amazon Web Services services, the ARN of the Amazon Web Services resource that invokes the function. For example, an Amazon S3 bucket or Amazon SNS topic.
Note that Lambda configures the comparison using the StringLike operator.
For Amazon Web Services services, the ID of the Amazon Web Services account that owns the resource. Use this together with SourceArn to ensure that the specified account owns the resource. It is possible for an Amazon S3 bucket to be deleted by its owner and recreated by another account.
For Alexa Smart Home functions, a token that the invoker must supply.
" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"Specify a version or alias to add permissions to a published version of the function.
", - "location":"querystring", - "locationName":"Qualifier" - }, - "RevisionId":{ - "shape":"String", - "documentation":"Update the policy only if the revision ID matches the ID that's specified. Use this option to avoid modifying a policy that has changed since you last read it.
" - }, - "PrincipalOrgID":{ - "shape":"PrincipalOrgID", - "documentation":"The identifier for your organization in Organizations. Use this to grant permissions to all the Amazon Web Services accounts under this organization.
" - }, - "FunctionUrlAuthType":{ - "shape":"FunctionUrlAuthType", - "documentation":"The type of authentication that your function URL uses. Set to AWS_IAM if you want to restrict access to authenticated users only. Set to NONE if you want to bypass IAM authentication to create a public endpoint. For more information, see Security and auth model for Lambda function URLs.
The permission statement that's added to the function policy.
" - } - } - }, - "AdditionalVersion":{ - "type":"string", - "max":1024, - "min":1, - "pattern":"[0-9]+" - }, - "AdditionalVersionWeights":{ - "type":"map", - "key":{"shape":"AdditionalVersion"}, - "value":{"shape":"Weight"} - }, - "Alias":{ - "type":"string", - "max":128, - "min":1, - "pattern":"(?!^[0-9]+$)([a-zA-Z0-9-_]+)" - }, - "AliasConfiguration":{ - "type":"structure", - "members":{ - "AliasArn":{ - "shape":"FunctionArn", - "documentation":"The Amazon Resource Name (ARN) of the alias.
" - }, - "Name":{ - "shape":"Alias", - "documentation":"The name of the alias.
" - }, - "FunctionVersion":{ - "shape":"Version", - "documentation":"The function version that the alias invokes.
" - }, - "Description":{ - "shape":"Description", - "documentation":"A description of the alias.
" - }, - "RoutingConfig":{ - "shape":"AliasRoutingConfiguration", - "documentation":"The routing configuration of the alias.
" - }, - "RevisionId":{ - "shape":"String", - "documentation":"A unique identifier that changes when you update the alias.
" - } - }, - "documentation":"Provides configuration information about a Lambda function alias.
" - }, - "AliasList":{ - "type":"list", - "member":{"shape":"AliasConfiguration"} - }, - "AliasRoutingConfiguration":{ - "type":"structure", - "members":{ - "AdditionalVersionWeights":{ - "shape":"AdditionalVersionWeights", - "documentation":"The second version, and the percentage of traffic that's routed to it.
" - } - }, - "documentation":"The traffic-shifting configuration of a Lambda function alias.
" - }, - "AllowCredentials":{ - "type":"boolean", - "box":true - }, - "AllowMethodsList":{ - "type":"list", - "member":{"shape":"Method"}, - "max":6, - "min":0 - }, - "AllowOriginsList":{ - "type":"list", - "member":{"shape":"Origin"}, - "max":100, - "min":0 - }, - "AllowedPublishers":{ - "type":"structure", - "required":["SigningProfileVersionArns"], - "members":{ - "SigningProfileVersionArns":{ - "shape":"SigningProfileVersionArns", - "documentation":"The Amazon Resource Name (ARN) for each of the signing profiles. A signing profile defines a trusted user who can sign a code package.
" - } - }, - "documentation":"List of signing profiles that can sign a code package.
" - }, - "AmazonManagedKafkaEventSourceConfig":{ - "type":"structure", - "members":{ - "ConsumerGroupId":{ - "shape":"URI", - "documentation":"The identifier for the Kafka consumer group to join. The consumer group ID must be unique among all your Kafka event sources. After creating a Kafka event source mapping with the consumer group ID specified, you cannot update this value. For more information, see Customizable consumer group ID.
" - }, - "SchemaRegistryConfig":{ - "shape":"KafkaSchemaRegistryConfig", - "documentation":"Specific configuration settings for a Kafka schema registry.
" - } - }, - "documentation":"Specific configuration settings for an Amazon Managed Streaming for Apache Kafka (Amazon MSK) event source.
" - }, - "ApplicationLogLevel":{ - "type":"string", - "enum":[ - "TRACE", - "DEBUG", - "INFO", - "WARN", - "ERROR", - "FATAL" - ] - }, - "Architecture":{ - "type":"string", - "enum":[ - "x86_64", - "arm64" - ] - }, - "ArchitecturesList":{ - "type":"list", - "member":{"shape":"Architecture"}, - "max":1, - "min":1 - }, - "Arn":{ - "type":"string", - "pattern":"arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\\-])+:([a-z]{2}(-gov)?-[a-z]+-\\d{1})?:(\\d{12})?:(.*)" - }, - "AttemptCount":{ - "type":"integer", - "min":0 - }, - "BatchSize":{ - "type":"integer", - "box":true, - "max":10000, - "min":1 - }, - "BinaryOperationPayload":{ - "type":"blob", - "max":262144, - "min":0, - "sensitive":true - }, - "BisectBatchOnFunctionError":{ - "type":"boolean", - "box":true - }, - "Blob":{ - "type":"blob", - "sensitive":true - }, - "BlobStream":{ - "type":"blob", - "streaming":true - }, - "Boolean":{"type":"boolean"}, - "CallbackDetails":{ - "type":"structure", - "members":{ - "CallbackId":{"shape":"CallbackId"}, - "Result":{"shape":"OperationPayload"}, - "Error":{"shape":"ErrorObject"} - } - }, - "CallbackFailedDetails":{ - "type":"structure", - "required":["Error"], - "members":{ - "Error":{"shape":"EventError"} - } - }, - "CallbackId":{ - "type":"string", - "max":1024, - "min":1, - "pattern":"[A-Za-z0-9+/]+={0,2}" - }, - "CallbackOptions":{ - "type":"structure", - "members":{ - "TimeoutSeconds":{"shape":"DurationSeconds"}, - "HeartbeatTimeoutSeconds":{"shape":"DurationSeconds"} - } - }, - "CallbackStartedDetails":{ - "type":"structure", - "required":["CallbackId"], - "members":{ - "CallbackId":{"shape":"CallbackId"}, - "HeartbeatTimeout":{"shape":"DurationSeconds"}, - "Timeout":{"shape":"DurationSeconds"} - } - }, - "CallbackSucceededDetails":{ - "type":"structure", - "required":["Result"], - "members":{ - "Result":{"shape":"EventResult"} - } - }, - "CallbackTimedOutDetails":{ - "type":"structure", - "required":["Error"], - "members":{ - "Error":{"shape":"EventError"} - } - }, - "CallbackTimeoutException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "ChainedInvokeDetails":{ - "type":"structure", - "members":{ - "Result":{"shape":"OperationPayload"}, - "Error":{"shape":"ErrorObject"} - } - }, - "ChainedInvokeFailedDetails":{ - "type":"structure", - "required":["Error"], - "members":{ - "Error":{"shape":"EventError"} - } - }, - "ChainedInvokeOptions":{ - "type":"structure", - "members":{ - "FunctionName":{"shape":"FunctionName"} - } - }, - "ChainedInvokePendingDetails":{ - "type":"structure", - "required":[ - "Input", - "FunctionName" - ], - "members":{ - "Input":{"shape":"EventInput"}, - "FunctionName":{"shape":"FunctionName"} - } - }, - "ChainedInvokeStartedDetails":{ - "type":"structure", - "members":{ - "DurableExecutionArn":{"shape":"DurableExecutionArn"} - } - }, - "ChainedInvokeStoppedDetails":{ - "type":"structure", - "required":["Error"], - "members":{ - "Error":{"shape":"EventError"} - } - }, - "ChainedInvokeSucceededDetails":{ - "type":"structure", - "required":["Result"], - "members":{ - "Result":{"shape":"EventResult"} - } - }, - "ChainedInvokeTimedOutDetails":{ - "type":"structure", - "required":["Error"], - "members":{ - "Error":{"shape":"EventError"} - } - }, - "CheckpointDurableExecutionRequest":{ - "type":"structure", - "required":[ - "DurableExecutionArn", - "CheckpointToken" - ], - "members":{ - "DurableExecutionArn":{ - "shape":"DurableExecutionArn", - 
"location":"uri", - "locationName":"DurableExecutionArn" - }, - "CheckpointToken":{"shape":"CheckpointToken"}, - "Updates":{"shape":"OperationUpdates"}, - "ClientToken":{"shape":"ClientToken"} - } - }, - "CheckpointDurableExecutionResponse":{ - "type":"structure", - "required":["NewExecutionState"], - "members":{ - "CheckpointToken":{"shape":"CheckpointToken"}, - "NewExecutionState":{"shape":"CheckpointUpdatedExecutionState"} - } - }, - "CheckpointToken":{ - "type":"string", - "max":2048, - "min":1, - "pattern":"[A-Za-z0-9+/]+={0,2}" - }, - "CheckpointUpdatedExecutionState":{ - "type":"structure", - "members":{ - "Operations":{"shape":"Operations"}, - "NextMarker":{"shape":"String"} - } - }, - "ClientToken":{ - "type":"string", - "max":64, - "min":1, - "pattern":"[\\x21-\\x7E]+" - }, - "CodeSigningConfig":{ - "type":"structure", - "required":[ - "CodeSigningConfigId", - "CodeSigningConfigArn", - "AllowedPublishers", - "CodeSigningPolicies", - "LastModified" - ], - "members":{ - "CodeSigningConfigId":{ - "shape":"CodeSigningConfigId", - "documentation":"Unique identifer for the Code signing configuration.
" - }, - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"The Amazon Resource Name (ARN) of the Code signing configuration.
" - }, - "Description":{ - "shape":"Description", - "documentation":"Code signing configuration description.
" - }, - "AllowedPublishers":{ - "shape":"AllowedPublishers", - "documentation":"List of allowed publishers.
" - }, - "CodeSigningPolicies":{ - "shape":"CodeSigningPolicies", - "documentation":"The code signing policy controls the validation failure action for signature mismatch or expiry.
" - }, - "LastModified":{ - "shape":"Timestamp", - "documentation":"The date and time that the Code signing configuration was last modified, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).
" - } - }, - "documentation":"Details about a Code signing configuration.
" - }, - "CodeSigningConfigArn":{ - "type":"string", - "max":200, - "min":0, - "pattern":"arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:code-signing-config:csc-[a-z0-9]{17}" - }, - "CodeSigningConfigId":{ - "type":"string", - "pattern":"csc-[a-zA-Z0-9-_\\.]{17}" - }, - "CodeSigningConfigList":{ - "type":"list", - "member":{"shape":"CodeSigningConfig"} - }, - "CodeSigningConfigNotFoundException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"The specified code signing configuration does not exist.
", - "error":{ - "httpStatusCode":404, - "senderFault":true - }, - "exception":true - }, - "CodeSigningPolicies":{ - "type":"structure", - "members":{ - "UntrustedArtifactOnDeployment":{ - "shape":"CodeSigningPolicy", - "documentation":"Code signing configuration policy for deployment validation failure. If you set the policy to Enforce, Lambda blocks the deployment request if signature validation checks fail. If you set the policy to Warn, Lambda allows the deployment and creates a CloudWatch log.
Default value: Warn
Code signing configuration policies specify the validation failure action for signature mismatch or expiry.
" - }, - "CodeSigningPolicy":{ - "type":"string", - "enum":[ - "Warn", - "Enforce" - ] - }, - "CodeStorageExceededException":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"String", - "documentation":"The exception type.
" - }, - "message":{"shape":"String"} - }, - "documentation":"Your Amazon Web Services account has exceeded its maximum total code size. For more information, see Lambda quotas.
", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "CodeVerificationFailedException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"The code signature failed one or more of the validation checks for signature mismatch or expiry, and the code signing policy is set to ENFORCE. Lambda blocks the deployment.
", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "CollectionName":{ - "type":"string", - "max":57, - "min":1, - "pattern":"(^(?!(system\\x2e)))(^[_a-zA-Z0-9])([^$]*)" - }, - "CompatibleArchitectures":{ - "type":"list", - "member":{"shape":"Architecture"}, - "max":2, - "min":0 - }, - "CompatibleRuntimes":{ - "type":"list", - "member":{"shape":"Runtime"}, - "max":15, - "min":0 - }, - "Concurrency":{ - "type":"structure", - "members":{ - "ReservedConcurrentExecutions":{ - "shape":"ReservedConcurrentExecutions", - "documentation":"The number of concurrent executions that are reserved for this function. For more information, see Managing Lambda reserved concurrency.
" - } - } - }, - "ContextDetails":{ - "type":"structure", - "members":{ - "ReplayChildren":{"shape":"ReplayChildren"}, - "Result":{"shape":"OperationPayload"}, - "Error":{"shape":"ErrorObject"} - } - }, - "ContextFailedDetails":{ - "type":"structure", - "required":["Error"], - "members":{ - "Error":{"shape":"EventError"} - } - }, - "ContextOptions":{ - "type":"structure", - "members":{ - "ReplayChildren":{"shape":"ReplayChildren"} - } - }, - "ContextStartedDetails":{ - "type":"structure", - "members":{} - }, - "ContextSucceededDetails":{ - "type":"structure", - "required":["Result"], - "members":{ - "Result":{"shape":"EventResult"} - } - }, - "Cors":{ - "type":"structure", - "members":{ - "AllowCredentials":{ - "shape":"AllowCredentials", - "documentation":"Whether to allow cookies or other credentials in requests to your function URL. The default is false.
The HTTP headers that origins can include in requests to your function URL. For example: Date, Keep-Alive, X-Custom-Header.
The HTTP methods that are allowed when calling your function URL. For example: GET, POST, DELETE, or the wildcard character (*).
The origins that can access your function URL. You can list any number of specific origins, separated by a comma. For example: https://www.example.com, http://localhost:60905.
Alternatively, you can grant access to all origins using the wildcard character (*).
The HTTP headers in your function response that you want to expose to origins that call your function URL. For example: Date, Keep-Alive, X-Custom-Header.
The maximum amount of time, in seconds, that web browsers can cache results of a preflight request. By default, this is set to 0, which means that the browser doesn't cache results.
The cross-origin resource sharing (CORS) settings for your Lambda function URL. Use CORS to grant access to your function URL from any origin. You can also use CORS to control access for specific HTTP headers and methods in requests to your function URL.
" - }, - "CreateAliasRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "Name", - "FunctionVersion" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name - MyFunction.
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.
Partial ARN - 123456789012:function:MyFunction.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Name":{ - "shape":"Alias", - "documentation":"The name of the alias.
" - }, - "FunctionVersion":{ - "shape":"Version", - "documentation":"The function version that the alias invokes.
" - }, - "Description":{ - "shape":"Description", - "documentation":"A description of the alias.
" - }, - "RoutingConfig":{ - "shape":"AliasRoutingConfiguration", - "documentation":"The routing configuration of the alias.
" - } - } - }, - "CreateCodeSigningConfigRequest":{ - "type":"structure", - "required":["AllowedPublishers"], - "members":{ - "Description":{ - "shape":"Description", - "documentation":"Descriptive name for this code signing configuration.
" - }, - "AllowedPublishers":{ - "shape":"AllowedPublishers", - "documentation":"Signing profiles for this code signing configuration.
" - }, - "CodeSigningPolicies":{ - "shape":"CodeSigningPolicies", - "documentation":"The code signing policies define the actions to take if the validation checks fail.
" - }, - "Tags":{ - "shape":"Tags", - "documentation":"A list of tags to add to the code signing configuration.
" - } - } - }, - "CreateCodeSigningConfigResponse":{ - "type":"structure", - "required":["CodeSigningConfig"], - "members":{ - "CodeSigningConfig":{ - "shape":"CodeSigningConfig", - "documentation":"The code signing configuration.
" - } - } - }, - "CreateEventSourceMappingRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "EventSourceArn":{ - "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the event source.
Amazon Kinesis – The ARN of the data stream or a stream consumer.
Amazon DynamoDB Streams – The ARN of the stream.
Amazon Simple Queue Service – The ARN of the queue.
Amazon Managed Streaming for Apache Kafka – The ARN of the cluster or the ARN of the VPC connection (for cross-account event source mappings).
Amazon MQ – The ARN of the broker.
Amazon DocumentDB – The ARN of the DocumentDB change stream.
The name or ARN of the Lambda function.
Name formats
Function name – MyFunction.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:MyFunction.
Version or Alias ARN – arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD.
Partial ARN – 123456789012:function:MyFunction.
The length constraint applies only to the full ARN. If you specify only the function name, it's limited to 64 characters in length.
" - }, - "Enabled":{ - "shape":"Enabled", - "documentation":"When true, the event source mapping is active. When false, Lambda pauses polling and invocation.
Default: True
" - }, - "BatchSize":{ - "shape":"BatchSize", - "documentation":"The maximum number of records in each batch that Lambda pulls from your stream or queue and sends to your function. Lambda passes all of the records in the batch to the function in a single call, up to the payload limit for synchronous invocation (6 MB).
Amazon Kinesis – Default 100. Max 10,000.
Amazon DynamoDB Streams – Default 100. Max 10,000.
Amazon Simple Queue Service – Default 10. For standard queues the max is 10,000. For FIFO queues the max is 10.
Amazon Managed Streaming for Apache Kafka – Default 100. Max 10,000.
Self-managed Apache Kafka – Default 100. Max 10,000.
Amazon MQ (ActiveMQ and RabbitMQ) – Default 100. Max 10,000.
DocumentDB – Default 100. Max 10,000.
An object that defines the filter criteria that determine whether Lambda should process an event. For more information, see Lambda event filtering.
" - }, - "MaximumBatchingWindowInSeconds":{ - "shape":"MaximumBatchingWindowInSeconds", - "documentation":"The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function. You can configure MaximumBatchingWindowInSeconds to any value from 0 seconds to 300 seconds in increments of seconds.
For Kinesis, DynamoDB, and Amazon SQS event sources, the default batching window is 0 seconds. For Amazon MSK, Self-managed Apache Kafka, Amazon MQ, and DocumentDB event sources, the default batching window is 500 ms. Note that because you can only change MaximumBatchingWindowInSeconds in increments of seconds, you cannot revert back to the 500 ms default batching window after you have changed it. To restore the default batching window, you must create a new event source mapping.
Related setting: For Kinesis, DynamoDB, and Amazon SQS event sources, when you set BatchSize to a value greater than 10, you must set MaximumBatchingWindowInSeconds to at least 1.
(Kinesis and DynamoDB Streams only) The number of batches to process from each shard concurrently.
" - }, - "StartingPosition":{ - "shape":"EventSourcePosition", - "documentation":"The position in a stream from which to start reading. Required for Amazon Kinesis and Amazon DynamoDB Stream event sources. AT_TIMESTAMP is supported only for Amazon Kinesis streams, Amazon DocumentDB, Amazon MSK, and self-managed Apache Kafka.
With StartingPosition set to AT_TIMESTAMP, the time from which to start reading. StartingPositionTimestamp cannot be in the future.
(Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Kafka only) A configuration object that specifies the destination of an event after Lambda processes it.
" - }, - "MaximumRecordAgeInSeconds":{ - "shape":"MaximumRecordAgeInSeconds", - "documentation":"(Kinesis and DynamoDB Streams only) Discard records older than the specified age. The default value is infinite (-1).
" - }, - "BisectBatchOnFunctionError":{ - "shape":"BisectBatchOnFunctionError", - "documentation":"(Kinesis and DynamoDB Streams only) If the function returns an error, split the batch in two and retry.
" - }, - "MaximumRetryAttempts":{ - "shape":"MaximumRetryAttemptsEventSourceMapping", - "documentation":"(Kinesis and DynamoDB Streams only) Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.
" - }, - "Tags":{ - "shape":"Tags", - "documentation":"A list of tags to apply to the event source mapping.
" - }, - "TumblingWindowInSeconds":{ - "shape":"TumblingWindowInSeconds", - "documentation":"(Kinesis and DynamoDB Streams only) The duration in seconds of a processing window for DynamoDB and Kinesis Streams event sources. A value of 0 seconds indicates no tumbling window.
" - }, - "Topics":{ - "shape":"Topics", - "documentation":"The name of the Kafka topic.
" - }, - "Queues":{ - "shape":"Queues", - "documentation":"(MQ) The name of the Amazon MQ broker destination queue to consume.
" - }, - "SourceAccessConfigurations":{ - "shape":"SourceAccessConfigurations", - "documentation":"An array of authentication protocols or VPC components required to secure your event source.
" - }, - "SelfManagedEventSource":{ - "shape":"SelfManagedEventSource", - "documentation":"The self-managed Apache Kafka cluster to receive records from.
" - }, - "FunctionResponseTypes":{ - "shape":"FunctionResponseTypeList", - "documentation":"(Kinesis, DynamoDB Streams, and Amazon SQS) A list of current response type enums applied to the event source mapping.
" - }, - "AmazonManagedKafkaEventSourceConfig":{ - "shape":"AmazonManagedKafkaEventSourceConfig", - "documentation":"Specific configuration settings for an Amazon Managed Streaming for Apache Kafka (Amazon MSK) event source.
" - }, - "SelfManagedKafkaEventSourceConfig":{ - "shape":"SelfManagedKafkaEventSourceConfig", - "documentation":"Specific configuration settings for a self-managed Apache Kafka event source.
" - }, - "ScalingConfig":{ - "shape":"ScalingConfig", - "documentation":"(Amazon SQS only) The scaling configuration for the event source. For more information, see Configuring maximum concurrency for Amazon SQS event sources.
" - }, - "DocumentDBEventSourceConfig":{ - "shape":"DocumentDBEventSourceConfig", - "documentation":"Specific configuration settings for a DocumentDB event source.
" - }, - "KMSKeyArn":{ - "shape":"KMSKeyArn", - "documentation":"The ARN of the Key Management Service (KMS) customer managed key that Lambda uses to encrypt your function's filter criteria. By default, Lambda does not encrypt your filter criteria object. Specify this property to encrypt data using your own customer managed key.
" - }, - "MetricsConfig":{ - "shape":"EventSourceMappingMetricsConfig", - "documentation":"The metrics configuration for your event source. For more information, see Event source mapping metrics.
" - }, - "ProvisionedPollerConfig":{ - "shape":"ProvisionedPollerConfig", - "documentation":"(Amazon MSK and self-managed Apache Kafka only) The provisioned mode configuration for the event source. For more information, see provisioned mode.
" - } - } - }, - "CreateFunctionRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "Role", - "Code" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
" - }, - "Runtime":{ - "shape":"Runtime", - "documentation":"The identifier of the function's runtime. Runtime is required if the deployment package is a .zip file archive. Specifying a runtime results in an error if you're deploying a function using a container image.
The following list includes deprecated runtimes. Lambda blocks creating new functions and updating existing functions shortly after each runtime is deprecated. For more information, see Runtime use after deprecation.
For a list of all currently supported runtimes, see Supported runtimes.
" - }, - "Role":{ - "shape":"RoleArn", - "documentation":"The Amazon Resource Name (ARN) of the function's execution role.
" - }, - "Handler":{ - "shape":"Handler", - "documentation":"The name of the method within your code that Lambda calls to run your function. Handler is required if the deployment package is a .zip file archive. The format includes the file name. It can also include namespaces and other qualifiers, depending on the runtime. For more information, see Lambda programming model.
" - }, - "Code":{ - "shape":"FunctionCode", - "documentation":"The code for the function.
" - }, - "Description":{ - "shape":"Description", - "documentation":"A description of the function.
" - }, - "Timeout":{ - "shape":"Timeout", - "documentation":"The amount of time (in seconds) that Lambda allows a function to run before stopping it. The default is 3 seconds. The maximum allowed value is 900 seconds. For more information, see Lambda execution environment.
" - }, - "MemorySize":{ - "shape":"MemorySize", - "documentation":"The amount of memory available to the function at runtime. Increasing the function memory also increases its CPU allocation. The default value is 128 MB. The value can be any multiple of 1 MB.
" - }, - "Publish":{ - "shape":"Boolean", - "documentation":"Set to true to publish the first version of the function during creation.
" - }, - "VpcConfig":{ - "shape":"VpcConfig", - "documentation":"For network connectivity to Amazon Web Services resources in a VPC, specify a list of security groups and subnets in the VPC. When you connect a function to a VPC, it can access resources and the internet only through that VPC. For more information, see Configuring a Lambda function to access resources in a VPC.
" - }, - "PackageType":{ - "shape":"PackageType", - "documentation":"The type of deployment package. Set to Image for container image and set to Zip for .zip file archive.
A dead-letter queue configuration that specifies the queue or topic where Lambda sends asynchronous events when they fail processing. For more information, see Dead-letter queues.
" - }, - "Environment":{ - "shape":"Environment", - "documentation":"Environment variables that are accessible from function code during execution.
" - }, - "KMSKeyArn":{ - "shape":"KMSKeyArn", - "documentation":"The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt the following resources:
The function's environment variables.
The function's Lambda SnapStart snapshots.
When used with SourceKMSKeyArn, the unzipped version of the .zip deployment package that's used for function invocations. For more information, see Specifying a customer managed key for Lambda.
The optimized version of the container image that's used for function invocations. Note that this is not the same key that's used to protect your container image in the Amazon Elastic Container Registry (Amazon ECR). For more information, see Function lifecycle.
If you don't provide a customer managed key, Lambda uses an Amazon Web Services owned key or an Amazon Web Services managed key.
" - }, - "TracingConfig":{ - "shape":"TracingConfig", - "documentation":"Set Mode to Active to sample and trace a subset of incoming requests with X-Ray.
A list of tags to apply to the function.
" - }, - "Layers":{ - "shape":"LayerList", - "documentation":"A list of function layers to add to the function's execution environment. Specify each layer by its ARN, including the version.
" - }, - "FileSystemConfigs":{ - "shape":"FileSystemConfigList", - "documentation":"Connection settings for an Amazon EFS file system.
" - }, - "ImageConfig":{ - "shape":"ImageConfig", - "documentation":"Container image configuration values that override the values in the container image Dockerfile.
" - }, - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"To enable code signing for this function, specify the ARN of a code-signing configuration. A code-signing configuration includes a set of signing profiles, which define the trusted publishers for this function.
" - }, - "Architectures":{ - "shape":"ArchitecturesList", - "documentation":"The instruction set architecture that the function supports. Enter a string array with one of the valid values (arm64 or x86_64). The default value is x86_64.
The size of the function's /tmp directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).
The function's SnapStart setting.
" - }, - "LoggingConfig":{ - "shape":"LoggingConfig", - "documentation":"The function's Amazon CloudWatch Logs configuration settings.
" - }, - "DurableConfig":{"shape":"DurableConfig"} - } - }, - "CreateFunctionUrlConfigRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "AuthType" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"FunctionUrlQualifier", - "documentation":"The alias name.
", - "location":"querystring", - "locationName":"Qualifier" - }, - "AuthType":{ - "shape":"FunctionUrlAuthType", - "documentation":"The type of authentication that your function URL uses. Set to AWS_IAM if you want to restrict access to authenticated users only. Set to NONE if you want to bypass IAM authentication to create a public endpoint. For more information, see Security and auth model for Lambda function URLs.
The cross-origin resource sharing (CORS) settings for your function URL.
" - }, - "InvokeMode":{ - "shape":"InvokeMode", - "documentation":"Use one of the following options:
BUFFERED – This is the default option. Lambda invokes your function using the Invoke API operation. Invocation results are available when the payload is complete. The maximum payload size is 6 MB.
RESPONSE_STREAM – Your function streams payload results as they become available. Lambda invokes your function using the InvokeWithResponseStream API operation. The maximum response payload size is 20 MB; however, you can request a quota increase.
The HTTP URL endpoint for your function.
" - }, - "FunctionArn":{ - "shape":"FunctionArn", - "documentation":"The Amazon Resource Name (ARN) of your function.
" - }, - "AuthType":{ - "shape":"FunctionUrlAuthType", - "documentation":"The type of authentication that your function URL uses. Set to AWS_IAM if you want to restrict access to authenticated users only. Set to NONE if you want to bypass IAM authentication to create a public endpoint. For more information, see Security and auth model for Lambda function URLs.
The cross-origin resource sharing (CORS) settings for your function URL.
" - }, - "CreationTime":{ - "shape":"Timestamp", - "documentation":"When the function URL was created, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).
" - }, - "InvokeMode":{ - "shape":"InvokeMode", - "documentation":"Use one of the following options:
BUFFERED – This is the default option. Lambda invokes your function using the Invoke API operation. Invocation results are available when the payload is complete. The maximum payload size is 6 MB.
RESPONSE_STREAM – Your function streams payload results as they become available. Lambda invokes your function using the InvokeWithResponseStream API operation. The maximum response payload size is 20 MB; however, you can request a quota increase.
The Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic.
" - } - }, - "documentation":"The dead-letter queue for failed asynchronous invocations.
" - }, - "DeleteAliasRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "Name" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name - MyFunction.
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.
Partial ARN - 123456789012:function:MyFunction.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Name":{ - "shape":"Alias", - "documentation":"The name of the alias.
", - "location":"uri", - "locationName":"Name" - } - } - }, - "DeleteCodeSigningConfigRequest":{ - "type":"structure", - "required":["CodeSigningConfigArn"], - "members":{ - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"The The Amazon Resource Name (ARN) of the code signing configuration.
", - "location":"uri", - "locationName":"CodeSigningConfigArn" - } - } - }, - "DeleteCodeSigningConfigResponse":{ - "type":"structure", - "members":{} - }, - "DeleteEventSourceMappingRequest":{ - "type":"structure", - "required":["UUID"], - "members":{ - "UUID":{ - "shape":"String", - "documentation":"The identifier of the event source mapping.
", - "location":"uri", - "locationName":"UUID" - } - } - }, - "DeleteFunctionCodeSigningConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name - MyFunction.
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.
Partial ARN - 123456789012:function:MyFunction.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - } - } - }, - "DeleteFunctionConcurrencyRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - } - } - }, - "DeleteFunctionEventInvokeConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function, version, or alias.
Name formats
Function name - my-function (name-only), my-function:v1 (with alias).
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN - 123456789012:function:my-function.
You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"A version number or alias name.
", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "DeleteFunctionRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function or version.
Name formats
Function name – my-function (name-only), my-function:1 (with version).
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"Specify a version to delete. You can't delete a version that an alias references.
", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "DeleteFunctionUrlConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"FunctionUrlQualifier", - "documentation":"The alias name.
", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "DeleteLayerVersionRequest":{ - "type":"structure", - "required":[ - "LayerName", - "VersionNumber" - ], - "members":{ - "LayerName":{ - "shape":"LayerName", - "documentation":"The name or Amazon Resource Name (ARN) of the layer.
", - "location":"uri", - "locationName":"LayerName" - }, - "VersionNumber":{ - "shape":"LayerVersionNumber", - "documentation":"The version number.
", - "location":"uri", - "locationName":"VersionNumber" - } - } - }, - "DeleteProvisionedConcurrencyConfigRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "Qualifier" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"The version number or alias name.
", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "Description":{ - "type":"string", - "max":256, - "min":0 - }, - "DestinationArn":{ - "type":"string", - "max":350, - "min":0, - "pattern":"$|arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\\-])+:([a-z]{2}(-gov)?-[a-z]+-\\d{1})?:(\\d{12})?:(.*)" - }, - "DestinationConfig":{ - "type":"structure", - "members":{ - "OnSuccess":{ - "shape":"OnSuccess", - "documentation":"The destination configuration for successful invocations. Not supported in CreateEventSourceMapping or UpdateEventSourceMapping.
The destination configuration for failed invocations.
" - } - }, - "documentation":"A configuration object that specifies the destination of an event after Lambda processes it. For more information, see Adding a destination.
" - }, - "DocumentDBEventSourceConfig":{ - "type":"structure", - "members":{ - "DatabaseName":{ - "shape":"DatabaseName", - "documentation":"The name of the database to consume within the DocumentDB cluster.
" - }, - "CollectionName":{ - "shape":"CollectionName", - "documentation":"The name of the collection to consume within the database. If you do not specify a collection, Lambda consumes all collections.
" - }, - "FullDocument":{ - "shape":"FullDocument", - "documentation":"Determines what DocumentDB sends to your event stream during document update operations. If set to UpdateLookup, DocumentDB sends a delta describing the changes, along with a copy of the entire document. Otherwise, DocumentDB sends only a partial document that contains the changes.
" - } - }, - "documentation":"Specific configuration settings for a DocumentDB event source.
" - }, - "DurableConfig":{ - "type":"structure", - "members":{ - "RetentionPeriodInDays":{"shape":"RetentionPeriodInDays"}, - "ExecutionTimeout":{"shape":"ExecutionTimeout"} - } - }, - "DurableExecutionAlreadyStartedException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "error":{ - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - "DurableExecutionArn":{ - "type":"string", - "max":1024, - "min":1, - "pattern":"arn:([a-zA-Z0-9-]+):lambda:([a-zA-Z0-9-]+):(\\d{12}):function:([a-zA-Z0-9_-]+):(\\$LATEST(?:\\.PUBLISHED)?|[0-9]+)/durable-execution/([a-zA-Z0-9_-]+)/([a-zA-Z0-9_-]+)" - }, - "DurableExecutionName":{ - "type":"string", - "max":64, - "min":1, - "pattern":"[a-zA-Z0-9-_]+" - }, - "DurableExecutions":{ - "type":"list", - "member":{"shape":"Execution"} - }, - "DurationSeconds":{ - "type":"integer", - "box":true, - "min":0 - }, - "EC2AccessDeniedException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"Need additional permissions to configure VPC settings.
", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "EC2ThrottledException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"Amazon EC2 throttled Lambda during Lambda function initialization using the execution role provided for the function.
", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "EC2UnexpectedException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"}, - "EC2ErrorCode":{"shape":"String"} - }, - "documentation":"Lambda received an unexpected Amazon EC2 client exception while setting up for the Lambda function.
", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "EFSIOException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"An error occurred when reading from or writing to a connected file system.
", - "error":{ - "httpStatusCode":410, - "senderFault":true - }, - "exception":true - }, - "EFSMountConnectivityException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"The Lambda function couldn't make a network connection to the configured file system.
", - "error":{ - "httpStatusCode":408, - "senderFault":true - }, - "exception":true - }, - "EFSMountFailureException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"The Lambda function couldn't mount the configured file system due to a permission or configuration issue.
", - "error":{ - "httpStatusCode":403, - "senderFault":true - }, - "exception":true - }, - "EFSMountTimeoutException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"The Lambda function made a network connection to the configured file system, but the mount operation timed out.
", - "error":{ - "httpStatusCode":408, - "senderFault":true - }, - "exception":true - }, - "ENILimitReachedException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"Lambda couldn't create an elastic network interface in the VPC, specified as part of Lambda function configuration, because the limit for network interfaces has been reached. For more information, see Lambda quotas.
", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "Enabled":{ - "type":"boolean", - "box":true - }, - "EndPointType":{ - "type":"string", - "enum":["KAFKA_BOOTSTRAP_SERVERS"] - }, - "Endpoint":{ - "type":"string", - "max":300, - "min":1, - "pattern":"(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\\-]*[A-Za-z0-9]):[0-9]{1,5}" - }, - "EndpointLists":{ - "type":"list", - "member":{"shape":"Endpoint"}, - "max":10, - "min":1 - }, - "Endpoints":{ - "type":"map", - "key":{"shape":"EndPointType"}, - "value":{"shape":"EndpointLists"}, - "max":2, - "min":1 - }, - "Environment":{ - "type":"structure", - "members":{ - "Variables":{ - "shape":"EnvironmentVariables", - "documentation":"Environment variable key-value pairs. For more information, see Using Lambda environment variables.
" - } - }, - "documentation":"A function's environment variable settings. You can use environment variables to adjust your function's behavior without updating code. An environment variable is a pair of strings that are stored in a function's version-specific configuration.
" - }, - "EnvironmentError":{ - "type":"structure", - "members":{ - "ErrorCode":{ - "shape":"String", - "documentation":"The error code.
" - }, - "Message":{ - "shape":"SensitiveString", - "documentation":"The error message.
" - } - }, - "documentation":"Error messages for environment variables that couldn't be applied.
" - }, - "EnvironmentResponse":{ - "type":"structure", - "members":{ - "Variables":{ - "shape":"EnvironmentVariables", - "documentation":"Environment variable key-value pairs. Omitted from CloudTrail logs.
" - }, - "Error":{ - "shape":"EnvironmentError", - "documentation":"Error messages for environment variables that couldn't be applied.
" - } - }, - "documentation":"The results of an operation to update or read environment variables. If the operation succeeds, the response contains the environment variables. If it fails, the response contains details about the error.
" - }, - "EnvironmentVariableName":{ - "type":"string", - "pattern":"[a-zA-Z]([a-zA-Z0-9_])+", - "sensitive":true - }, - "EnvironmentVariableValue":{ - "type":"string", - "sensitive":true - }, - "EnvironmentVariables":{ - "type":"map", - "key":{"shape":"EnvironmentVariableName"}, - "value":{"shape":"EnvironmentVariableValue"}, - "sensitive":true - }, - "EphemeralStorage":{ - "type":"structure", - "required":["Size"], - "members":{ - "Size":{ - "shape":"EphemeralStorageSize", - "documentation":"The size of the function's /tmp directory.
The size of the function's /tmp directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).
The identifier of the event source mapping.
" - }, - "StartingPosition":{ - "shape":"EventSourcePosition", - "documentation":"The position in a stream from which to start reading. Required for Amazon Kinesis and Amazon DynamoDB Stream event sources. AT_TIMESTAMP is supported only for Amazon Kinesis streams, Amazon DocumentDB, Amazon MSK, and self-managed Apache Kafka.
With StartingPosition set to AT_TIMESTAMP, the time from which to start reading. StartingPositionTimestamp cannot be in the future.
The maximum number of records in each batch that Lambda pulls from your stream or queue and sends to your function. Lambda passes all of the records in the batch to the function in a single call, up to the payload limit for synchronous invocation (6 MB).
Default value: Varies by service. For Amazon SQS, the default is 10. For all other services, the default is 100.
Related setting: When you set BatchSize to a value greater than 10, you must set MaximumBatchingWindowInSeconds to at least 1.
The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function. You can configure MaximumBatchingWindowInSeconds to any value from 0 seconds to 300 seconds in increments of seconds.
For streams and Amazon SQS event sources, the default batching window is 0 seconds. For Amazon MSK, Self-managed Apache Kafka, Amazon MQ, and DocumentDB event sources, the default batching window is 500 ms. Note that because you can only change MaximumBatchingWindowInSeconds in increments of seconds, you cannot revert back to the 500 ms default batching window after you have changed it. To restore the default batching window, you must create a new event source mapping.
Related setting: For streams and Amazon SQS event sources, when you set BatchSize to a value greater than 10, you must set MaximumBatchingWindowInSeconds to at least 1.
(Kinesis and DynamoDB Streams only) The number of batches to process concurrently from each shard. The default value is 1.
" - }, - "EventSourceArn":{ - "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the event source.
" - }, - "FilterCriteria":{ - "shape":"FilterCriteria", - "documentation":"An object that defines the filter criteria that determine whether Lambda should process an event. For more information, see Lambda event filtering.
If filter criteria is encrypted, this field shows up as null in the response of ListEventSourceMapping API calls. You can view this field in plaintext in the response of GetEventSourceMapping and DeleteEventSourceMapping calls if you have kms:Decrypt permissions for the correct KMS key.
The ARN of the Lambda function.
" - }, - "LastModified":{ - "shape":"Date", - "documentation":"The date that the event source mapping was last updated or that its state changed.
" - }, - "LastProcessingResult":{ - "shape":"String", - "documentation":"The result of the event source mapping's last processing attempt.
" - }, - "State":{ - "shape":"String", - "documentation":"The state of the event source mapping. It can be one of the following: Creating, Enabling, Enabled, Disabling, Disabled, Updating, or Deleting.
Indicates whether a user or Lambda made the last change to the event source mapping.
" - }, - "DestinationConfig":{ - "shape":"DestinationConfig", - "documentation":"(Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Apache Kafka event sources only) A configuration object that specifies the destination of an event after Lambda processes it.
" - }, - "Topics":{ - "shape":"Topics", - "documentation":"The name of the Kafka topic.
" - }, - "Queues":{ - "shape":"Queues", - "documentation":"(Amazon MQ) The name of the Amazon MQ broker destination queue to consume.
" - }, - "SourceAccessConfigurations":{ - "shape":"SourceAccessConfigurations", - "documentation":"An array of the authentication protocol, VPC components, or virtual host to secure and define your event source.
" - }, - "SelfManagedEventSource":{ - "shape":"SelfManagedEventSource", - "documentation":"The self-managed Apache Kafka cluster for your event source.
" - }, - "MaximumRecordAgeInSeconds":{ - "shape":"MaximumRecordAgeInSeconds", - "documentation":"(Kinesis and DynamoDB Streams only) Discard records older than the specified age. The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, Lambda never discards old records.
The minimum valid value for maximum record age is 60 seconds. Although values less than 60 and greater than -1 fall within the parameter's absolute range, they are not allowed.
(Kinesis and DynamoDB Streams only) If the function returns an error, split the batch in two and retry. The default value is false.
" - }, - "MaximumRetryAttempts":{ - "shape":"MaximumRetryAttemptsEventSourceMapping", - "documentation":"(Kinesis and DynamoDB Streams only) Discard records after the specified number of retries. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, Lambda retries failed records until the record expires in the event source.
" - }, - "TumblingWindowInSeconds":{ - "shape":"TumblingWindowInSeconds", - "documentation":"(Kinesis and DynamoDB Streams only) The duration in seconds of a processing window for DynamoDB and Kinesis Streams event sources. A value of 0 seconds indicates no tumbling window.
" - }, - "FunctionResponseTypes":{ - "shape":"FunctionResponseTypeList", - "documentation":"(Kinesis, DynamoDB Streams, and Amazon SQS) A list of current response type enums applied to the event source mapping.
" - }, - "AmazonManagedKafkaEventSourceConfig":{ - "shape":"AmazonManagedKafkaEventSourceConfig", - "documentation":"Specific configuration settings for an Amazon Managed Streaming for Apache Kafka (Amazon MSK) event source.
" - }, - "SelfManagedKafkaEventSourceConfig":{ - "shape":"SelfManagedKafkaEventSourceConfig", - "documentation":"Specific configuration settings for a self-managed Apache Kafka event source.
" - }, - "ScalingConfig":{ - "shape":"ScalingConfig", - "documentation":"(Amazon SQS only) The scaling configuration for the event source. For more information, see Configuring maximum concurrency for Amazon SQS event sources.
" - }, - "DocumentDBEventSourceConfig":{ - "shape":"DocumentDBEventSourceConfig", - "documentation":"Specific configuration settings for a DocumentDB event source.
" - }, - "KMSKeyArn":{ - "shape":"KMSKeyArn", - "documentation":"The ARN of the Key Management Service (KMS) customer managed key that Lambda uses to encrypt your function's filter criteria.
" - }, - "FilterCriteriaError":{ - "shape":"FilterCriteriaError", - "documentation":"An object that contains details about an error related to filter criteria encryption.
" - }, - "EventSourceMappingArn":{ - "shape":"EventSourceMappingArn", - "documentation":"The Amazon Resource Name (ARN) of the event source mapping.
" - }, - "MetricsConfig":{ - "shape":"EventSourceMappingMetricsConfig", - "documentation":"The metrics configuration for your event source. For more information, see Event source mapping metrics.
" - }, - "ProvisionedPollerConfig":{ - "shape":"ProvisionedPollerConfig", - "documentation":"(Amazon MSK and self-managed Apache Kafka only) The provisioned mode configuration for the event source. For more information, see provisioned mode.
" - } - }, - "documentation":"A mapping between an Amazon Web Services resource and a Lambda function. For details, see CreateEventSourceMapping.
" - }, - "EventSourceMappingMetric":{ - "type":"string", - "enum":["EventCount"] - }, - "EventSourceMappingMetricList":{ - "type":"list", - "member":{"shape":"EventSourceMappingMetric"}, - "max":1, - "min":0 - }, - "EventSourceMappingMetricsConfig":{ - "type":"structure", - "members":{ - "Metrics":{ - "shape":"EventSourceMappingMetricList", - "documentation":" The metrics you want your event source mapping to produce. Include EventCount to receive event source mapping metrics related to the number of events processed by your event source mapping. For more information about these metrics, see Event source mapping metrics.
The metrics configuration for your event source. Use this configuration object to define which metrics you want your event source mapping to produce.
" - }, - "EventSourceMappingsList":{ - "type":"list", - "member":{"shape":"EventSourceMappingConfiguration"} - }, - "EventSourcePosition":{ - "type":"string", - "enum":[ - "TRIM_HORIZON", - "LATEST", - "AT_TIMESTAMP" - ] - }, - "EventSourceToken":{ - "type":"string", - "max":256, - "min":0, - "pattern":"[a-zA-Z0-9._\\-]+" - }, - "EventType":{ - "type":"string", - "enum":[ - "ExecutionStarted", - "ExecutionSucceeded", - "ExecutionFailed", - "ExecutionTimedOut", - "ExecutionStopped", - "ContextStarted", - "ContextSucceeded", - "ContextFailed", - "WaitStarted", - "WaitSucceeded", - "WaitCancelled", - "StepStarted", - "StepSucceeded", - "StepFailed", - "ChainedInvokePending", - "ChainedInvokeStarted", - "ChainedInvokeSucceeded", - "ChainedInvokeFailed", - "ChainedInvokeTimedOut", - "ChainedInvokeCancelled", - "CallbackStarted", - "CallbackSucceeded", - "CallbackFailed", - "CallbackTimedOut", - "InvocationCompleted" - ] - }, - "Events":{ - "type":"list", - "member":{"shape":"Event"} - }, - "Execution":{ - "type":"structure", - "required":[ - "DurableExecutionArn", - "DurableExecutionName", - "FunctionArn", - "Status", - "StartTimestamp" - ], - "members":{ - "DurableExecutionArn":{"shape":"DurableExecutionArn"}, - "DurableExecutionName":{"shape":"DurableExecutionName"}, - "FunctionArn":{"shape":"FunctionArn"}, - "Status":{"shape":"ExecutionStatus"}, - "StartTimestamp":{"shape":"ExecutionTimestamp"}, - "EndTimestamp":{"shape":"ExecutionTimestamp"} - } - }, - "ExecutionDetails":{ - "type":"structure", - "members":{ - "InputPayload":{"shape":"InputPayload"} - } - }, - "ExecutionFailedDetails":{ - "type":"structure", - "required":["Error"], - "members":{ - "Error":{"shape":"EventError"} - } - }, - "ExecutionStartedDetails":{ - "type":"structure", - "required":[ - "Input", - "ExecutionTimeout" - ], - "members":{ - "Input":{"shape":"EventInput"}, - "ExecutionTimeout":{"shape":"DurationSeconds"} - } - }, - "ExecutionStatus":{ - "type":"string", - "enum":[ - "RUNNING", - "SUCCEEDED", - "FAILED", - "TIMED_OUT", - "STOPPED" - ] - }, - "ExecutionStatusList":{ - "type":"list", - "member":{"shape":"ExecutionStatus"} - }, - "ExecutionStoppedDetails":{ - "type":"structure", - "required":["Error"], - "members":{ - "Error":{"shape":"EventError"} - } - }, - "ExecutionSucceededDetails":{ - "type":"structure", - "required":["Result"], - "members":{ - "Result":{"shape":"EventResult"} - } - }, - "ExecutionTimedOutDetails":{ - "type":"structure", - "members":{ - "Error":{"shape":"EventError"} - } - }, - "ExecutionTimeout":{ - "type":"integer", - "box":true, - "max":31622400, - "min":1 - }, - "ExecutionTimestamp":{"type":"timestamp"}, - "FileSystemArn":{ - "type":"string", - "max":200, - "min":0, - "pattern":"arn:aws[a-zA-Z-]*:elasticfilesystem:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:access-point/fsap-[a-f0-9]{17}" - }, - "FileSystemConfig":{ - "type":"structure", - "required":[ - "Arn", - "LocalMountPath" - ], - "members":{ - "Arn":{ - "shape":"FileSystemArn", - "documentation":"The Amazon Resource Name (ARN) of the Amazon EFS access point that provides access to the file system.
" - }, - "LocalMountPath":{ - "shape":"LocalMountPath", - "documentation":"The path where the function can access the file system, starting with /mnt/.
Details about the connection between a Lambda function and an Amazon EFS file system.
" - }, - "FileSystemConfigList":{ - "type":"list", - "member":{"shape":"FileSystemConfig"}, - "max":1, - "min":0 - }, - "Filter":{ - "type":"structure", - "members":{ - "Pattern":{ - "shape":"Pattern", - "documentation":"A filter pattern. For more information on the syntax of a filter pattern, see Filter rule syntax.
" - } - }, - "documentation":" A structure within a FilterCriteria object that defines an event filtering pattern.
A list of filters.
" - } - }, - "documentation":"An object that contains the filters for an event source.
" - }, - "FilterCriteriaError":{ - "type":"structure", - "members":{ - "ErrorCode":{ - "shape":"FilterCriteriaErrorCode", - "documentation":"The KMS exception that resulted from filter criteria encryption or decryption.
" - }, - "Message":{ - "shape":"FilterCriteriaErrorMessage", - "documentation":"The error message.
" - } - }, - "documentation":"An object that contains details about an error related to filter criteria encryption.
" - }, - "FilterCriteriaErrorCode":{ - "type":"string", - "max":50, - "min":10, - "pattern":"[A-Za-z]+Exception" - }, - "FilterCriteriaErrorMessage":{ - "type":"string", - "max":2048, - "min":10, - "pattern":".*" - }, - "FilterList":{ - "type":"list", - "member":{"shape":"Filter"} - }, - "FullDocument":{ - "type":"string", - "enum":[ - "UpdateLookup", - "Default" - ] - }, - "FunctionArn":{ - "type":"string", - "max":10000, - "min":0, - "pattern":"arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?" - }, - "FunctionArnList":{ - "type":"list", - "member":{"shape":"FunctionArn"} - }, - "FunctionCode":{ - "type":"structure", - "members":{ - "ZipFile":{ - "shape":"Blob", - "documentation":"The base64-encoded contents of the deployment package. Amazon Web Services SDK and CLI clients handle the encoding for you.
" - }, - "S3Bucket":{ - "shape":"S3Bucket", - "documentation":"An Amazon S3 bucket in the same Amazon Web Services Region as your function. The bucket can be in a different Amazon Web Services account.
" - }, - "S3Key":{ - "shape":"S3Key", - "documentation":"The Amazon S3 key of the deployment package.
" - }, - "S3ObjectVersion":{ - "shape":"S3ObjectVersion", - "documentation":"For versioned objects, the version of the deployment package object to use.
" - }, - "ImageUri":{ - "shape":"String", - "documentation":"URI of a container image in the Amazon ECR registry.
" - }, - "SourceKMSKeyArn":{ - "shape":"KMSKeyArn", - "documentation":"The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt your function's .zip deployment package. If you don't provide a customer managed key, Lambda uses an Amazon Web Services owned key.
" - } - }, - "documentation":"The code for the Lambda function. You can either specify an object in Amazon S3, upload a .zip file archive deployment package directly, or specify the URI of a container image.
" - }, - "FunctionCodeLocation":{ - "type":"structure", - "members":{ - "RepositoryType":{ - "shape":"String", - "documentation":"The service that's hosting the file.
" - }, - "Location":{ - "shape":"String", - "documentation":"A presigned URL that you can use to download the deployment package.
" - }, - "ImageUri":{ - "shape":"String", - "documentation":"URI of a container image in the Amazon ECR registry.
" - }, - "ResolvedImageUri":{ - "shape":"String", - "documentation":"The resolved URI for the image.
" - }, - "SourceKMSKeyArn":{ - "shape":"String", - "documentation":"The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt your function's .zip deployment package. If you don't provide a customer managed key, Lambda uses an Amazon Web Services owned key.
" - } - }, - "documentation":"Details about a function's deployment package.
" - }, - "FunctionConfiguration":{ - "type":"structure", - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"The name of the function.
" - }, - "FunctionArn":{ - "shape":"NameSpacedFunctionArn", - "documentation":"The function's Amazon Resource Name (ARN).
" - }, - "Runtime":{ - "shape":"Runtime", - "documentation":"The identifier of the function's runtime. Runtime is required if the deployment package is a .zip file archive. Specifying a runtime results in an error if you're deploying a function using a container image.
The following list includes deprecated runtimes. Lambda blocks creating new functions and updating existing functions shortly after each runtime is deprecated. For more information, see Runtime use after deprecation.
For a list of all currently supported runtimes, see Supported runtimes.
" - }, - "Role":{ - "shape":"RoleArn", - "documentation":"The function's execution role.
" - }, - "Handler":{ - "shape":"Handler", - "documentation":"The function that Lambda calls to begin running your function.
" - }, - "CodeSize":{ - "shape":"Long", - "documentation":"The size of the function's deployment package, in bytes.
" - }, - "Description":{ - "shape":"Description", - "documentation":"The function's description.
" - }, - "Timeout":{ - "shape":"Timeout", - "documentation":"The amount of time in seconds that Lambda allows a function to run before stopping it.
" - }, - "MemorySize":{ - "shape":"MemorySize", - "documentation":"The amount of memory available to the function at runtime.
" - }, - "LastModified":{ - "shape":"Timestamp", - "documentation":"The date and time that the function was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).
" - }, - "CodeSha256":{ - "shape":"String", - "documentation":"The SHA256 hash of the function's deployment package.
" - }, - "Version":{ - "shape":"Version", - "documentation":"The version of the Lambda function.
" - }, - "VpcConfig":{ - "shape":"VpcConfigResponse", - "documentation":"The function's networking configuration.
" - }, - "DeadLetterConfig":{ - "shape":"DeadLetterConfig", - "documentation":"The function's dead letter queue.
" - }, - "Environment":{ - "shape":"EnvironmentResponse", - "documentation":"The function's environment variables. Omitted from CloudTrail logs.
" - }, - "KMSKeyArn":{ - "shape":"KMSKeyArn", - "documentation":"The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt the following resources:
The function's environment variables.
The function's Lambda SnapStart snapshots.
When used with SourceKMSKeyArn, the unzipped version of the .zip deployment package that's used for function invocations. For more information, see Specifying a customer managed key for Lambda.
The optimized version of the container image that's used for function invocations. Note that this is not the same key that's used to protect your container image in the Amazon Elastic Container Registry (Amazon ECR). For more information, see Function lifecycle.
If you don't provide a customer managed key, Lambda uses an Amazon Web Services owned key or an Amazon Web Services managed key.
" - }, - "TracingConfig":{ - "shape":"TracingConfigResponse", - "documentation":"The function's X-Ray tracing configuration.
" - }, - "MasterArn":{ - "shape":"FunctionArn", - "documentation":"For Lambda@Edge functions, the ARN of the main function.
" - }, - "RevisionId":{ - "shape":"String", - "documentation":"The latest updated revision of the function or alias.
" - }, - "Layers":{ - "shape":"LayersReferenceList", - "documentation":"The function's layers.
" - }, - "State":{ - "shape":"State", - "documentation":"The current state of the function. When the state is Inactive, you can reactivate the function by invoking it.
The reason for the function's current state.
" - }, - "StateReasonCode":{ - "shape":"StateReasonCode", - "documentation":"The reason code for the function's current state. When the code is Creating, you can't invoke or modify the function.
The status of the last update that was performed on the function. This is first set to Successful after function creation completes.
The reason for the last update that was performed on the function.
" - }, - "LastUpdateStatusReasonCode":{ - "shape":"LastUpdateStatusReasonCode", - "documentation":"The reason code for the last update that was performed on the function.
" - }, - "FileSystemConfigs":{ - "shape":"FileSystemConfigList", - "documentation":"Connection settings for an Amazon EFS file system.
" - }, - "PackageType":{ - "shape":"PackageType", - "documentation":"The type of deployment package. Set to Image for container image and set Zip for .zip file archive.
The function's image configuration values.
" - }, - "SigningProfileVersionArn":{ - "shape":"Arn", - "documentation":"The ARN of the signing profile version.
" - }, - "SigningJobArn":{ - "shape":"Arn", - "documentation":"The ARN of the signing job.
" - }, - "Architectures":{ - "shape":"ArchitecturesList", - "documentation":"The instruction set architecture that the function supports. Architecture is a string array with one of the valid values. The default architecture value is x86_64.
The size of the function's /tmp directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).
Set ApplyOn to PublishedVersions to create a snapshot of the initialized execution environment when you publish a function version. For more information, see Improving startup performance with Lambda SnapStart.
The ARN of the runtime and any errors that occurred.
" - }, - "LoggingConfig":{ - "shape":"LoggingConfig", - "documentation":"The function's Amazon CloudWatch Logs configuration settings.
" - }, - "DurableConfig":{"shape":"DurableConfig"} - }, - "documentation":"Details about a function's configuration.
" - }, - "FunctionEventInvokeConfig":{ - "type":"structure", - "members":{ - "LastModified":{ - "shape":"Date", - "documentation":"The date and time that the configuration was last updated.
" - }, - "FunctionArn":{ - "shape":"FunctionArn", - "documentation":"The Amazon Resource Name (ARN) of the function.
" - }, - "MaximumRetryAttempts":{ - "shape":"MaximumRetryAttempts", - "documentation":"The maximum number of times to retry when the function returns an error.
" - }, - "MaximumEventAgeInSeconds":{ - "shape":"MaximumEventAgeInSeconds", - "documentation":"The maximum age of a request that Lambda sends to a function for processing.
" - }, - "DestinationConfig":{ - "shape":"DestinationConfig", - "documentation":"A destination for events after they have been sent to a function for processing.
Destinations
Function - The Amazon Resource Name (ARN) of a Lambda function.
Queue - The ARN of a standard SQS queue.
Bucket - The ARN of an Amazon S3 bucket.
Topic - The ARN of a standard SNS topic.
Event Bus - The ARN of an Amazon EventBridge event bus.
S3 buckets are supported only for on-failure destinations. To retain records of successful invocations, use another destination type.
The HTTP URL endpoint for your function.
" - }, - "FunctionArn":{ - "shape":"FunctionArn", - "documentation":"The Amazon Resource Name (ARN) of your function.
" - }, - "CreationTime":{ - "shape":"Timestamp", - "documentation":"When the function URL was created, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).
" - }, - "LastModifiedTime":{ - "shape":"Timestamp", - "documentation":"When the function URL configuration was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).
" - }, - "Cors":{ - "shape":"Cors", - "documentation":"The cross-origin resource sharing (CORS) settings for your function URL.
" - }, - "AuthType":{ - "shape":"FunctionUrlAuthType", - "documentation":"The type of authentication that your function URL uses. Set to AWS_IAM if you want to restrict access to authenticated users only. Set to NONE if you want to bypass IAM authentication to create a public endpoint. For more information, see Security and auth model for Lambda function URLs.
Use one of the following options:
BUFFERED – This is the default option. Lambda invokes your function using the Invoke API operation. Invocation results are available when the payload is complete. The maximum payload size is 6 MB.
RESPONSE_STREAM – Your function streams payload results as they become available. Lambda invokes your function using the InvokeWithResponseStream API operation. The maximum response payload size is 20 MB; however, you can request a quota increase.
Details about a Lambda function URL.
" - }, - "FunctionUrlConfigList":{ - "type":"list", - "member":{"shape":"FunctionUrlConfig"} - }, - "FunctionUrlQualifier":{ - "type":"string", - "max":128, - "min":1, - "pattern":"(^\\$LATEST$)|((?!^[0-9]+$)([a-zA-Z0-9-_]+))" - }, - "FunctionVersion":{ - "type":"string", - "enum":["ALL"] - }, - "GetAccountSettingsRequest":{ - "type":"structure", - "members":{} - }, - "GetAccountSettingsResponse":{ - "type":"structure", - "members":{ - "AccountLimit":{ - "shape":"AccountLimit", - "documentation":"Limits that are related to concurrency and code storage.
" - }, - "AccountUsage":{ - "shape":"AccountUsage", - "documentation":"The number of functions and amount of storage in use.
" - } - } - }, - "GetAliasRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "Name" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name - MyFunction.
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.
Partial ARN - 123456789012:function:MyFunction.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Name":{ - "shape":"Alias", - "documentation":"The name of the alias.
", - "location":"uri", - "locationName":"Name" - } - } - }, - "GetCodeSigningConfigRequest":{ - "type":"structure", - "required":["CodeSigningConfigArn"], - "members":{ - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"The The Amazon Resource Name (ARN) of the code signing configuration.
", - "location":"uri", - "locationName":"CodeSigningConfigArn" - } - } - }, - "GetCodeSigningConfigResponse":{ - "type":"structure", - "required":["CodeSigningConfig"], - "members":{ - "CodeSigningConfig":{ - "shape":"CodeSigningConfig", - "documentation":"The code signing configuration
" - } - } - }, - "GetDurableExecutionHistoryRequest":{ - "type":"structure", - "required":["DurableExecutionArn"], - "members":{ - "DurableExecutionArn":{ - "shape":"DurableExecutionArn", - "location":"uri", - "locationName":"DurableExecutionArn" - }, - "IncludeExecutionData":{ - "shape":"IncludeExecutionData", - "location":"querystring", - "locationName":"IncludeExecutionData" - }, - "MaxItems":{ - "shape":"ItemCount", - "location":"querystring", - "locationName":"MaxItems" - }, - "Marker":{ - "shape":"String", - "location":"querystring", - "locationName":"Marker" - }, - "ReverseOrder":{ - "shape":"ReverseOrder", - "location":"querystring", - "locationName":"ReverseOrder" - } - } - }, - "GetDurableExecutionHistoryResponse":{ - "type":"structure", - "required":["Events"], - "members":{ - "Events":{"shape":"Events"}, - "NextMarker":{"shape":"String"} - } - }, - "GetDurableExecutionRequest":{ - "type":"structure", - "required":["DurableExecutionArn"], - "members":{ - "DurableExecutionArn":{ - "shape":"DurableExecutionArn", - "location":"uri", - "locationName":"DurableExecutionArn" - } - } - }, - "GetDurableExecutionResponse":{ - "type":"structure", - "required":[ - "DurableExecutionArn", - "DurableExecutionName", - "FunctionArn", - "StartTimestamp", - "Status" - ], - "members":{ - "DurableExecutionArn":{"shape":"DurableExecutionArn"}, - "DurableExecutionName":{"shape":"DurableExecutionName"}, - "FunctionArn":{"shape":"FunctionArn"}, - "InputPayload":{"shape":"InputPayload"}, - "Result":{"shape":"OutputPayload"}, - "Error":{"shape":"ErrorObject"}, - "StartTimestamp":{"shape":"ExecutionTimestamp"}, - "Status":{"shape":"ExecutionStatus"}, - "EndTimestamp":{"shape":"ExecutionTimestamp"}, - "Version":{"shape":"Version"} - } - }, - "GetDurableExecutionStateRequest":{ - "type":"structure", - "required":[ - "DurableExecutionArn", - "CheckpointToken" - ], - "members":{ - "DurableExecutionArn":{ - "shape":"DurableExecutionArn", - "location":"uri", - "locationName":"DurableExecutionArn" - }, - "CheckpointToken":{ - "shape":"CheckpointToken", - "location":"querystring", - "locationName":"CheckpointToken" - }, - "Marker":{ - "shape":"String", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"ItemCount", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "GetDurableExecutionStateResponse":{ - "type":"structure", - "required":["Operations"], - "members":{ - "Operations":{"shape":"Operations"}, - "NextMarker":{"shape":"String"} - } - }, - "GetEventSourceMappingRequest":{ - "type":"structure", - "required":["UUID"], - "members":{ - "UUID":{ - "shape":"String", - "documentation":"The identifier of the event source mapping.
", - "location":"uri", - "locationName":"UUID" - } - } - }, - "GetFunctionCodeSigningConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name - MyFunction.
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.
Partial ARN - 123456789012:function:MyFunction.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - } - } - }, - "GetFunctionCodeSigningConfigResponse":{ - "type":"structure", - "required":[ - "CodeSigningConfigArn", - "FunctionName" - ], - "members":{ - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"The The Amazon Resource Name (ARN) of the code signing configuration.
" - }, - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name - MyFunction.
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.
Partial ARN - 123456789012:function:MyFunction.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
" - } - } - }, - "GetFunctionConcurrencyRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - } - } - }, - "GetFunctionConcurrencyResponse":{ - "type":"structure", - "members":{ - "ReservedConcurrentExecutions":{ - "shape":"ReservedConcurrentExecutions", - "documentation":"The number of simultaneous executions that are reserved for the function.
" - } - } - }, - "GetFunctionConfigurationRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"The name or ARN of the Lambda function, version, or alias.
Name formats
Function name – my-function (name-only), my-function:v1 (with alias).
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"Specify a version or alias to get details about a published version of the function.
", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "GetFunctionEventInvokeConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function, version, or alias.
Name formats
Function name - my-function (name-only), my-function:v1 (with alias).
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN - 123456789012:function:my-function.
You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"A version number or alias name.
", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "GetFunctionRecursionConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"UnqualifiedFunctionName", - "documentation":"", - "location":"uri", - "locationName":"FunctionName" - } - } - }, - "GetFunctionRecursionConfigResponse":{ - "type":"structure", - "members":{ - "RecursiveLoop":{ - "shape":"RecursiveLoop", - "documentation":"If your function's recursive loop detection configuration is Allow, Lambda doesn't take any action when it detects your function being invoked as part of a recursive loop.
If your function's recursive loop detection configuration is Terminate, Lambda stops the invocation and notifies you when it detects your function being invoked as part of a recursive loop.
By default, Lambda sets your function's configuration to Terminate. You can update this configuration using the PutFunctionRecursionConfig action.
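A minimal sketch of reading and updating this setting with boto3's Lambda client (the function name is hypothetical):

```python
import boto3

lambda_client = boto3.client("lambda")

# Intentional recursive patterns (e.g., a function that re-queues work to
# the same queue it consumes) require opting out of termination.
lambda_client.put_function_recursion_config(
    FunctionName="my-function",   # hypothetical
    RecursiveLoop="Allow",        # the default is "Terminate"
)
cfg = lambda_client.get_function_recursion_config(FunctionName="my-function")
print(cfg["RecursiveLoop"])
```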
The name or ARN of the Lambda function, version, or alias.
Name formats
Function name – my-function (name-only), my-function:v1 (with alias).
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"Specify a version or alias to get details about a published version of the function.
", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "GetFunctionResponse":{ - "type":"structure", - "members":{ - "Configuration":{ - "shape":"FunctionConfiguration", - "documentation":"The configuration of the function or version.
" - }, - "Code":{ - "shape":"FunctionCodeLocation", - "documentation":"The deployment package of the function or version.
" - }, - "Tags":{ - "shape":"Tags", - "documentation":"The function's tags. Lambda returns tag data only if you have explicit allow permissions for lambda:ListTags.
" - }, - "TagsError":{ - "shape":"TagsError", - "documentation":"An object that contains details about an error related to retrieving tags.
" - }, - "Concurrency":{ - "shape":"Concurrency", - "documentation":"The function's reserved concurrency.
" - } - } - }, - "GetFunctionUrlConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"FunctionUrlQualifier", - "documentation":"The alias name.
", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "GetFunctionUrlConfigResponse":{ - "type":"structure", - "required":[ - "FunctionUrl", - "FunctionArn", - "AuthType", - "CreationTime", - "LastModifiedTime" - ], - "members":{ - "FunctionUrl":{ - "shape":"FunctionUrl", - "documentation":"The HTTP URL endpoint for your function.
" - }, - "FunctionArn":{ - "shape":"FunctionArn", - "documentation":"The Amazon Resource Name (ARN) of your function.
" - }, - "AuthType":{ - "shape":"FunctionUrlAuthType", - "documentation":"The type of authentication that your function URL uses. Set to AWS_IAM if you want to restrict access to authenticated users only. Set to NONE if you want to bypass IAM authentication to create a public endpoint. For more information, see Security and auth model for Lambda function URLs.
The cross-origin resource sharing (CORS) settings for your function URL.
" - }, - "CreationTime":{ - "shape":"Timestamp", - "documentation":"When the function URL was created, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).
" - }, - "LastModifiedTime":{ - "shape":"Timestamp", - "documentation":"When the function URL configuration was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).
" - }, - "InvokeMode":{ - "shape":"InvokeMode", - "documentation":"Use one of the following options:
BUFFERED – This is the default option. Lambda invokes your function using the Invoke API operation. Invocation results are available when the payload is complete. The maximum payload size is 6 MB.
RESPONSE_STREAM – Your function streams payload results as they become available. Lambda invokes your function using the InvokeWithResponseStream API operation. The maximum response payload size is 20 MB; however, you can request a quota increase.
The ARN of the layer version.
", - "location":"querystring", - "locationName":"Arn" - } - } - }, - "GetLayerVersionPolicyRequest":{ - "type":"structure", - "required":[ - "LayerName", - "VersionNumber" - ], - "members":{ - "LayerName":{ - "shape":"LayerName", - "documentation":"The name or Amazon Resource Name (ARN) of the layer.
", - "location":"uri", - "locationName":"LayerName" - }, - "VersionNumber":{ - "shape":"LayerVersionNumber", - "documentation":"The version number.
", - "location":"uri", - "locationName":"VersionNumber" - } - } - }, - "GetLayerVersionPolicyResponse":{ - "type":"structure", - "members":{ - "Policy":{ - "shape":"String", - "documentation":"The policy document.
" - }, - "RevisionId":{ - "shape":"String", - "documentation":"A unique identifier for the current revision of the policy.
" - } - } - }, - "GetLayerVersionRequest":{ - "type":"structure", - "required":[ - "LayerName", - "VersionNumber" - ], - "members":{ - "LayerName":{ - "shape":"LayerName", - "documentation":"The name or Amazon Resource Name (ARN) of the layer.
", - "location":"uri", - "locationName":"LayerName" - }, - "VersionNumber":{ - "shape":"LayerVersionNumber", - "documentation":"The version number.
", - "location":"uri", - "locationName":"VersionNumber" - } - } - }, - "GetLayerVersionResponse":{ - "type":"structure", - "members":{ - "Content":{ - "shape":"LayerVersionContentOutput", - "documentation":"Details about the layer version.
" - }, - "LayerArn":{ - "shape":"LayerArn", - "documentation":"The ARN of the layer.
" - }, - "LayerVersionArn":{ - "shape":"LayerVersionArn", - "documentation":"The ARN of the layer version.
" - }, - "Description":{ - "shape":"Description", - "documentation":"The description of the version.
" - }, - "CreatedDate":{ - "shape":"Timestamp", - "documentation":"The date that the layer version was created, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).
" - }, - "Version":{ - "shape":"LayerVersionNumber", - "documentation":"The version number.
" - }, - "CompatibleRuntimes":{ - "shape":"CompatibleRuntimes", - "documentation":"The layer's compatible runtimes.
The following list includes deprecated runtimes. For more information, see Runtime use after deprecation.
For a list of all currently supported runtimes, see Supported runtimes.
" - }, - "LicenseInfo":{ - "shape":"LicenseInfo", - "documentation":"The layer's software license.
" - }, - "CompatibleArchitectures":{ - "shape":"CompatibleArchitectures", - "documentation":"A list of compatible instruction set architectures.
" - } - } - }, - "GetPolicyRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"The name or ARN of the Lambda function, version, or alias.
Name formats
Function name – my-function (name-only), my-function:v1 (with alias).
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"Specify a version or alias to get the policy for that resource.
", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "GetPolicyResponse":{ - "type":"structure", - "members":{ - "Policy":{ - "shape":"String", - "documentation":"The resource-based policy.
" - }, - "RevisionId":{ - "shape":"String", - "documentation":"A unique identifier for the current revision of the policy.
" - } - } - }, - "GetProvisionedConcurrencyConfigRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "Qualifier" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"The version number or alias name.
", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "GetProvisionedConcurrencyConfigResponse":{ - "type":"structure", - "members":{ - "RequestedProvisionedConcurrentExecutions":{ - "shape":"PositiveInteger", - "documentation":"The amount of provisioned concurrency requested.
" - }, - "AvailableProvisionedConcurrentExecutions":{ - "shape":"NonNegativeInteger", - "documentation":"The amount of provisioned concurrency available.
" - }, - "AllocatedProvisionedConcurrentExecutions":{ - "shape":"NonNegativeInteger", - "documentation":"The amount of provisioned concurrency allocated. When a weighted alias is used during linear and canary deployments, this value fluctuates depending on the amount of concurrency that is provisioned for the function versions.
" - }, - "Status":{ - "shape":"ProvisionedConcurrencyStatusEnum", - "documentation":"The status of the allocation process.
" - }, - "StatusReason":{ - "shape":"String", - "documentation":"For failed allocations, the reason that provisioned concurrency could not be allocated.
" - }, - "LastModified":{ - "shape":"Timestamp", - "documentation":"The date and time that a user last updated the configuration, in ISO 8601 format.
" - } - } - }, - "GetRuntimeManagementConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"Specify a version of the function. This can be $LATEST or a published version number. If no value is specified, the configuration for the $LATEST version is returned.
The current runtime update mode of the function.
" - }, - "RuntimeVersionArn":{ - "shape":"RuntimeVersionArn", - "documentation":"The ARN of the runtime the function is configured to use. If the runtime update mode is Manual, the ARN is returned, otherwise null is returned.
The Amazon Resource Name (ARN) of your function.
" - } - } - }, - "Handler":{ - "type":"string", - "max":128, - "min":0, - "pattern":"[^\\s]+" - }, - "Header":{ - "type":"string", - "max":1024, - "min":0, - "pattern":".*" - }, - "HeadersList":{ - "type":"list", - "member":{"shape":"Header"}, - "max":100, - "min":0 - }, - "HttpStatus":{"type":"integer"}, - "ImageConfig":{ - "type":"structure", - "members":{ - "EntryPoint":{ - "shape":"StringList", - "documentation":"Specifies the entry point to their application, which is typically the location of the runtime executable.
" - }, - "Command":{ - "shape":"StringList", - "documentation":"Specifies parameters that you want to pass in with ENTRYPOINT.
" - }, - "WorkingDirectory":{ - "shape":"WorkingDirectory", - "documentation":"Specifies the working directory.
" - } - }, - "documentation":"Configuration values that override the container image Dockerfile settings. For more information, see Container image settings.
" - }, - "ImageConfigError":{ - "type":"structure", - "members":{ - "ErrorCode":{ - "shape":"String", - "documentation":"Error code.
" - }, - "Message":{ - "shape":"SensitiveString", - "documentation":"Error message.
" - } - }, - "documentation":"Error response to GetFunctionConfiguration.
Configuration values that override the container image Dockerfile.
" - }, - "Error":{ - "shape":"ImageConfigError", - "documentation":"Error response to GetFunctionConfiguration.
Response to a GetFunctionConfiguration request.
The code signature failed the integrity check. If the integrity check fails, then Lambda blocks deployment, even if the code signing policy is set to WARN.
", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "InvalidParameterValueException":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"String", - "documentation":"The exception type.
" - }, - "message":{ - "shape":"String", - "documentation":"The exception message.
" - } - }, - "documentation":"One of the parameters in the request is not valid.
", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "InvalidRequestContentException":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"String", - "documentation":"The exception type.
" - }, - "message":{ - "shape":"String", - "documentation":"The exception message.
" - } - }, - "documentation":"The request body could not be parsed as JSON, or a request header is invalid. For example, the 'x-amzn-RequestId' header is not a valid UUID string.
", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "InvalidRuntimeException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"The runtime or runtime version specified is not supported.
", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "InvalidSecurityGroupIDException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"The security group ID provided in the Lambda function VPC configuration is not valid.
", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "InvalidSubnetIDException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"The subnet ID provided in the Lambda function VPC configuration is not valid.
", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "InvalidZipFileException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"Lambda could not unzip the deployment package.
", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "InvocationCompletedDetails":{ - "type":"structure", - "required":[ - "StartTimestamp", - "EndTimestamp", - "RequestId" - ], - "members":{ - "StartTimestamp":{"shape":"ExecutionTimestamp"}, - "EndTimestamp":{"shape":"ExecutionTimestamp"}, - "RequestId":{"shape":"String"}, - "Error":{"shape":"EventError"} - } - }, - "InvocationRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"The name or ARN of the Lambda function, version, or alias.
Name formats
Function name – my-function (name-only), my-function:v1 (with alias).
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "InvocationType":{ - "shape":"InvocationType", - "documentation":"Choose from the following options.
RequestResponse (default) – Invoke the function synchronously. Keep the connection open until the function returns a response or times out. The API response includes the function response and additional data.
Event – Invoke the function asynchronously. Send events that fail multiple times to the function's dead-letter queue (if one is configured). The API response only includes a status code.
DryRun – Validate parameter values and verify that the user or role has permission to invoke the function.
Set to Tail to include the execution log in the response. Applies to synchronously invoked functions only.
Up to 3,583 bytes of base64-encoded data about the invoking client to pass to the function in the context object. Lambda passes the ClientContext object to your function for synchronous invocations only.
The JSON that you want to provide to your Lambda function as input.
You can enter the JSON directly. For example, --payload '{ \"key\": \"value\" }'. You can also specify a file path. For example, --payload file://payload.json.
Specify a version or alias to invoke a published version of the function.
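Putting the request fields above together, a minimal synchronous invocation with boto3 looks like this (the function name and alias are hypothetical):

```python
import base64
import json
import boto3

lambda_client = boto3.client("lambda")

resp = lambda_client.invoke(
    FunctionName="my-function",           # hypothetical
    InvocationType="RequestResponse",     # synchronous; use "Event" for async
    LogType="Tail",                       # return the last 4 KB of logs
    Payload=json.dumps({"key": "value"}).encode(),
    Qualifier="live",                     # optional version or alias
)
print(resp["StatusCode"])                            # 200 for RequestResponse
print(base64.b64decode(resp["LogResult"]).decode())  # decoded execution log
result = json.loads(resp["Payload"].read())          # function result or error object
if "FunctionError" in resp:
    raise RuntimeError(result)                       # payload holds error details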
", - "location":"querystring", - "locationName":"Qualifier" - } - }, - "payload":"Payload" - }, - "InvocationResponse":{ - "type":"structure", - "members":{ - "StatusCode":{ - "shape":"Integer", - "documentation":"The HTTP status code is in the 200 range for a successful request. For the RequestResponse invocation type, this status code is 200. For the Event invocation type, this status code is 202. For the DryRun invocation type, the status code is 204.
If present, indicates that an error occurred during function execution. Details about the error are included in the response payload.
", - "location":"header", - "locationName":"X-Amz-Function-Error" - }, - "LogResult":{ - "shape":"String", - "documentation":"The last 4 KB of the execution log, which is base64-encoded.
", - "location":"header", - "locationName":"X-Amz-Log-Result" - }, - "Payload":{ - "shape":"Blob", - "documentation":"The response from the function, or an error object.
" - }, - "ExecutedVersion":{ - "shape":"Version", - "documentation":"The version of the function that executed. When you invoke a function with an alias, this indicates which version the alias resolved to.
", - "location":"header", - "locationName":"X-Amz-Executed-Version" - }, - "DurableExecutionArn":{ - "shape":"DurableExecutionArn", - "location":"header", - "locationName":"X-Amz-Durable-Execution-Arn" - } - }, - "payload":"Payload" - }, - "InvocationType":{ - "type":"string", - "enum":[ - "Event", - "RequestResponse", - "DryRun" - ] - }, - "InvokeAsyncRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "InvokeArgs" - ], - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "InvokeArgs":{ - "shape":"BlobStream", - "documentation":"The JSON that you want to provide to your Lambda function as input.
" - } - }, - "deprecated":true, - "payload":"InvokeArgs" - }, - "InvokeAsyncResponse":{ - "type":"structure", - "members":{ - "Status":{ - "shape":"HttpStatus", - "documentation":"The status code.
", - "location":"statusCode" - } - }, - "documentation":"A success response (202 Accepted) indicates that the request is queued for invocation.
Data returned by your Lambda function.
", - "eventpayload":true - } - }, - "documentation":"A chunk of the streamed response payload.
", - "event":true - }, - "InvokeWithResponseStreamCompleteEvent":{ - "type":"structure", - "members":{ - "ErrorCode":{ - "shape":"String", - "documentation":"An error code.
" - }, - "ErrorDetails":{ - "shape":"String", - "documentation":"The details of any returned error.
" - }, - "LogResult":{ - "shape":"String", - "documentation":"The last 4 KB of the execution log, which is base64-encoded.
" - } - }, - "documentation":"A response confirming that the event stream is complete.
", - "event":true - }, - "InvokeWithResponseStreamRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "InvocationType":{ - "shape":"ResponseStreamingInvocationType", - "documentation":"Use one of the following options:
RequestResponse (default) – Invoke the function synchronously. Keep the connection open until the function returns a response or times out. The API operation response includes the function response and additional data.
DryRun – Validate parameter values and verify that the IAM user or role has permission to invoke the function.
Set to Tail to include the execution log in the response. Applies to synchronously invoked functions only.
Up to 3,583 bytes of base64-encoded data about the invoking client to pass to the function in the context object.
", - "location":"header", - "locationName":"X-Amz-Client-Context" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"The alias name.
", - "location":"querystring", - "locationName":"Qualifier" - }, - "Payload":{ - "shape":"Blob", - "documentation":"The JSON that you want to provide to your Lambda function as input.
You can enter the JSON directly. For example, --payload '{ \"key\": \"value\" }'. You can also specify a file path. For example, --payload file://payload.json.
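For the streaming variant, the response is consumed as an event stream of payload chunks followed by a completion event, as in this boto3 sketch (function name is hypothetical):

```python
import json
import sys
import boto3

lambda_client = boto3.client("lambda")

resp = lambda_client.invoke_with_response_stream(
    FunctionName="my-streaming-function",   # hypothetical
    InvocationType="RequestResponse",
    Payload=json.dumps({"key": "value"}).encode(),
)
for event in resp["EventStream"]:
    if "PayloadChunk" in event:             # partial result, delivered as produced
        sys.stdout.write(event["PayloadChunk"]["Payload"].decode())
    elif "InvokeComplete" in event:         # stream finished; check for errors
        err = event["InvokeComplete"].get("ErrorCode")
        if err:
            raise RuntimeError(err)
```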
For a successful request, the HTTP status code is in the 200 range. For the RequestResponse invocation type, this status code is 200. For the DryRun invocation type, this status code is 204.
The version of the function that executed. When you invoke a function with an alias, this indicates which version the alias resolved to.
", - "location":"header", - "locationName":"X-Amz-Executed-Version" - }, - "EventStream":{ - "shape":"InvokeWithResponseStreamResponseEvent", - "documentation":"The stream of response payloads.
" - }, - "ResponseStreamContentType":{ - "shape":"String", - "documentation":"The type of data the stream is returning.
", - "location":"header", - "locationName":"Content-Type" - } - }, - "payload":"EventStream" - }, - "InvokeWithResponseStreamResponseEvent":{ - "type":"structure", - "members":{ - "PayloadChunk":{ - "shape":"InvokeResponseStreamUpdate", - "documentation":"A chunk of the streamed response payload.
" - }, - "InvokeComplete":{ - "shape":"InvokeWithResponseStreamCompleteEvent", - "documentation":"An object that's returned when the stream has ended and all the payload chunks have been returned.
" - } - }, - "documentation":"An object that includes a chunk of the response payload. When the stream has ended, Lambda includes a InvokeComplete object.
Lambda couldn't decrypt the environment variables because KMS access was denied. Check the Lambda function's KMS permissions.
", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "KMSDisabledException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"Lambda couldn't decrypt the environment variables because the KMS key used is disabled. Check the Lambda function's KMS key settings.
", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "KMSInvalidStateException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"Lambda couldn't decrypt the environment variables because the state of the KMS key used is not valid for Decrypt. Check the function's KMS key settings.
", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "KMSKeyArn":{ - "type":"string", - "pattern":"(arn:(aws[a-zA-Z-]*)?:[a-z0-9-.]+:.*)|()" - }, - "KMSNotFoundException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"Lambda couldn't decrypt the environment variables because the KMS key was not found. Check the function's KMS key settings.
", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "KafkaSchemaRegistryAccessConfig":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"KafkaSchemaRegistryAuthType", - "documentation":"The type of authentication Lambda uses to access your schema registry.
" - }, - "URI":{ - "shape":"Arn", - "documentation":"The URI of the secret (Secrets Manager secret ARN) to authenticate with your schema registry.
" - } - }, - "documentation":"Specific access configuration settings that tell Lambda how to authenticate with your schema registry.
If you're working with a Glue schema registry, don't provide authentication details in this object. Instead, ensure that your execution role has the required permissions for Lambda to access your cluster.
If you're working with a Confluent schema registry, choose the authentication method in the Type field, and provide the Secrets Manager secret ARN in the URI field.
The URI for your schema registry. The correct URI format depends on the type of schema registry you're using.
For Glue schema registries, use the ARN of the registry.
For Confluent schema registries, use the URL of the registry.
The record format that Lambda delivers to your function after schema validation.
Choose JSON to have Lambda deliver the record to your function as a standard JSON object.
Choose SOURCE to have Lambda deliver the record to your function in its original source format. Lambda removes all schema metadata, such as the schema ID, before sending the record to your function.
An array of access configuration objects that tell Lambda how to authenticate with your schema registry.
" - }, - "SchemaValidationConfigs":{ - "shape":"KafkaSchemaValidationConfigList", - "documentation":"An array of schema validation configuration objects, which tell Lambda the message attributes you want to validate and filter using your schema registry.
" - } - }, - "documentation":"Specific configuration settings for a Kafka schema registry.
" - }, - "KafkaSchemaValidationAttribute":{ - "type":"string", - "enum":[ - "KEY", - "VALUE" - ] - }, - "KafkaSchemaValidationConfig":{ - "type":"structure", - "members":{ - "Attribute":{ - "shape":"KafkaSchemaValidationAttribute", - "documentation":" The attributes you want your schema registry to validate and filter for. If you selected JSON as the EventRecordFormat, Lambda also deserializes the selected message attributes.
Specific schema validation configuration settings that tell Lambda the message attributes you want to validate and filter using your schema registry.
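A hedged sketch of wiring these schema registry settings into an event source mapping. The field names mirror the shapes above; the exact nesting under boto3's create_event_source_mapping Kafka config, and all ARNs and URIs, are assumptions for illustration:

```python
import boto3

lambda_client = boto3.client("lambda")

# Assumes a Confluent registry secured with basic auth via Secrets Manager.
lambda_client.create_event_source_mapping(
    FunctionName="my-function",   # hypothetical
    EventSourceArn="arn:aws:kafka:us-west-2:123456789012:cluster/my-cluster/abc",  # hypothetical
    Topics=["orders"],
    AmazonManagedKafkaEventSourceConfig={
        "SchemaRegistryConfig": {
            "SchemaRegistryURI": "https://registry.example.com",  # URL for Confluent, ARN for Glue
            "EventRecordFormat": "JSON",                          # or "SOURCE"
            "AccessConfigs": [
                {"Type": "BASIC_AUTH",                            # assumed auth type value
                 "URI": "arn:aws:secretsmanager:us-west-2:123456789012:secret:registry-creds"}
            ],
            "SchemaValidationConfigs": [{"Attribute": "VALUE"}],  # validate message values
        }
    },
)
```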
" - }, - "KafkaSchemaValidationConfigList":{ - "type":"list", - "member":{"shape":"KafkaSchemaValidationConfig"} - }, - "LastUpdateStatus":{ - "type":"string", - "enum":[ - "Successful", - "Failed", - "InProgress" - ] - }, - "LastUpdateStatusReason":{"type":"string"}, - "LastUpdateStatusReasonCode":{ - "type":"string", - "enum":[ - "EniLimitExceeded", - "InsufficientRolePermissions", - "InvalidConfiguration", - "InternalError", - "SubnetOutOfIPAddresses", - "InvalidSubnet", - "InvalidSecurityGroup", - "ImageDeleted", - "ImageAccessDenied", - "InvalidImage", - "KMSKeyAccessDenied", - "KMSKeyNotFound", - "InvalidStateKMSKey", - "DisabledKMSKey", - "EFSIOError", - "EFSMountConnectivityError", - "EFSMountFailure", - "EFSMountTimeout", - "InvalidRuntime", - "InvalidZipFileException", - "FunctionError" - ] - }, - "Layer":{ - "type":"structure", - "members":{ - "Arn":{ - "shape":"LayerVersionArn", - "documentation":"The Amazon Resource Name (ARN) of the function layer.
" - }, - "CodeSize":{ - "shape":"Long", - "documentation":"The size of the layer archive in bytes.
" - }, - "SigningProfileVersionArn":{ - "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) for a signing profile version.
" - }, - "SigningJobArn":{ - "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of a signing job.
" - } - }, - "documentation":"An Lambda layer.
" - }, - "LayerArn":{ - "type":"string", - "max":140, - "min":1, - "pattern":"arn:[a-zA-Z0-9-]+:lambda:[a-zA-Z0-9-]+:\\d{12}:layer:[a-zA-Z0-9-_]+" - }, - "LayerList":{ - "type":"list", - "member":{"shape":"LayerVersionArn"} - }, - "LayerName":{ - "type":"string", - "max":140, - "min":1, - "pattern":"(arn:[a-zA-Z0-9-]+:lambda:[a-zA-Z0-9-]+:\\d{12}:layer:[a-zA-Z0-9-_]+)|[a-zA-Z0-9-_]+" - }, - "LayerPermissionAllowedAction":{ - "type":"string", - "max":22, - "min":0, - "pattern":"lambda:GetLayerVersion" - }, - "LayerPermissionAllowedPrincipal":{ - "type":"string", - "pattern":"\\d{12}|\\*|arn:(aws[a-zA-Z-]*):iam::\\d{12}:root" - }, - "LayerVersionArn":{ - "type":"string", - "max":140, - "min":1, - "pattern":"arn:[a-zA-Z0-9-]+:lambda:[a-zA-Z0-9-]+:\\d{12}:layer:[a-zA-Z0-9-_]+:[0-9]+" - }, - "LayerVersionContentInput":{ - "type":"structure", - "members":{ - "S3Bucket":{ - "shape":"S3Bucket", - "documentation":"The Amazon S3 bucket of the layer archive.
" - }, - "S3Key":{ - "shape":"S3Key", - "documentation":"The Amazon S3 key of the layer archive.
" - }, - "S3ObjectVersion":{ - "shape":"S3ObjectVersion", - "documentation":"For versioned objects, the version of the layer archive object to use.
" - }, - "ZipFile":{ - "shape":"Blob", - "documentation":"The base64-encoded contents of the layer archive. Amazon Web Services SDK and Amazon Web Services CLI clients handle the encoding for you.
" - } - }, - "documentation":"A ZIP archive that contains the contents of an Lambda layer. You can specify either an Amazon S3 location, or upload a layer archive directly.
" - }, - "LayerVersionContentOutput":{ - "type":"structure", - "members":{ - "Location":{ - "shape":"String", - "documentation":"A link to the layer archive in Amazon S3 that is valid for 10 minutes.
" - }, - "CodeSha256":{ - "shape":"String", - "documentation":"The SHA-256 hash of the layer archive.
" - }, - "CodeSize":{ - "shape":"Long", - "documentation":"The size of the layer archive in bytes.
" - }, - "SigningProfileVersionArn":{ - "shape":"String", - "documentation":"The Amazon Resource Name (ARN) for a signing profile version.
" - }, - "SigningJobArn":{ - "shape":"String", - "documentation":"The Amazon Resource Name (ARN) of a signing job.
" - } - }, - "documentation":"Details about a version of an Lambda layer.
" - }, - "LayerVersionNumber":{"type":"long"}, - "LayerVersionsList":{ - "type":"list", - "member":{"shape":"LayerVersionsListItem"} - }, - "LayerVersionsListItem":{ - "type":"structure", - "members":{ - "LayerVersionArn":{ - "shape":"LayerVersionArn", - "documentation":"The ARN of the layer version.
" - }, - "Version":{ - "shape":"LayerVersionNumber", - "documentation":"The version number.
" - }, - "Description":{ - "shape":"Description", - "documentation":"The description of the version.
" - }, - "CreatedDate":{ - "shape":"Timestamp", - "documentation":"The date that the version was created, in ISO 8601 format. For example, 2018-11-27T15:10:45.123+0000.
The layer's compatible runtimes.
The following list includes deprecated runtimes. For more information, see Runtime use after deprecation.
For a list of all currently supported runtimes, see Supported runtimes.
" - }, - "LicenseInfo":{ - "shape":"LicenseInfo", - "documentation":"The layer's open-source license.
" - }, - "CompatibleArchitectures":{ - "shape":"CompatibleArchitectures", - "documentation":"A list of compatible instruction set architectures.
" - } - }, - "documentation":"Details about a version of an Lambda layer.
" - }, - "LayersList":{ - "type":"list", - "member":{"shape":"LayersListItem"} - }, - "LayersListItem":{ - "type":"structure", - "members":{ - "LayerName":{ - "shape":"LayerName", - "documentation":"The name of the layer.
" - }, - "LayerArn":{ - "shape":"LayerArn", - "documentation":"The Amazon Resource Name (ARN) of the function layer.
" - }, - "LatestMatchingVersion":{ - "shape":"LayerVersionsListItem", - "documentation":"The newest version of the layer.
" - } - }, - "documentation":"Details about an Lambda layer.
" - }, - "LayersReferenceList":{ - "type":"list", - "member":{"shape":"Layer"} - }, - "LicenseInfo":{ - "type":"string", - "max":512, - "min":0 - }, - "ListAliasesRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name - MyFunction.
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.
Partial ARN - 123456789012:function:MyFunction.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "FunctionVersion":{ - "shape":"Version", - "documentation":"Specify a function version to only list aliases that invoke that version.
", - "location":"querystring", - "locationName":"FunctionVersion" - }, - "Marker":{ - "shape":"String", - "documentation":"Specify the pagination token that's returned by a previous request to retrieve the next page of results.
", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxListItems", - "documentation":"Limit the number of aliases returned.
", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListAliasesResponse":{ - "type":"structure", - "members":{ - "NextMarker":{ - "shape":"String", - "documentation":"The pagination token that's included if more results are available.
" - }, - "Aliases":{ - "shape":"AliasList", - "documentation":"A list of aliases.
" - } - } - }, - "ListCodeSigningConfigsRequest":{ - "type":"structure", - "members":{ - "Marker":{ - "shape":"String", - "documentation":"Specify the pagination token that's returned by a previous request to retrieve the next page of results.
", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxListItems", - "documentation":"Maximum number of items to return.
", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListCodeSigningConfigsResponse":{ - "type":"structure", - "members":{ - "NextMarker":{ - "shape":"String", - "documentation":"The pagination token that's included if more results are available.
" - }, - "CodeSigningConfigs":{ - "shape":"CodeSigningConfigList", - "documentation":"The code signing configurations
" - } - } - }, - "ListDurableExecutionsByFunctionRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "location":"querystring", - "locationName":"Qualifier" - }, - "DurableExecutionName":{ - "shape":"DurableExecutionName", - "location":"querystring", - "locationName":"DurableExecutionName" - }, - "Statuses":{ - "shape":"ExecutionStatusList", - "location":"querystring", - "locationName":"Statuses" - }, - "StartedAfter":{ - "shape":"ExecutionTimestamp", - "location":"querystring", - "locationName":"StartedAfter" - }, - "StartedBefore":{ - "shape":"ExecutionTimestamp", - "location":"querystring", - "locationName":"StartedBefore" - }, - "ReverseOrder":{ - "shape":"ReverseOrder", - "location":"querystring", - "locationName":"ReverseOrder" - }, - "Marker":{ - "shape":"String", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"ItemCount", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListDurableExecutionsByFunctionResponse":{ - "type":"structure", - "members":{ - "DurableExecutions":{"shape":"DurableExecutions"}, - "NextMarker":{"shape":"String"} - } - }, - "ListEventSourceMappingsRequest":{ - "type":"structure", - "members":{ - "EventSourceArn":{ - "shape":"Arn", - "documentation":"The Amazon Resource Name (ARN) of the event source.
Amazon Kinesis – The ARN of the data stream or a stream consumer.
Amazon DynamoDB Streams – The ARN of the stream.
Amazon Simple Queue Service – The ARN of the queue.
Amazon Managed Streaming for Apache Kafka – The ARN of the cluster or the ARN of the VPC connection (for cross-account event source mappings).
Amazon MQ – The ARN of the broker.
Amazon DocumentDB – The ARN of the DocumentDB change stream.
The name or ARN of the Lambda function.
Name formats
Function name – MyFunction.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:MyFunction.
Version or Alias ARN – arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD.
Partial ARN – 123456789012:function:MyFunction.
The length constraint applies only to the full ARN. If you specify only the function name, it's limited to 64 characters in length.
", - "location":"querystring", - "locationName":"FunctionName" - }, - "Marker":{ - "shape":"String", - "documentation":"A pagination token returned by a previous call.
", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxListItems", - "documentation":"The maximum number of event source mappings to return. Note that ListEventSourceMappings returns a maximum of 100 items in each response, even if you set the number higher.
", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListEventSourceMappingsResponse":{ - "type":"structure", - "members":{ - "NextMarker":{ - "shape":"String", - "documentation":"A pagination token that's returned when the response doesn't contain all event source mappings.
" - }, - "EventSourceMappings":{ - "shape":"EventSourceMappingsList", - "documentation":"A list of event source mappings.
" - } - } - }, - "ListFunctionEventInvokeConfigsRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name - my-function.
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN - 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Marker":{ - "shape":"String", - "documentation":"Specify the pagination token that's returned by a previous request to retrieve the next page of results.
", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxFunctionEventInvokeConfigListItems", - "documentation":"The maximum number of configurations to return.
", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListFunctionEventInvokeConfigsResponse":{ - "type":"structure", - "members":{ - "FunctionEventInvokeConfigs":{ - "shape":"FunctionEventInvokeConfigList", - "documentation":"A list of configurations.
" - }, - "NextMarker":{ - "shape":"String", - "documentation":"The pagination token that's included if more results are available.
" - } - } - }, - "ListFunctionUrlConfigsRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Marker":{ - "shape":"String", - "documentation":"Specify the pagination token that's returned by a previous request to retrieve the next page of results.
", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxItems", - "documentation":"The maximum number of function URLs to return in the response. Note that ListFunctionUrlConfigs returns a maximum of 50 items in each response, even if you set the number higher.
A list of function URL configurations.
" - }, - "NextMarker":{ - "shape":"String", - "documentation":"The pagination token that's included if more results are available.
" - } - } - }, - "ListFunctionsByCodeSigningConfigRequest":{ - "type":"structure", - "required":["CodeSigningConfigArn"], - "members":{ - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"The The Amazon Resource Name (ARN) of the code signing configuration.
", - "location":"uri", - "locationName":"CodeSigningConfigArn" - }, - "Marker":{ - "shape":"String", - "documentation":"Specify the pagination token that's returned by a previous request to retrieve the next page of results.
", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxListItems", - "documentation":"Maximum number of items to return.
", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListFunctionsByCodeSigningConfigResponse":{ - "type":"structure", - "members":{ - "NextMarker":{ - "shape":"String", - "documentation":"The pagination token that's included if more results are available.
" - }, - "FunctionArns":{ - "shape":"FunctionArnList", - "documentation":"The function ARNs.
" - } - } - }, - "ListFunctionsRequest":{ - "type":"structure", - "members":{ - "MasterRegion":{ - "shape":"MasterRegion", - "documentation":"For Lambda@Edge functions, the Amazon Web Services Region of the master function. For example, us-east-1 filters the list of functions to include only Lambda@Edge functions replicated from a master function in US East (N. Virginia). If specified, you must set FunctionVersion to ALL.
Set to ALL to include entries for all published versions of each function.
Specify the pagination token that's returned by a previous request to retrieve the next page of results.
", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxListItems", - "documentation":"The maximum number of functions to return in the response. Note that ListFunctions returns a maximum of 50 items in each response, even if you set the number higher.
The pagination token that's included if more results are available.
" - }, - "Functions":{ - "shape":"FunctionList", - "documentation":"A list of Lambda functions.
" - } - }, - "documentation":"A list of Lambda functions.
" - }, - "ListLayerVersionsRequest":{ - "type":"structure", - "required":["LayerName"], - "members":{ - "CompatibleRuntime":{ - "shape":"Runtime", - "documentation":"A runtime identifier.
The following list includes deprecated runtimes. For more information, see Runtime use after deprecation.
For a list of all currently supported runtimes, see Supported runtimes.
", - "location":"querystring", - "locationName":"CompatibleRuntime" - }, - "LayerName":{ - "shape":"LayerName", - "documentation":"The name or Amazon Resource Name (ARN) of the layer.
", - "location":"uri", - "locationName":"LayerName" - }, - "Marker":{ - "shape":"String", - "documentation":"A pagination token returned by a previous call.
", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxLayerListItems", - "documentation":"The maximum number of versions to return.
", - "location":"querystring", - "locationName":"MaxItems" - }, - "CompatibleArchitecture":{ - "shape":"Architecture", - "documentation":"The compatible instruction set architecture.
", - "location":"querystring", - "locationName":"CompatibleArchitecture" - } - } - }, - "ListLayerVersionsResponse":{ - "type":"structure", - "members":{ - "NextMarker":{ - "shape":"String", - "documentation":"A pagination token returned when the response doesn't contain all versions.
" - }, - "LayerVersions":{ - "shape":"LayerVersionsList", - "documentation":"A list of versions.
" - } - } - }, - "ListLayersRequest":{ - "type":"structure", - "members":{ - "CompatibleRuntime":{ - "shape":"Runtime", - "documentation":"A runtime identifier.
The following list includes deprecated runtimes. For more information, see Runtime use after deprecation.
For a list of all currently supported runtimes, see Supported runtimes.
", - "location":"querystring", - "locationName":"CompatibleRuntime" - }, - "Marker":{ - "shape":"String", - "documentation":"A pagination token returned by a previous call.
", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxLayerListItems", - "documentation":"The maximum number of layers to return.
", - "location":"querystring", - "locationName":"MaxItems" - }, - "CompatibleArchitecture":{ - "shape":"Architecture", - "documentation":"The compatible instruction set architecture.
", - "location":"querystring", - "locationName":"CompatibleArchitecture" - } - } - }, - "ListLayersResponse":{ - "type":"structure", - "members":{ - "NextMarker":{ - "shape":"String", - "documentation":"A pagination token returned when the response doesn't contain all layers.
" - }, - "Layers":{ - "shape":"LayersList", - "documentation":"A list of function layers.
" - } - } - }, - "ListProvisionedConcurrencyConfigsRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Marker":{ - "shape":"String", - "documentation":"Specify the pagination token that's returned by a previous request to retrieve the next page of results.
", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxProvisionedConcurrencyConfigListItems", - "documentation":"Specify a number to limit the number of configurations returned.
", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListProvisionedConcurrencyConfigsResponse":{ - "type":"structure", - "members":{ - "ProvisionedConcurrencyConfigs":{ - "shape":"ProvisionedConcurrencyConfigList", - "documentation":"A list of provisioned concurrency configurations.
" - }, - "NextMarker":{ - "shape":"String", - "documentation":"The pagination token that's included if more results are available.
" - } - } - }, - "ListTagsRequest":{ - "type":"structure", - "required":["Resource"], - "members":{ - "Resource":{ - "shape":"TaggableResource", - "documentation":"The resource's Amazon Resource Name (ARN). Note: Lambda does not support adding tags to function aliases or versions.
", - "location":"uri", - "locationName":"Resource" - } - } - }, - "ListTagsResponse":{ - "type":"structure", - "members":{ - "Tags":{ - "shape":"Tags", - "documentation":"The function's tags.
" - } - } - }, - "ListVersionsByFunctionRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name - MyFunction.
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.
Partial ARN - 123456789012:function:MyFunction.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Marker":{ - "shape":"String", - "documentation":"Specify the pagination token that's returned by a previous request to retrieve the next page of results.
", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxListItems", - "documentation":"The maximum number of versions to return. Note that ListVersionsByFunction returns a maximum of 50 items in each response, even if you set the number higher.
The pagination token that's included if more results are available.
" - }, - "Versions":{ - "shape":"FunctionList", - "documentation":"A list of Lambda function versions.
" - } - } - }, - "LocalMountPath":{ - "type":"string", - "max":160, - "min":0, - "pattern":"/mnt/[a-zA-Z0-9-_.]+" - }, - "LogFormat":{ - "type":"string", - "enum":[ - "JSON", - "Text" - ] - }, - "LogGroup":{ - "type":"string", - "max":512, - "min":1, - "pattern":"[\\.\\-_/#A-Za-z0-9]+" - }, - "LogType":{ - "type":"string", - "enum":[ - "None", - "Tail" - ] - }, - "LoggingConfig":{ - "type":"structure", - "members":{ - "LogFormat":{ - "shape":"LogFormat", - "documentation":"The format in which Lambda sends your function's application and system logs to CloudWatch. Select between plain text and structured JSON.
" - }, - "ApplicationLogLevel":{ - "shape":"ApplicationLogLevel", - "documentation":"Set this property to filter the application logs for your function that Lambda sends to CloudWatch. Lambda only sends application logs at the selected level of detail and lower, where TRACE is the highest level and FATAL is the lowest.
" - }, - "SystemLogLevel":{ - "shape":"SystemLogLevel", - "documentation":"Set this property to filter the system logs for your function that Lambda sends to CloudWatch. Lambda only sends system logs at the selected level of detail and lower, where DEBUG is the highest level and WARN is the lowest.
" - }, - "LogGroup":{ - "shape":"LogGroup", - "documentation":"The name of the Amazon CloudWatch log group the function sends logs to. By default, Lambda functions send logs to a default log group named /aws/lambda/<function name>. To use a different log group, enter an existing log group or enter a new log group name.
" - } - }, - "documentation":"The function's Amazon CloudWatch Logs configuration settings.
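These four members are set together through the UpdateFunctionConfiguration operation. A minimal boto3 sketch, assuming a placeholder function and log group name:

```python
import boto3

client = boto3.client("lambda")

client.update_function_configuration(
    FunctionName="my-function",  # placeholder
    LoggingConfig={
        "LogFormat": "JSON",               # plain text or structured JSON
        "ApplicationLogLevel": "INFO",     # TRACE (highest) .. FATAL (lowest)
        "SystemLogLevel": "WARN",          # DEBUG (highest) .. WARN (lowest)
        "LogGroup": "/custom/my-function", # defaults to /aws/lambda/<function name>
    },
)
```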
" - }, - "Long":{"type":"long"}, - "MasterRegion":{ - "type":"string", - "pattern":"ALL|[a-z]{2}(-gov)?-[a-z]+-\\d{1}" - }, - "MaxAge":{ - "type":"integer", - "box":true, - "max":86400, - "min":0 - }, - "MaxFunctionEventInvokeConfigListItems":{ - "type":"integer", - "box":true, - "max":50, - "min":1 - }, - "MaxItems":{ - "type":"integer", - "box":true, - "max":50, - "min":1 - }, - "MaxLayerListItems":{ - "type":"integer", - "box":true, - "max":50, - "min":1 - }, - "MaxListItems":{ - "type":"integer", - "box":true, - "max":10000, - "min":1 - }, - "MaxProvisionedConcurrencyConfigListItems":{ - "type":"integer", - "box":true, - "max":50, - "min":1 - }, - "MaximumBatchingWindowInSeconds":{ - "type":"integer", - "box":true, - "max":300, - "min":0 - }, - "MaximumConcurrency":{ - "type":"integer", - "box":true, - "max":1000, - "min":2 - }, - "MaximumEventAgeInSeconds":{ - "type":"integer", - "box":true, - "max":21600, - "min":60 - }, - "MaximumNumberOfPollers":{ - "type":"integer", - "box":true, - "max":2000, - "min":1 - }, - "MaximumRecordAgeInSeconds":{ - "type":"integer", - "box":true, - "max":604800, - "min":-1 - }, - "MaximumRetryAttempts":{ - "type":"integer", - "box":true, - "max":2, - "min":0 - }, - "MaximumRetryAttemptsEventSourceMapping":{ - "type":"integer", - "box":true, - "max":10000, - "min":-1 - }, - "MemorySize":{ - "type":"integer", - "box":true, - "max":10240, - "min":128 - }, - "Method":{ - "type":"string", - "max":6, - "min":0, - "pattern":".*" - }, - "MinimumNumberOfPollers":{ - "type":"integer", - "box":true, - "max":200, - "min":1 - }, - "NameSpacedFunctionArn":{ - "type":"string", - "pattern":"arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_\\.]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?" - }, - "NamespacedFunctionName":{ - "type":"string", - "max":170, - "min":1, - "pattern":"(arn:(aws[a-zA-Z-]*)?:lambda:)?([a-z]{2}(-gov)?-[a-z]+-\\d{1}:)?(\\d{12}:)?(function:)?([a-zA-Z0-9-_\\.]+)(:(\\$LATEST|[a-zA-Z0-9-_]+))?" - }, - "NamespacedStatementId":{ - "type":"string", - "max":100, - "min":1, - "pattern":"([a-zA-Z0-9-_.]+)" - }, - "NonNegativeInteger":{ - "type":"integer", - "box":true, - "min":0 - }, - "NullableBoolean":{ - "type":"boolean", - "box":true - }, - "OnFailure":{ - "type":"structure", - "members":{ - "Destination":{ - "shape":"DestinationArn", - "documentation":"The Amazon Resource Name (ARN) of the destination resource.
To retain records of unsuccessful asynchronous invocations, you can configure an Amazon SNS topic, Amazon SQS queue, Amazon S3 bucket, Lambda function, or Amazon EventBridge event bus as the destination.
To retain records of failed invocations from Kinesis, DynamoDB, self-managed Kafka or Amazon MSK, you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination.
" - } - }, - "documentation":"A destination for events that failed processing. For more information, see Adding a destination.
" - }, - "OnSuccess":{ - "type":"structure", - "members":{ - "Destination":{ - "shape":"DestinationArn", - "documentation":"The Amazon Resource Name (ARN) of the destination resource.
" - } - }, - "documentation":"A destination for events that were processed successfully.
To retain records of successful asynchronous invocations, you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.
OnSuccess is not supported in CreateEventSourceMapping or UpdateEventSourceMapping requests.
" - }, - "PackageType":{ - "type":"string", - "enum":[ - "Zip", - "Image" - ] - }, - "ParallelizationFactor":{ - "type":"integer", - "box":true, - "max":10, - "min":1 - }, - "Pattern":{ - "type":"string", - "max":4096, - "min":0, - "pattern":".*" - }, - "PolicyLengthExceededException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "message":{"shape":"String"} - }, - "documentation":"The permissions policy for the resource is too large. For more information, see Lambda quotas.
", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "PositiveInteger":{ - "type":"integer", - "box":true, - "min":1 - }, - "PreconditionFailedException":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"String", - "documentation":"The exception type.
" - }, - "message":{ - "shape":"String", - "documentation":"The exception message.
" - } - }, - "documentation":"The RevisionId provided does not match the latest RevisionId for the Lambda function or alias.
For AddPermission and RemovePermission API operations: Call GetPolicy to retrieve the latest RevisionId for your resource.
For all other API operations: Call GetFunction or GetAlias to retrieve the latest RevisionId for your resource.
", - "error":{ - "httpStatusCode":412, - "senderFault":true - }, - "exception":true - }, - "Principal":{ - "type":"string", - "pattern":"[^\\s]+" - }, - "PrincipalOrgID":{ - "type":"string", - "max":34, - "min":12, - "pattern":"^o-[a-z0-9]{10,32}$" - }, - "ProvisionedConcurrencyConfigList":{ - "type":"list", - "member":{"shape":"ProvisionedConcurrencyConfigListItem"} - }, - "ProvisionedConcurrencyConfigListItem":{ - "type":"structure", - "members":{ - "FunctionArn":{ - "shape":"FunctionArn", - "documentation":"The Amazon Resource Name (ARN) of the alias or version.
" - }, - "RequestedProvisionedConcurrentExecutions":{ - "shape":"PositiveInteger", - "documentation":"The amount of provisioned concurrency requested.
" - }, - "AvailableProvisionedConcurrentExecutions":{ - "shape":"NonNegativeInteger", - "documentation":"The amount of provisioned concurrency available.
" - }, - "AllocatedProvisionedConcurrentExecutions":{ - "shape":"NonNegativeInteger", - "documentation":"The amount of provisioned concurrency allocated. When a weighted alias is used during linear and canary deployments, this value fluctuates depending on the amount of concurrency that is provisioned for the function versions.
" - }, - "Status":{ - "shape":"ProvisionedConcurrencyStatusEnum", - "documentation":"The status of the allocation process.
" - }, - "StatusReason":{ - "shape":"String", - "documentation":"For failed allocations, the reason that provisioned concurrency could not be allocated.
" - }, - "LastModified":{ - "shape":"Timestamp", - "documentation":"The date and time that a user last updated the configuration, in ISO 8601 format.
" - } - }, - "documentation":"Details about the provisioned concurrency configuration for a function alias or version.
" - }, - "ProvisionedConcurrencyConfigNotFoundException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "message":{"shape":"String"} - }, - "documentation":"The specified configuration does not exist.
", - "error":{ - "httpStatusCode":404, - "senderFault":true - }, - "exception":true - }, - "ProvisionedConcurrencyStatusEnum":{ - "type":"string", - "enum":[ - "IN_PROGRESS", - "READY", - "FAILED" - ] - }, - "ProvisionedPollerConfig":{ - "type":"structure", - "members":{ - "MinimumPollers":{ - "shape":"MinimumNumberOfPollers", - "documentation":"The minimum number of event pollers this event source can scale down to.
" - }, - "MaximumPollers":{ - "shape":"MaximumNumberOfPollers", - "documentation":"The maximum number of event pollers this event source can scale up to.
" - } - }, - "documentation":"The provisioned mode configuration for the event source. Use Provisioned Mode to customize the minimum and maximum number of event pollers for your event source. An event poller is a compute unit that provides approximately 5 MBps of throughput.
" - }, - "PublishLayerVersionRequest":{ - "type":"structure", - "required":[ - "LayerName", - "Content" - ], - "members":{ - "LayerName":{ - "shape":"LayerName", - "documentation":"The name or Amazon Resource Name (ARN) of the layer.
", - "location":"uri", - "locationName":"LayerName" - }, - "Description":{ - "shape":"Description", - "documentation":"The description of the version.
" - }, - "Content":{ - "shape":"LayerVersionContentInput", - "documentation":"The function layer archive.
" - }, - "CompatibleRuntimes":{ - "shape":"CompatibleRuntimes", - "documentation":"A list of compatible function runtimes. Used for filtering with ListLayers and ListLayerVersions.
The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.
" - }, - "LicenseInfo":{ - "shape":"LicenseInfo", - "documentation":"The layer's software license. It can be any of the following:
An SPDX license identifier. For example, MIT.
The URL of a license hosted on the internet. For example, https://opensource.org/licenses/MIT.
The full text of the license.
" - }, - "CompatibleArchitectures":{ - "shape":"CompatibleArchitectures", - "documentation":"A list of compatible instruction set architectures.
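The request above maps directly onto the PublishLayerVersion operation. A hedged boto3 sketch — layer name, bucket, and key are placeholders:

```python
import boto3

client = boto3.client("lambda")

# Layer name, bucket, and key below are placeholders.
client.publish_layer_version(
    LayerName="my-shared-libs",
    Description="common dependencies",
    Content={"S3Bucket": "my-deploy-bucket", "S3Key": "layers/shared.zip"},
    CompatibleRuntimes=["python3.12", "python3.13"],
    CompatibleArchitectures=["x86_64", "arm64"],
    LicenseInfo="MIT",  # an SPDX ID, a URL, or the full license text
)
```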
" - } - } - }, - "PublishLayerVersionResponse":{ - "type":"structure", - "members":{ - "Content":{ - "shape":"LayerVersionContentOutput", - "documentation":"Details about the layer version.
" - }, - "LayerArn":{ - "shape":"LayerArn", - "documentation":"The ARN of the layer.
" - }, - "LayerVersionArn":{ - "shape":"LayerVersionArn", - "documentation":"The ARN of the layer version.
" - }, - "Description":{ - "shape":"Description", - "documentation":"The description of the version.
" - }, - "CreatedDate":{ - "shape":"Timestamp", - "documentation":"The date that the layer version was created, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).
" - }, - "Version":{ - "shape":"LayerVersionNumber", - "documentation":"The version number.
" - }, - "CompatibleRuntimes":{ - "shape":"CompatibleRuntimes", - "documentation":"The layer's compatible runtimes.
The following list includes deprecated runtimes. For more information, see Runtime use after deprecation.
For a list of all currently supported runtimes, see Supported runtimes.
" - }, - "LicenseInfo":{ - "shape":"LicenseInfo", - "documentation":"The layer's software license.
" - }, - "CompatibleArchitectures":{ - "shape":"CompatibleArchitectures", - "documentation":"A list of compatible instruction set architectures.
" - } - } - }, - "PublishVersionRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name - MyFunction.
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.
Partial ARN - 123456789012:function:MyFunction.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "CodeSha256":{ - "shape":"String", - "documentation":"Only publish a version if the hash value matches the value that's specified. Use this option to avoid publishing a version if the function code has changed since you last updated it. You can get the hash for the version that you uploaded from the output of UpdateFunctionCode.
" - }, - "Description":{ - "shape":"Description", - "documentation":"A description for the version to override the description in the function configuration.
" - }, - "RevisionId":{ - "shape":"String", - "documentation":"Only update the function if the revision ID matches the ID that's specified. Use this option to avoid publishing a version if the function configuration has changed since you last updated it.
" - } - } - }, - "PutFunctionCodeSigningConfigRequest":{ - "type":"structure", - "required":[ - "CodeSigningConfigArn", - "FunctionName" - ], - "members":{ - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"The The Amazon Resource Name (ARN) of the code signing configuration.
" - }, - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name - MyFunction.
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.
Partial ARN - 123456789012:function:MyFunction.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - } - } - }, - "PutFunctionCodeSigningConfigResponse":{ - "type":"structure", - "required":[ - "CodeSigningConfigArn", - "FunctionName" - ], - "members":{ - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"The The Amazon Resource Name (ARN) of the code signing configuration.
" - }, - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name - MyFunction.
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.
Partial ARN - 123456789012:function:MyFunction.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
" - } - } - }, - "PutFunctionConcurrencyRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "ReservedConcurrentExecutions" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "ReservedConcurrentExecutions":{ - "shape":"ReservedConcurrentExecutions", - "documentation":"The number of simultaneous executions to reserve for the function.
" - } - } - }, - "PutFunctionEventInvokeConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function, version, or alias.
Name formats
Function name - my-function (name-only), my-function:v1 (with alias).
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN - 123456789012:function:my-function.
You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"A version number or alias name.
", - "location":"querystring", - "locationName":"Qualifier" - }, - "MaximumRetryAttempts":{ - "shape":"MaximumRetryAttempts", - "documentation":"The maximum number of times to retry when the function returns an error.
" - }, - "MaximumEventAgeInSeconds":{ - "shape":"MaximumEventAgeInSeconds", - "documentation":"The maximum age of a request that Lambda sends to a function for processing.
" - }, - "DestinationConfig":{ - "shape":"DestinationConfig", - "documentation":"A destination for events after they have been sent to a function for processing.
Destinations
Function - The Amazon Resource Name (ARN) of a Lambda function.
Queue - The ARN of a standard SQS queue.
Bucket - The ARN of an Amazon S3 bucket.
Topic - The ARN of a standard SNS topic.
Event Bus - The ARN of an Amazon EventBridge event bus.
S3 buckets are supported only for on-failure destinations. To retain records of successful invocations, use another destination type.
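These destination rules apply when calling PutFunctionEventInvokeConfig. A sketch under those constraints — all names and ARNs below are placeholders:

```python
import boto3

client = boto3.client("lambda")

client.put_function_event_invoke_config(
    FunctionName="my-function",     # placeholder
    Qualifier="prod",               # placeholder alias
    MaximumRetryAttempts=1,         # 0-2
    MaximumEventAgeInSeconds=3600,  # 60-21600
    DestinationConfig={
        # S3 is allowed only for on-failure destinations, so use SQS for success.
        "OnSuccess": {"Destination": "arn:aws:sqs:us-west-2:123456789012:ok-queue"},
        "OnFailure": {"Destination": "arn:aws:s3:::my-failed-events-bucket"},
    },
)
```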
" - } - } - }, - "PutFunctionRecursionConfigRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "RecursiveLoop" - ], - "members":{ - "FunctionName":{ - "shape":"UnqualifiedFunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "RecursiveLoop":{ - "shape":"RecursiveLoop", - "documentation":"If you set your function's recursive loop detection configuration to Allow, Lambda doesn't take any action when it detects your function being invoked as part of a recursive loop. We recommend that you only use this setting if your design intentionally uses a Lambda function to write data back to the same Amazon Web Services resource that invokes it.
If you set your function's recursive loop detection configuration to Terminate, Lambda stops your function being invoked and notifies you when it detects your function being invoked as part of a recursive loop.
By default, Lambda sets your function's configuration to Terminate.
If your design intentionally uses a Lambda function to write data back to the same Amazon Web Services resource that invokes the function, then use caution and implement suitable guard rails to prevent unexpected charges being billed to your Amazon Web Services account. To learn more about best practices for using recursive invocation patterns, see Recursive patterns that cause run-away Lambda functions in Serverless Land.
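Setting the configuration described above goes through PutFunctionRecursionConfig. A minimal boto3 sketch with a placeholder function name:

```python
import boto3

client = boto3.client("lambda")

# Only opt out of termination when the recursive write-back is intentional.
client.put_function_recursion_config(
    FunctionName="my-function",  # placeholder
    RecursiveLoop="Allow",       # the default is "Terminate"
)
```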
" - } - } - }, - "PutFunctionRecursionConfigResponse":{ - "type":"structure", - "members":{ - "RecursiveLoop":{ - "shape":"RecursiveLoop", - "documentation":"The status of your function's recursive loop detection configuration.
When this value is set to Allow and Lambda detects your function being invoked as part of a recursive loop, it doesn't take any action.
When this value is set to Terminate and Lambda detects your function being invoked as part of a recursive loop, it stops your function being invoked and notifies you.
" - } - } - }, - "PutProvisionedConcurrencyConfigRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "Qualifier", - "ProvisionedConcurrentExecutions" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"The version number or alias name.
", - "location":"querystring", - "locationName":"Qualifier" - }, - "ProvisionedConcurrentExecutions":{ - "shape":"PositiveInteger", - "documentation":"The amount of provisioned concurrency to allocate for the version or alias.
" - } - } - }, - "PutProvisionedConcurrencyConfigResponse":{ - "type":"structure", - "members":{ - "RequestedProvisionedConcurrentExecutions":{ - "shape":"PositiveInteger", - "documentation":"The amount of provisioned concurrency requested.
" - }, - "AvailableProvisionedConcurrentExecutions":{ - "shape":"NonNegativeInteger", - "documentation":"The amount of provisioned concurrency available.
" - }, - "AllocatedProvisionedConcurrentExecutions":{ - "shape":"NonNegativeInteger", - "documentation":"The amount of provisioned concurrency allocated. When a weighted alias is used during linear and canary deployments, this value fluctuates depending on the amount of concurrency that is provisioned for the function versions.
" - }, - "Status":{ - "shape":"ProvisionedConcurrencyStatusEnum", - "documentation":"The status of the allocation process.
" - }, - "StatusReason":{ - "shape":"String", - "documentation":"For failed allocations, the reason that provisioned concurrency could not be allocated.
" - }, - "LastModified":{ - "shape":"Timestamp", - "documentation":"The date and time that a user last updated the configuration, in ISO 8601 format.
" - } - } - }, - "PutRuntimeManagementConfigRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "UpdateRuntimeOn" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"Specify a version of the function. This can be $LATEST or a published version number. If no value is specified, the configuration for the $LATEST version is returned.
", - "location":"querystring", - "locationName":"Qualifier" - }, - "UpdateRuntimeOn":{ - "shape":"UpdateRuntimeOn", - "documentation":"Specify the runtime update mode.
Auto (default) - Automatically update to the most recent and secure runtime version using a Two-phase runtime version rollout. This is the best choice for most customers to ensure they always benefit from runtime updates.
Function update - Lambda updates the runtime of your function to the most recent and secure runtime version when you update your function. This approach synchronizes runtime updates with function deployments, giving you control over when runtime updates are applied and allowing you to detect and mitigate rare runtime update incompatibilities early. When using this setting, you need to regularly update your functions to keep their runtime up-to-date.
Manual - You specify a runtime version in your function configuration. The function will use this runtime version indefinitely. In the rare case where a new runtime version is incompatible with an existing function, this allows you to roll back your function to an earlier runtime version. For more information, see Roll back a runtime version.
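The modes above render here as display names; in the API the UpdateRuntimeOn enum values are Auto, FunctionUpdate, and Manual, with Manual additionally requiring a RuntimeVersionArn. A hedged boto3 sketch with a placeholder function name:

```python
import boto3

client = boto3.client("lambda")

# Pin runtime updates to function deployments instead of automatic rollout.
client.put_runtime_management_config(
    FunctionName="my-function",       # placeholder
    UpdateRuntimeOn="FunctionUpdate", # or "Auto" (default) / "Manual"
    # "Manual" additionally requires RuntimeVersionArn=...
)
```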
" - }, - "RuntimeVersionArn":{ - "shape":"RuntimeVersionArn", - "documentation":"The ARN of the runtime version you want the function to use.
This is only required if you're using the Manual runtime update mode.
" - } - } - }, - "PutRuntimeManagementConfigResponse":{ - "type":"structure", - "required":[ - "UpdateRuntimeOn", - "FunctionArn" - ], - "members":{ - "UpdateRuntimeOn":{ - "shape":"UpdateRuntimeOn", - "documentation":"The runtime update mode.
" - }, - "FunctionArn":{ - "shape":"FunctionArn", - "documentation":"The ARN of the function
" - }, - "RuntimeVersionArn":{ - "shape":"RuntimeVersionArn", - "documentation":"The ARN of the runtime the function is configured to use. If the runtime update mode is manual, the ARN is returned, otherwise null is returned.
" - } - } - }, - "Qualifier":{ - "type":"string", - "max":128, - "min":1, - "pattern":"(|[a-zA-Z0-9$_-]+)" - }, - "RecursiveInvocationException":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"String", - "documentation":"The exception type.
" - }, - "Message":{ - "shape":"String", - "documentation":"The exception message.
" - } - }, - "documentation":"Lambda has detected your function being invoked in a recursive loop with other Amazon Web Services resources and stopped your function's invocation.
", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "RecursiveLoop":{ - "type":"string", - "enum":[ - "Allow", - "Terminate" - ] - }, - "RemoveLayerVersionPermissionRequest":{ - "type":"structure", - "required":[ - "LayerName", - "VersionNumber", - "StatementId" - ], - "members":{ - "LayerName":{ - "shape":"LayerName", - "documentation":"The name or Amazon Resource Name (ARN) of the layer.
", - "location":"uri", - "locationName":"LayerName" - }, - "VersionNumber":{ - "shape":"LayerVersionNumber", - "documentation":"The version number.
", - "location":"uri", - "locationName":"VersionNumber" - }, - "StatementId":{ - "shape":"StatementId", - "documentation":"The identifier that was specified when the statement was added.
", - "location":"uri", - "locationName":"StatementId" - }, - "RevisionId":{ - "shape":"String", - "documentation":"Only update the policy if the revision ID matches the ID specified. Use this option to avoid modifying a policy that has changed since you last read it.
", - "location":"querystring", - "locationName":"RevisionId" - } - } - }, - "RemovePermissionRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "StatementId" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function, version, or alias.
Name formats
Function name – my-function (name-only), my-function:v1 (with alias).
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "StatementId":{ - "shape":"NamespacedStatementId", - "documentation":"Statement ID of the permission to remove.
", - "location":"uri", - "locationName":"StatementId" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"Specify a version or alias to remove permissions from a published version of the function.
", - "location":"querystring", - "locationName":"Qualifier" - }, - "RevisionId":{ - "shape":"String", - "documentation":"Update the policy only if the revision ID matches the ID that's specified. Use this option to avoid modifying a policy that has changed since you last read it.
", - "location":"querystring", - "locationName":"RevisionId" - } - } - }, - "ReplayChildren":{ - "type":"boolean", - "box":true - }, - "RequestTooLargeException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "message":{"shape":"String"} - }, - "documentation":"The request payload exceeded the Invoke request body JSON input quota. For more information, see Lambda quotas.
", - "error":{ - "httpStatusCode":413, - "senderFault":true - }, - "exception":true - }, - "ReservedConcurrentExecutions":{ - "type":"integer", - "box":true, - "min":0 - }, - "ResourceConflictException":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"String", - "documentation":"The exception type.
" - }, - "message":{ - "shape":"String", - "documentation":"The exception message.
" - } - }, - "documentation":"The resource already exists, or another operation is in progress.
", - "error":{ - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - "ResourceInUseException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"The operation conflicts with the resource's availability. For example, you tried to update an event source mapping in the CREATING state, or you tried to delete an event source mapping currently UPDATING.
", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "ResourceNotFoundException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"The resource specified in the request does not exist.
", - "error":{ - "httpStatusCode":404, - "senderFault":true - }, - "exception":true - }, - "ResourceNotReadyException":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"String", - "documentation":"The exception type.
" - }, - "message":{ - "shape":"String", - "documentation":"The exception message.
" - } - }, - "documentation":"The function is inactive and its VPC connection is no longer available. Wait for the VPC connection to reestablish and try again.
", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "ResponseStreamingInvocationType":{ - "type":"string", - "enum":[ - "RequestResponse", - "DryRun" - ] - }, - "RetentionPeriodInDays":{ - "type":"integer", - "box":true, - "max":90, - "min":1 - }, - "RetryDetails":{ - "type":"structure", - "members":{ - "CurrentAttempt":{"shape":"AttemptCount"}, - "NextAttemptDelaySeconds":{"shape":"DurationSeconds"} - } - }, - "ReverseOrder":{ - "type":"boolean", - "box":true - }, - "RoleArn":{ - "type":"string", - "pattern":"arn:(aws[a-zA-Z-]*)?:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+" - }, - "Runtime":{ - "type":"string", - "enum":[ - "nodejs", - "nodejs4.3", - "nodejs6.10", - "nodejs8.10", - "nodejs10.x", - "nodejs12.x", - "nodejs14.x", - "nodejs16.x", - "java8", - "java8.al2", - "java11", - "python2.7", - "python3.6", - "python3.7", - "python3.8", - "python3.9", - "dotnetcore1.0", - "dotnetcore2.0", - "dotnetcore2.1", - "dotnetcore3.1", - "dotnet6", - "dotnet8", - "nodejs4.3-edge", - "go1.x", - "ruby2.5", - "ruby2.7", - "provided", - "provided.al2", - "nodejs18.x", - "python3.10", - "java17", - "ruby3.2", - "ruby3.3", - "ruby3.4", - "python3.11", - "nodejs20.x", - "provided.al2023", - "python3.12", - "java21", - "python3.13", - "nodejs22.x" - ] - }, - "RuntimeVersionArn":{ - "type":"string", - "max":2048, - "min":26, - "pattern":"arn:(aws[a-zA-Z-]*):lambda:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}::runtime:.+" - }, - "RuntimeVersionConfig":{ - "type":"structure", - "members":{ - "RuntimeVersionArn":{ - "shape":"RuntimeVersionArn", - "documentation":"The ARN of the runtime version you want the function to use.
" - }, - "Error":{ - "shape":"RuntimeVersionError", - "documentation":"Error response when Lambda is unable to retrieve the runtime version for a function.
" - } - }, - "documentation":"The ARN of the runtime and any errors that occured.
" - }, - "RuntimeVersionError":{ - "type":"structure", - "members":{ - "ErrorCode":{ - "shape":"String", - "documentation":"The error code.
" - }, - "Message":{ - "shape":"SensitiveString", - "documentation":"The error message.
" - } - }, - "documentation":"Any error returned when the runtime version information for the function could not be retrieved.
" - }, - "S3Bucket":{ - "type":"string", - "max":63, - "min":3, - "pattern":"[0-9A-Za-z\\.\\-_]*(?Limits the number of concurrent instances that the Amazon SQS event source can invoke." - } - }, - "documentation":"(Amazon SQS only) The scaling configuration for the event source. To remove the configuration, pass an empty value.
" - }, - "SchemaRegistryEventRecordFormat":{ - "type":"string", - "enum":[ - "JSON", - "SOURCE" - ] - }, - "SchemaRegistryUri":{ - "type":"string", - "max":10000, - "min":1, - "pattern":"[a-zA-Z0-9-\\/*:_+=.@-]*" - }, - "SecurityGroupId":{"type":"string"}, - "SecurityGroupIds":{ - "type":"list", - "member":{"shape":"SecurityGroupId"}, - "max":5, - "min":0 - }, - "SelfManagedEventSource":{ - "type":"structure", - "members":{ - "Endpoints":{ - "shape":"Endpoints", - "documentation":"The list of bootstrap servers for your Kafka brokers in the following format: \"KAFKA_BOOTSTRAP_SERVERS\": [\"abc.xyz.com:xxxx\",\"abc2.xyz.com:xxxx\"].
" - } - }, - "documentation":"The self-managed Apache Kafka cluster for your event source.
" - }, - "SelfManagedKafkaEventSourceConfig":{ - "type":"structure", - "members":{ - "ConsumerGroupId":{ - "shape":"URI", - "documentation":"The identifier for the Kafka consumer group to join. The consumer group ID must be unique among all your Kafka event sources. After creating a Kafka event source mapping with the consumer group ID specified, you cannot update this value. For more information, see Customizable consumer group ID.
" - }, - "SchemaRegistryConfig":{ - "shape":"KafkaSchemaRegistryConfig", - "documentation":"Specific configuration settings for a Kafka schema registry.
" - } - }, - "documentation":"Specific configuration settings for a self-managed Apache Kafka event source.
" - }, - "SendDurableExecutionCallbackFailureRequest":{ - "type":"structure", - "required":["CallbackId"], - "members":{ - "CallbackId":{ - "shape":"CallbackId", - "location":"uri", - "locationName":"CallbackId" - }, - "Error":{"shape":"ErrorObject"} - }, - "payload":"Error" - }, - "SendDurableExecutionCallbackFailureResponse":{ - "type":"structure", - "members":{} - }, - "SendDurableExecutionCallbackHeartbeatRequest":{ - "type":"structure", - "required":["CallbackId"], - "members":{ - "CallbackId":{ - "shape":"CallbackId", - "location":"uri", - "locationName":"CallbackId" - } - } - }, - "SendDurableExecutionCallbackHeartbeatResponse":{ - "type":"structure", - "members":{} - }, - "SendDurableExecutionCallbackSuccessRequest":{ - "type":"structure", - "required":["CallbackId"], - "members":{ - "CallbackId":{ - "shape":"CallbackId", - "location":"uri", - "locationName":"CallbackId" - }, - "Result":{"shape":"BinaryOperationPayload"} - }, - "payload":"Result" - }, - "SendDurableExecutionCallbackSuccessResponse":{ - "type":"structure", - "members":{} - }, - "SensitiveString":{ - "type":"string", - "sensitive":true - }, - "ServiceException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"The Lambda service encountered an internal error.
", - "error":{"httpStatusCode":500}, - "exception":true, - "fault":true - }, - "SigningProfileVersionArns":{ - "type":"list", - "member":{"shape":"Arn"}, - "max":20, - "min":1 - }, - "SnapStart":{ - "type":"structure", - "members":{ - "ApplyOn":{ - "shape":"SnapStartApplyOn", - "documentation":"Set to PublishedVersions to create a snapshot of the initialized execution environment when you publish a function version.
" - } - }, - "documentation":"The function's Lambda SnapStart setting. Set ApplyOn to PublishedVersions to create a snapshot of the initialized execution environment when you publish a function version.
" - }, - "SnapStartApplyOn":{ - "type":"string", - "enum":[ - "PublishedVersions", - "None" - ] - }, - "SnapStartException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"The afterRestore() runtime hook encountered an error. For more information, check the Amazon CloudWatch logs.
", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "SnapStartNotReadyException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"Lambda is initializing your function. You can invoke the function when the function state becomes Active.
", - "error":{ - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - "SnapStartOptimizationStatus":{ - "type":"string", - "enum":[ - "On", - "Off" - ] - }, - "SnapStartResponse":{ - "type":"structure", - "members":{ - "ApplyOn":{ - "shape":"SnapStartApplyOn", - "documentation":"When set to PublishedVersions, Lambda creates a snapshot of the execution environment when you publish a function version.
" - }, - "OptimizationStatus":{ - "shape":"SnapStartOptimizationStatus", - "documentation":"When you provide a qualified Amazon Resource Name (ARN), this response element indicates whether SnapStart is activated for the specified function version.
" - } - }, - "documentation":"The function's SnapStart setting.
" - }, - "SnapStartTimeoutException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"Lambda couldn't restore the snapshot within the timeout limit.
", - "error":{ - "httpStatusCode":408, - "senderFault":true - }, - "exception":true - }, - "SourceAccessConfiguration":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"SourceAccessType", - "documentation":"The type of authentication protocol, VPC components, or virtual host for your event source. For example: \"Type\":\"SASL_SCRAM_512_AUTH\".
BASIC_AUTH – (Amazon MQ) The Secrets Manager secret that stores your broker credentials.
BASIC_AUTH – (Self-managed Apache Kafka) The Secrets Manager ARN of your secret key used for SASL/PLAIN authentication of your Apache Kafka brokers.
VPC_SUBNET – (Self-managed Apache Kafka) The subnets associated with your VPC. Lambda connects to these subnets to fetch data from your self-managed Apache Kafka cluster.
VPC_SECURITY_GROUP – (Self-managed Apache Kafka) The VPC security group used to manage access to your self-managed Apache Kafka brokers.
SASL_SCRAM_256_AUTH – (Self-managed Apache Kafka) The Secrets Manager ARN of your secret key used for SASL SCRAM-256 authentication of your self-managed Apache Kafka brokers.
SASL_SCRAM_512_AUTH – (Amazon MSK, Self-managed Apache Kafka) The Secrets Manager ARN of your secret key used for SASL SCRAM-512 authentication of your self-managed Apache Kafka brokers.
VIRTUAL_HOST – (RabbitMQ) The name of the virtual host in your RabbitMQ broker. Lambda uses this RabbitMQ host as the event source. This property cannot be specified in an UpdateEventSourceMapping API call.
CLIENT_CERTIFICATE_TLS_AUTH – (Amazon MSK, self-managed Apache Kafka) The Secrets Manager ARN of your secret key containing the certificate chain (X.509 PEM), private key (PKCS#8 PEM), and private key password (optional) used for mutual TLS authentication of your MSK/Apache Kafka brokers.
SERVER_ROOT_CA_CERTIFICATE – (Self-managed Apache Kafka) The Secrets Manager ARN of your secret key containing the root CA certificate (X.509 PEM) used for TLS encryption of your Apache Kafka brokers.
" - }, - "URI":{ - "shape":"URI", - "documentation":"The value for your chosen configuration in Type. For example: \"URI\": \"arn:aws:secretsmanager:us-east-1:01234567890:secret:MyBrokerSecretName\".
" - } - }, - "documentation":"To secure and define access to your event source, you can specify the authentication protocol, VPC components, or virtual host.
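Putting SelfManagedEventSource, SourceAccessConfigurations, and the consumer group ID together yields a CreateEventSourceMapping call like the sketch below — every name, endpoint, and secret ARN is a placeholder:

```python
import boto3

client = boto3.client("lambda")

client.create_event_source_mapping(
    FunctionName="my-function",  # placeholder
    Topics=["my-topic"],
    SelfManagedEventSource={
        "Endpoints": {
            "KAFKA_BOOTSTRAP_SERVERS": ["abc.xyz.com:9092", "abc2.xyz.com:9092"]
        }
    },
    SourceAccessConfigurations=[
        {
            "Type": "BASIC_AUTH",  # SASL/PLAIN credentials stored in Secrets Manager
            "URI": "arn:aws:secretsmanager:us-east-1:123456789012:secret:MyBrokerSecretName",
        }
    ],
    SelfManagedKafkaEventSourceConfig={"ConsumerGroupId": "my-consumer-group"},
)
```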
" - }, - "SourceAccessConfigurations":{ - "type":"list", - "member":{"shape":"SourceAccessConfiguration"}, - "max":22, - "min":0 - }, - "SourceAccessType":{ - "type":"string", - "enum":[ - "BASIC_AUTH", - "VPC_SUBNET", - "VPC_SECURITY_GROUP", - "SASL_SCRAM_512_AUTH", - "SASL_SCRAM_256_AUTH", - "VIRTUAL_HOST", - "CLIENT_CERTIFICATE_TLS_AUTH", - "SERVER_ROOT_CA_CERTIFICATE" - ] - }, - "SourceOwner":{ - "type":"string", - "max":12, - "min":0, - "pattern":"\\d{12}" - }, - "StackTraceEntries":{ - "type":"list", - "member":{"shape":"StackTraceEntry"} - }, - "StackTraceEntry":{ - "type":"string", - "sensitive":true - }, - "State":{ - "type":"string", - "enum":[ - "Pending", - "Active", - "Inactive", - "Failed" - ] - }, - "StateReason":{"type":"string"}, - "StateReasonCode":{ - "type":"string", - "enum":[ - "Idle", - "Creating", - "Restoring", - "EniLimitExceeded", - "InsufficientRolePermissions", - "InvalidConfiguration", - "InternalError", - "SubnetOutOfIPAddresses", - "InvalidSubnet", - "InvalidSecurityGroup", - "ImageDeleted", - "ImageAccessDenied", - "InvalidImage", - "KMSKeyAccessDenied", - "KMSKeyNotFound", - "InvalidStateKMSKey", - "DisabledKMSKey", - "EFSIOError", - "EFSMountConnectivityError", - "EFSMountFailure", - "EFSMountTimeout", - "InvalidRuntime", - "InvalidZipFileException", - "FunctionError", - "DrainingDurableExecutions" - ] - }, - "StatementId":{ - "type":"string", - "max":100, - "min":1, - "pattern":"([a-zA-Z0-9-_]+)" - }, - "StepDetails":{ - "type":"structure", - "members":{ - "Attempt":{"shape":"AttemptCount"}, - "NextAttemptTimestamp":{"shape":"ExecutionTimestamp"}, - "Result":{"shape":"OperationPayload"}, - "Error":{"shape":"ErrorObject"} - } - }, - "StepFailedDetails":{ - "type":"structure", - "required":[ - "Error", - "RetryDetails" - ], - "members":{ - "Error":{"shape":"EventError"}, - "RetryDetails":{"shape":"RetryDetails"} - } - }, - "StepOptions":{ - "type":"structure", - "members":{ - "NextAttemptDelaySeconds":{"shape":"StepOptionsNextAttemptDelaySecondsInteger"} - } - }, - "StepOptionsNextAttemptDelaySecondsInteger":{ - "type":"integer", - "box":true, - "max":31622400, - "min":1 - }, - "StepStartedDetails":{ - "type":"structure", - "members":{} - }, - "StepSucceededDetails":{ - "type":"structure", - "required":[ - "Result", - "RetryDetails" - ], - "members":{ - "Result":{"shape":"EventResult"}, - "RetryDetails":{"shape":"RetryDetails"} - } - }, - "StopDurableExecutionRequest":{ - "type":"structure", - "required":["DurableExecutionArn"], - "members":{ - "DurableExecutionArn":{ - "shape":"DurableExecutionArn", - "location":"uri", - "locationName":"DurableExecutionArn" - }, - "Error":{"shape":"ErrorObject"} - }, - "payload":"Error" - }, - "StopDurableExecutionResponse":{ - "type":"structure", - "required":["StopTimestamp"], - "members":{ - "StopTimestamp":{"shape":"ExecutionTimestamp"} - } - }, - "String":{"type":"string"}, - "StringList":{ - "type":"list", - "member":{"shape":"String"}, - "max":1500, - "min":0 - }, - "SubnetIPAddressLimitReachedException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"Lambda couldn't set up VPC access for the Lambda function because one or more configured subnets has no available IP addresses.
", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "SubnetId":{"type":"string"}, - "SubnetIds":{ - "type":"list", - "member":{"shape":"SubnetId"}, - "max":16, - "min":0 - }, - "SystemLogLevel":{ - "type":"string", - "enum":[ - "DEBUG", - "INFO", - "WARN" - ] - }, - "TagKey":{"type":"string"}, - "TagKeyList":{ - "type":"list", - "member":{"shape":"TagKey"} - }, - "TagResourceRequest":{ - "type":"structure", - "required":[ - "Resource", - "Tags" - ], - "members":{ - "Resource":{ - "shape":"TaggableResource", - "documentation":"The resource's Amazon Resource Name (ARN).
", - "location":"uri", - "locationName":"Resource" - }, - "Tags":{ - "shape":"Tags", - "documentation":"A list of tags to apply to the resource.
" - } - } - }, - "TagValue":{"type":"string"}, - "TaggableResource":{ - "type":"string", - "max":256, - "min":1, - "pattern":"arn:(aws[a-zA-Z-]*):lambda:[a-z]{2}((-gov)|(-iso([a-z]?)))?-[a-z]+-\\d{1}:\\d{12}:(function:[a-zA-Z0-9-_]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?|code-signing-config:csc-[a-z0-9]{17}|event-source-mapping:[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})" - }, - "Tags":{ - "type":"map", - "key":{"shape":"TagKey"}, - "value":{"shape":"TagValue"} - }, - "TagsError":{ - "type":"structure", - "required":[ - "ErrorCode", - "Message" - ], - "members":{ - "ErrorCode":{ - "shape":"TagsErrorCode", - "documentation":"The error code.
" - }, - "Message":{ - "shape":"TagsErrorMessage", - "documentation":"The error message.
" - } - }, - "documentation":"An object that contains details about an error related to retrieving tags.
" - }, - "TagsErrorCode":{ - "type":"string", - "max":21, - "min":10, - "pattern":"[A-Za-z]+Exception" - }, - "TagsErrorMessage":{ - "type":"string", - "max":1000, - "min":84, - "pattern":".*" - }, - "ThrottleReason":{ - "type":"string", - "enum":[ - "ConcurrentInvocationLimitExceeded", - "FunctionInvocationRateLimitExceeded", - "ReservedFunctionConcurrentInvocationLimitExceeded", - "ReservedFunctionInvocationRateLimitExceeded", - "CallerRateLimitExceeded", - "ConcurrentSnapshotCreateLimitExceeded" - ] - }, - "Timeout":{ - "type":"integer", - "box":true, - "min":1 - }, - "Timestamp":{"type":"string"}, - "TooManyRequestsException":{ - "type":"structure", - "members":{ - "retryAfterSeconds":{ - "shape":"String", - "documentation":"The number of seconds the caller should wait before retrying.
", - "location":"header", - "locationName":"Retry-After" - }, - "Type":{"shape":"String"}, - "message":{"shape":"String"}, - "Reason":{"shape":"ThrottleReason"} - }, - "documentation":"The request throughput limit was exceeded. For more information, see Lambda quotas.
", - "error":{ - "httpStatusCode":429, - "senderFault":true - }, - "exception":true - }, - "Topic":{ - "type":"string", - "max":249, - "min":1, - "pattern":"[^.]([a-zA-Z0-9\\-_.]+)" - }, - "Topics":{ - "type":"list", - "member":{"shape":"Topic"}, - "max":1, - "min":1 - }, - "TracingConfig":{ - "type":"structure", - "members":{ - "Mode":{ - "shape":"TracingMode", - "documentation":"The tracing mode.
" - } - }, - "documentation":"The function's X-Ray tracing configuration. To sample and record incoming requests, set Mode to Active.
" - }, - "TracingConfigResponse":{ - "type":"structure", - "members":{ - "Mode":{ - "shape":"TracingMode", - "documentation":"The tracing mode.
" - } - }, - "documentation":"The function's X-Ray tracing configuration.
" - }, - "TracingMode":{ - "type":"string", - "enum":[ - "Active", - "PassThrough" - ] - }, - "Truncated":{ - "type":"boolean", - "box":true - }, - "TumblingWindowInSeconds":{ - "type":"integer", - "box":true, - "max":900, - "min":0 - }, - "URI":{ - "type":"string", - "max":200, - "min":1, - "pattern":"[a-zA-Z0-9-\\/*:_+=.@-]*" - }, - "UnqualifiedFunctionName":{ - "type":"string", - "max":140, - "min":1, - "pattern":"(arn:(aws[a-zA-Z-]*)?:lambda:)?([a-z]{2}((-gov)|(-iso([a-z]?)))?-[a-z]+-\\d{1}:)?(\\d{12}:)?(function:)?([a-zA-Z0-9-_]+)" - }, - "UnreservedConcurrentExecutions":{ - "type":"integer", - "box":true, - "min":0 - }, - "UnsupportedMediaTypeException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "message":{"shape":"String"} - }, - "documentation":"The content type of the Invoke request body is not JSON.
", - "error":{ - "httpStatusCode":415, - "senderFault":true - }, - "exception":true - }, - "UntagResourceRequest":{ - "type":"structure", - "required":[ - "Resource", - "TagKeys" - ], - "members":{ - "Resource":{ - "shape":"TaggableResource", - "documentation":"The resource's Amazon Resource Name (ARN).
", - "location":"uri", - "locationName":"Resource" - }, - "TagKeys":{ - "shape":"TagKeyList", - "documentation":"A list of tag keys to remove from the resource.
", - "location":"querystring", - "locationName":"tagKeys" - } - } - }, - "UpdateAliasRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "Name" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name - MyFunction.
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:MyFunction.
Partial ARN - 123456789012:function:MyFunction.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Name":{ - "shape":"Alias", - "documentation":"The name of the alias.
", - "location":"uri", - "locationName":"Name" - }, - "FunctionVersion":{ - "shape":"Version", - "documentation":"The function version that the alias invokes.
" - }, - "Description":{ - "shape":"Description", - "documentation":"A description of the alias.
" - }, - "RoutingConfig":{ - "shape":"AliasRoutingConfiguration", - "documentation":"The routing configuration of the alias.
" - }, - "RevisionId":{ - "shape":"String", - "documentation":"Only update the alias if the revision ID matches the ID that's specified. Use this option to avoid modifying an alias that has changed since you last read it.
" - } - } - }, - "UpdateCodeSigningConfigRequest":{ - "type":"structure", - "required":["CodeSigningConfigArn"], - "members":{ - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"The The Amazon Resource Name (ARN) of the code signing configuration.
", - "location":"uri", - "locationName":"CodeSigningConfigArn" - }, - "Description":{ - "shape":"Description", - "documentation":"Descriptive name for this code signing configuration.
" - }, - "AllowedPublishers":{ - "shape":"AllowedPublishers", - "documentation":"Signing profiles for this code signing configuration.
" - }, - "CodeSigningPolicies":{ - "shape":"CodeSigningPolicies", - "documentation":"The code signing policy.
" - } - } - }, - "UpdateCodeSigningConfigResponse":{ - "type":"structure", - "required":["CodeSigningConfig"], - "members":{ - "CodeSigningConfig":{ - "shape":"CodeSigningConfig", - "documentation":"The code signing configuration
" - } - } - }, - "UpdateEventSourceMappingRequest":{ - "type":"structure", - "required":["UUID"], - "members":{ - "UUID":{ - "shape":"String", - "documentation":"The identifier of the event source mapping.
", - "location":"uri", - "locationName":"UUID" - }, - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – MyFunction.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:MyFunction.
Version or Alias ARN – arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD.
Partial ARN – 123456789012:function:MyFunction.
The length constraint applies only to the full ARN. If you specify only the function name, it's limited to 64 characters in length.
" - }, - "Enabled":{ - "shape":"Enabled", - "documentation":"When true, the event source mapping is active. When false, Lambda pauses polling and invocation.
Default: True
" - }, - "BatchSize":{ - "shape":"BatchSize", - "documentation":"The maximum number of records in each batch that Lambda pulls from your stream or queue and sends to your function. Lambda passes all of the records in the batch to the function in a single call, up to the payload limit for synchronous invocation (6 MB).
Amazon Kinesis – Default 100. Max 10,000.
Amazon DynamoDB Streams – Default 100. Max 10,000.
Amazon Simple Queue Service – Default 10. For standard queues the max is 10,000. For FIFO queues the max is 10.
Amazon Managed Streaming for Apache Kafka – Default 100. Max 10,000.
Self-managed Apache Kafka – Default 100. Max 10,000.
Amazon MQ (ActiveMQ and RabbitMQ) – Default 100. Max 10,000.
DocumentDB – Default 100. Max 10,000.
" - }, - "FilterCriteria":{ - "shape":"FilterCriteria", - "documentation":"An object that defines the filter criteria that determine whether Lambda should process an event. For more information, see Lambda event filtering.
" - }, - "MaximumBatchingWindowInSeconds":{ - "shape":"MaximumBatchingWindowInSeconds", - "documentation":"The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function. You can configure MaximumBatchingWindowInSeconds to any value from 0 seconds to 300 seconds in increments of seconds.
For Kinesis, DynamoDB, and Amazon SQS event sources, the default batching window is 0 seconds. For Amazon MSK, Self-managed Apache Kafka, Amazon MQ, and DocumentDB event sources, the default batching window is 500 ms. Note that because you can only change MaximumBatchingWindowInSeconds in increments of seconds, you cannot revert back to the 500 ms default batching window after you have changed it. To restore the default batching window, you must create a new event source mapping.
Related setting: For Kinesis, DynamoDB, and Amazon SQS event sources, when you set BatchSize to a value greater than 10, you must set MaximumBatchingWindowInSeconds to at least 1.
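The batching constraints above combine in UpdateEventSourceMapping. A short boto3 sketch — the mapping UUID is a placeholder:

```python
import boto3

client = boto3.client("lambda")

# For Kinesis, DynamoDB, and SQS sources, BatchSize > 10 requires a batching
# window of at least 1 second.
client.update_event_source_mapping(
    UUID="00000000-0000-0000-0000-000000000000",  # placeholder mapping ID
    BatchSize=100,
    MaximumBatchingWindowInSeconds=5,             # 0-300 seconds
)
```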
" - }, - "DestinationConfig":{ - "shape":"DestinationConfig", - "documentation":"(Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Kafka only) A configuration object that specifies the destination of an event after Lambda processes it.
" - }, - "MaximumRecordAgeInSeconds":{ - "shape":"MaximumRecordAgeInSeconds", - "documentation":"(Kinesis and DynamoDB Streams only) Discard records older than the specified age. The default value is infinite (-1).
" - }, - "BisectBatchOnFunctionError":{ - "shape":"BisectBatchOnFunctionError", - "documentation":"(Kinesis and DynamoDB Streams only) If the function returns an error, split the batch in two and retry.
" - }, - "MaximumRetryAttempts":{ - "shape":"MaximumRetryAttemptsEventSourceMapping", - "documentation":"(Kinesis and DynamoDB Streams only) Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.
" - }, - "ParallelizationFactor":{ - "shape":"ParallelizationFactor", - "documentation":"(Kinesis and DynamoDB Streams only) The number of batches to process from each shard concurrently.
" - }, - "SourceAccessConfigurations":{ - "shape":"SourceAccessConfigurations", - "documentation":"An array of authentication protocols or VPC components required to secure your event source.
" - }, - "TumblingWindowInSeconds":{ - "shape":"TumblingWindowInSeconds", - "documentation":"(Kinesis and DynamoDB Streams only) The duration in seconds of a processing window for DynamoDB and Kinesis Streams event sources. A value of 0 seconds indicates no tumbling window.
" - }, - "FunctionResponseTypes":{ - "shape":"FunctionResponseTypeList", - "documentation":"(Kinesis, DynamoDB Streams, and Amazon SQS) A list of current response type enums applied to the event source mapping.
" - }, - "ScalingConfig":{ - "shape":"ScalingConfig", - "documentation":"(Amazon SQS only) The scaling configuration for the event source. For more information, see Configuring maximum concurrency for Amazon SQS event sources.
" - }, - "AmazonManagedKafkaEventSourceConfig":{"shape":"AmazonManagedKafkaEventSourceConfig"}, - "SelfManagedKafkaEventSourceConfig":{"shape":"SelfManagedKafkaEventSourceConfig"}, - "DocumentDBEventSourceConfig":{ - "shape":"DocumentDBEventSourceConfig", - "documentation":"Specific configuration settings for a DocumentDB event source.
" - }, - "KMSKeyArn":{ - "shape":"KMSKeyArn", - "documentation":"The ARN of the Key Management Service (KMS) customer managed key that Lambda uses to encrypt your function's filter criteria. By default, Lambda does not encrypt your filter criteria object. Specify this property to encrypt data using your own customer managed key.
" - }, - "MetricsConfig":{ - "shape":"EventSourceMappingMetricsConfig", - "documentation":"The metrics configuration for your event source. For more information, see Event source mapping metrics.
" - }, - "ProvisionedPollerConfig":{ - "shape":"ProvisionedPollerConfig", - "documentation":"(Amazon MSK and self-managed Apache Kafka only) The provisioned mode configuration for the event source. For more information, see provisioned mode.
" - } - } - }, - "UpdateFunctionCodeRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "ZipFile":{ - "shape":"Blob", - "documentation":"The base64-encoded contents of the deployment package. Amazon Web Services SDK and CLI clients handle the encoding for you. Use only with a function defined with a .zip file archive deployment package.
" - }, - "S3Bucket":{ - "shape":"S3Bucket", - "documentation":"An Amazon S3 bucket in the same Amazon Web Services Region as your function. The bucket can be in a different Amazon Web Services account. Use only with a function defined with a .zip file archive deployment package.
" - }, - "S3Key":{ - "shape":"S3Key", - "documentation":"The Amazon S3 key of the deployment package. Use only with a function defined with a .zip file archive deployment package.
" - }, - "S3ObjectVersion":{ - "shape":"S3ObjectVersion", - "documentation":"For versioned objects, the version of the deployment package object to use.
" - }, - "ImageUri":{ - "shape":"String", - "documentation":"URI of a container image in the Amazon ECR registry. Do not use for a function defined with a .zip file archive.
" - }, - "Publish":{ - "shape":"Boolean", - "documentation":"Set to true to publish a new version of the function after updating the code. This has the same effect as calling PublishVersion separately.
" - }, - "DryRun":{ - "shape":"Boolean", - "documentation":"Set to true to validate the request parameters and access permissions without modifying the function code.
" - }, - "RevisionId":{ - "shape":"String", - "documentation":"Update the function only if the revision ID matches the ID that's specified. Use this option to avoid modifying a function that has changed since you last read it.
" - }, - "Architectures":{ - "shape":"ArchitecturesList", - "documentation":"The instruction set architecture that the function supports. Enter a string array with one of the valid values (arm64 or x86_64). The default value is x86_64.
" - }, - "SourceKMSKeyArn":{ - "shape":"KMSKeyArn", - "documentation":"The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt your function's .zip deployment package. If you don't provide a customer managed key, Lambda uses an Amazon Web Services managed key.
" - } - } - }, - "UpdateFunctionConfigurationRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Role":{ - "shape":"RoleArn", - "documentation":"The Amazon Resource Name (ARN) of the function's execution role.
" - }, - "Handler":{ - "shape":"Handler", - "documentation":"The name of the method within your code that Lambda calls to run your function. Handler is required if the deployment package is a .zip file archive. The format includes the file name. It can also include namespaces and other qualifiers, depending on the runtime. For more information, see Lambda programming model.
" - }, - "Description":{ - "shape":"Description", - "documentation":"A description of the function.
" - }, - "Timeout":{ - "shape":"Timeout", - "documentation":"The amount of time (in seconds) that Lambda allows a function to run before stopping it. The default is 3 seconds. The maximum allowed value is 900 seconds. For more information, see Lambda execution environment.
" - }, - "MemorySize":{ - "shape":"MemorySize", - "documentation":"The amount of memory available to the function at runtime. Increasing the function memory also increases its CPU allocation. The default value is 128 MB. The value can be any multiple of 1 MB.
" - }, - "VpcConfig":{ - "shape":"VpcConfig", - "documentation":"For network connectivity to Amazon Web Services resources in a VPC, specify a list of security groups and subnets in the VPC. When you connect a function to a VPC, it can access resources and the internet only through that VPC. For more information, see Configuring a Lambda function to access resources in a VPC.
" - }, - "Environment":{ - "shape":"Environment", - "documentation":"Environment variables that are accessible from function code during execution.
" - }, - "Runtime":{ - "shape":"Runtime", - "documentation":"The identifier of the function's runtime. Runtime is required if the deployment package is a .zip file archive. Specifying a runtime results in an error if you're deploying a function using a container image.
The following list includes deprecated runtimes. Lambda blocks creating new functions and updating existing functions shortly after each runtime is deprecated. For more information, see Runtime use after deprecation.
For a list of all currently supported runtimes, see Supported runtimes.
" - }, - "DeadLetterConfig":{ - "shape":"DeadLetterConfig", - "documentation":"A dead-letter queue configuration that specifies the queue or topic where Lambda sends asynchronous events when they fail processing. For more information, see Dead-letter queues.
" - }, - "KMSKeyArn":{ - "shape":"KMSKeyArn", - "documentation":"The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt the following resources:
The function's environment variables.
The function's Lambda SnapStart snapshots.
When used with SourceKMSKeyArn, the unzipped version of the .zip deployment package that's used for function invocations. For more information, see Specifying a customer managed key for Lambda.
The optimized version of the container image that's used for function invocations. Note that this is not the same key that's used to protect your container image in the Amazon Elastic Container Registry (Amazon ECR). For more information, see Function lifecycle.
If you don't provide a customer managed key, Lambda uses an Amazon Web Services owned key or an Amazon Web Services managed key.
" - }, - "TracingConfig":{ - "shape":"TracingConfig", - "documentation":"Set Mode to Active to sample and trace a subset of incoming requests with X-Ray.
" - }, - "RevisionId":{ - "shape":"String", - "documentation":"Update the function only if the revision ID matches the ID that's specified. Use this option to avoid modifying a function that has changed since you last read it.
" - }, - "Layers":{ - "shape":"LayerList", - "documentation":"A list of function layers to add to the function's execution environment. Specify each layer by its ARN, including the version.
" - }, - "FileSystemConfigs":{ - "shape":"FileSystemConfigList", - "documentation":"Connection settings for an Amazon EFS file system.
" - }, - "ImageConfig":{ - "shape":"ImageConfig", - "documentation":"Container image configuration values that override the values in the container image Docker file.
" - }, - "EphemeralStorage":{ - "shape":"EphemeralStorage", - "documentation":"The size of the function's /tmp directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).
" - }, - "SnapStart":{ - "shape":"SnapStart", - "documentation":"The function's SnapStart setting.
" - }, - "LoggingConfig":{ - "shape":"LoggingConfig", - "documentation":"The function's Amazon CloudWatch Logs configuration settings.
" - }, - "DurableConfig":{"shape":"DurableConfig"} - } - }, - "UpdateFunctionEventInvokeConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function, version, or alias.
Name formats
Function name - my-function (name-only), my-function:v1 (with alias).
Function ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN - 123456789012:function:my-function.
You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
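All four name formats resolve to the same function, so any of them can be passed wherever a FunctionName parameter is accepted. Illustrative values:

```python
import boto3

client = boto3.client("lambda")

# Each of these identifies the same function; only the full ARN is
# subject to the longer length constraint.
for name in (
    "my-function",                        # name only
    "my-function:v1",                     # name with alias
    "arn:aws:lambda:us-west-2:123456789012:function:my-function",
    "123456789012:function:my-function",  # partial ARN
):
    client.get_function(FunctionName=name)
```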
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"A version number or alias name.
", - "location":"querystring", - "locationName":"Qualifier" - }, - "MaximumRetryAttempts":{ - "shape":"MaximumRetryAttempts", - "documentation":"The maximum number of times to retry when the function returns an error.
" - }, - "MaximumEventAgeInSeconds":{ - "shape":"MaximumEventAgeInSeconds", - "documentation":"The maximum age of a request that Lambda sends to a function for processing.
" - }, - "DestinationConfig":{ - "shape":"DestinationConfig", - "documentation":"A destination for events after they have been sent to a function for processing.
Destinations
Function - The Amazon Resource Name (ARN) of a Lambda function.
Queue - The ARN of a standard SQS queue.
Bucket - The ARN of an Amazon S3 bucket.
Topic - The ARN of a standard SNS topic.
Event Bus - The ARN of an Amazon EventBridge event bus.
S3 buckets are supported only for on-failure destinations. To retain records of successful invocations, use another destination type.
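A sketch wiring these fields together with boto3, with placeholder ARNs; the on-failure destination could equally be an S3 bucket, per the note above:

```python
import boto3

boto3.client("lambda").update_function_event_invoke_config(
    FunctionName="my-function",
    Qualifier="v1",
    MaximumRetryAttempts=1,         # 0-2 retries on function error
    MaximumEventAgeInSeconds=3600,  # drop events older than one hour
    DestinationConfig={
        "OnSuccess": {"Destination": "arn:aws:events:us-west-2:123456789012:event-bus/default"},
        "OnFailure": {"Destination": "arn:aws:sqs:us-west-2:123456789012:failed-invokes"},
    },
)
```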
" - } - } - }, - "UpdateFunctionUrlConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"The name or ARN of the Lambda function.
Name formats
Function name – my-function.
Function ARN – arn:aws:lambda:us-west-2:123456789012:function:my-function.
Partial ARN – 123456789012:function:my-function.
The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.
", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"FunctionUrlQualifier", - "documentation":"The alias name.
", - "location":"querystring", - "locationName":"Qualifier" - }, - "AuthType":{ - "shape":"FunctionUrlAuthType", - "documentation":"The type of authentication that your function URL uses. Set to AWS_IAM if you want to restrict access to authenticated users only. Set to NONE if you want to bypass IAM authentication to create a public endpoint. For more information, see Security and auth model for Lambda function URLs.
" - }, - "Cors":{ - "shape":"Cors", - "documentation":"The cross-origin resource sharing (CORS) settings for your function URL.
" - }, - "InvokeMode":{ - "shape":"InvokeMode", - "documentation":"Use one of the following options:
BUFFERED – This is the default option. Lambda invokes your function using the Invoke API operation. Invocation results are available when the payload is complete. The maximum payload size is 6 MB.
RESPONSE_STREAM – Your function streams payload results as they become available. Lambda invokes your function using the InvokeWithResponseStream API operation. The maximum response payload size is 20 MB; however, you can request a quota increase.
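A sketch switching a function URL to streaming behind IAM auth; the function name is a placeholder:

```python
import boto3

boto3.client("lambda").update_function_url_config(
    FunctionName="my-function",
    AuthType="AWS_IAM",            # NONE would create a public endpoint
    InvokeMode="RESPONSE_STREAM",  # BUFFERED is the default
)
```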
" - } - } - }, - "UpdateFunctionUrlConfigResponse":{ - "type":"structure", - "members":{ - "FunctionUrl":{ - "shape":"FunctionUrl", - "documentation":"The HTTP URL endpoint for your function.
" - }, - "FunctionArn":{ - "shape":"FunctionArn", - "documentation":"The Amazon Resource Name (ARN) of your function.
" - }, - "AuthType":{ - "shape":"FunctionUrlAuthType", - "documentation":"The type of authentication that your function URL uses. Set to AWS_IAM if you want to restrict access to authenticated users only. Set to NONE if you want to bypass IAM authentication to create a public endpoint. For more information, see Security and auth model for Lambda function URLs.
" - }, - "Cors":{ - "shape":"Cors", - "documentation":"The cross-origin resource sharing (CORS) settings for your function URL.
" - }, - "CreationTime":{ - "shape":"Timestamp", - "documentation":"When the function URL was created, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).
" - }, - "LastModifiedTime":{ - "shape":"Timestamp", - "documentation":"When the function URL configuration was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).
" - }, - "InvokeMode":{ - "shape":"InvokeMode", - "documentation":"Use one of the following options:
BUFFERED – This is the default option. Lambda invokes your function using the Invoke API operation. Invocation results are available when the payload is complete. The maximum payload size is 6 MB.
RESPONSE_STREAM – Your function streams payload results as they become available. Lambda invokes your function using the InvokeWithResponseStream API operation. The maximum response payload size is 20 MB; however, you can request a quota increase.
" - } - } - }, - "VpcConfig":{ - "type":"structure", - "members":{ - "SubnetIds":{ - "shape":"SubnetIds", - "documentation":"A list of VPC subnet IDs.
" - }, - "SecurityGroupIds":{ - "shape":"SecurityGroupIds", - "documentation":"A list of VPC security group IDs.
" - }, - "Ipv6AllowedForDualStack":{ - "shape":"NullableBoolean", - "documentation":"Allows outbound IPv6 traffic on VPC functions that are connected to dual-stack subnets.
" - } - }, - "documentation":"The VPC security groups and subnets that are attached to a Lambda function. For more information, see Configuring a Lambda function to access resources in a VPC.
" - }, - "VpcConfigResponse":{ - "type":"structure", - "members":{ - "SubnetIds":{ - "shape":"SubnetIds", - "documentation":"A list of VPC subnet IDs.
" - }, - "SecurityGroupIds":{ - "shape":"SecurityGroupIds", - "documentation":"A list of VPC security group IDs.
" - }, - "VpcId":{ - "shape":"VpcId", - "documentation":"The ID of the VPC.
" - }, - "Ipv6AllowedForDualStack":{ - "shape":"NullableBoolean", - "documentation":"Allows outbound IPv6 traffic on VPC functions that are connected to dual-stack subnets.
" - } - }, - "documentation":"The VPC security groups and subnets that are attached to a Lambda function.
" - }, - "VpcId":{"type":"string"}, - "WaitCancelledDetails":{ - "type":"structure", - "members":{ - "Error":{"shape":"EventError"} - } - }, - "WaitDetails":{ - "type":"structure", - "members":{ - "ScheduledEndTimestamp":{"shape":"ExecutionTimestamp"} - } - }, - "WaitOptions":{ - "type":"structure", - "members":{ - "WaitSeconds":{"shape":"WaitOptionsWaitSecondsInteger"} - } - }, - "WaitOptionsWaitSecondsInteger":{ - "type":"integer", - "box":true, - "max":31622400, - "min":1 - }, - "WaitStartedDetails":{ - "type":"structure", - "required":[ - "Duration", - "ScheduledEndTimestamp" - ], - "members":{ - "Duration":{"shape":"DurationSeconds"}, - "ScheduledEndTimestamp":{"shape":"ExecutionTimestamp"} - } - }, - "WaitSucceededDetails":{ - "type":"structure", - "members":{ - "Duration":{"shape":"DurationSeconds"} - } - }, - "Weight":{ - "type":"double", - "max":1.0, - "min":0.0 - }, - "WorkingDirectory":{ - "type":"string", - "max":1000, - "min":0 - } - }, - "documentation":"Overview
Lambda is a compute service that lets you run code without provisioning or managing servers. Lambda runs your code on a high-availability compute infrastructure and performs all of the administration of the compute resources, including server and operating system maintenance, capacity provisioning and automatic scaling, code monitoring and logging. With Lambda, you can run code for virtually any type of application or backend service. For more information about the Lambda service, see What is Lambda in the Lambda Developer Guide.
The Lambda API Reference provides information about each of the API methods, including details about the parameters in each API request and response.
You can use Software Development Kits (SDKs), Integrated Development Environment (IDE) Toolkits, and command line tools to access the API. For installation instructions, see Tools for Amazon Web Services.
For a list of Region-specific endpoints that Lambda supports, see Lambda endpoints and quotas in the Amazon Web Services General Reference.
When making the API calls, you will need to authenticate your request by providing a signature. Lambda supports Signature Version 4. For more information, see Signature Version 4 signing process in the Amazon Web Services General Reference.
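The SDKs apply SigV4 automatically; for a hand-rolled request, botocore's signer can be used directly. A sketch with an illustrative region and endpoint:

```python
import botocore.session
from botocore.auth import SigV4Auth
from botocore.awsrequest import AWSRequest

session = botocore.session.Session()
request = AWSRequest(
    method="GET",
    url="https://lambda.us-west-2.amazonaws.com/2015-03-31/functions",
)
# Signs in place, adding the Authorization and X-Amz-Date headers.
SigV4Auth(session.get_credentials(), "lambda", "us-west-2").add_auth(request)
print(request.headers["Authorization"])  # AWS4-HMAC-SHA256 Credential=...
```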
CA certificates
Because Amazon Web Services SDKs use the CA certificates from your computer, changes to the certificates on the Amazon Web Services servers can cause connection failures when you attempt to use an SDK. You can prevent these failures by keeping your computer's CA certificates and operating system up to date. If you encounter this issue in a corporate environment and do not manage your own computer, you might need to ask an administrator to assist with the update process. The following list shows minimum operating system and Java versions:
Microsoft Windows versions that have updates from January 2005 or later installed contain at least one of the required CAs in their trust list.
Mac OS X 10.4 with Java for Mac OS X 10.4 Release 5 (February 2007), Mac OS X 10.5 (October 2007), and later versions contain at least one of the required CAs in their trust list.
Red Hat Enterprise Linux 5 (March 2007), 6, and 7 and CentOS 5, 6, and 7 all contain at least one of the required CAs in their default trusted CA list.
Java 1.4.2_12 (May 2006), 5 Update 2 (March 2005), and all later versions, including Java 6 (December 2006), 7, and 8, contain at least one of the required CAs in their default trusted CA list.
When accessing the Lambda management console or Lambda API endpoints, whether through browsers or programmatically, you will need to ensure your client machines support any of the following CAs:
Amazon Root CA 1
Starfield Services Root Certificate Authority - G2
Starfield Class 2 Certification Authority
Root certificates from the first two authorities are available from Amazon Trust Services, but keeping your computer up to date is the more straightforward solution. To learn more about ACM-provided certificates, see the Amazon Web Services Certificate Manager FAQs.
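A quick way to confirm that the local trust store accepts the chain is to complete a TLS handshake against the endpoint; a CERTIFICATE_VERIFY_FAILED error here points at the outdated-CA problem described above. The hostname is illustrative:

```python
import socket
import ssl

host = "lambda.us-west-2.amazonaws.com"
context = ssl.create_default_context()  # platform trust store

with socket.create_connection((host, 443), timeout=5) as sock:
    with context.wrap_socket(sock, server_hostname=host) as tls:
        # wrap_socket raises ssl.SSLCertVerificationError if no trusted root matches.
        issuer = dict(pair[0] for pair in tls.getpeercert()["issuer"])
        print("verified; issued by:", issuer.get("organizationName"))
```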
" -} diff --git a/src/aws_durable_execution_sdk_python/concurrency.py b/src/aws_durable_execution_sdk_python/concurrency.py deleted file mode 100644 index 4797d05..0000000 --- a/src/aws_durable_execution_sdk_python/concurrency.py +++ /dev/null @@ -1,862 +0,0 @@ -"""Concurrent executor for parallel and map operations.""" - -from __future__ import annotations - -import heapq -import logging -import threading -import time -from abc import ABC, abstractmethod -from collections import Counter -from concurrent.futures import Future, ThreadPoolExecutor -from dataclasses import dataclass -from enum import Enum -from typing import TYPE_CHECKING, Generic, Self, TypeVar - -from aws_durable_execution_sdk_python.config import ChildConfig -from aws_durable_execution_sdk_python.exceptions import ( - InvalidStateError, - SuspendExecution, - TimedSuspendExecution, -) -from aws_durable_execution_sdk_python.identifier import OperationIdentifier -from aws_durable_execution_sdk_python.lambda_service import ErrorObject -from aws_durable_execution_sdk_python.operation.child import child_handler -from aws_durable_execution_sdk_python.types import BatchResult as BatchResultProtocol - -if TYPE_CHECKING: - from collections.abc import Callable - - from aws_durable_execution_sdk_python.config import CompletionConfig - from aws_durable_execution_sdk_python.context import DurableContext - from aws_durable_execution_sdk_python.lambda_service import OperationSubType - from aws_durable_execution_sdk_python.serdes import SerDes - from aws_durable_execution_sdk_python.state import ExecutionState - from aws_durable_execution_sdk_python.types import SummaryGenerator - - -logger = logging.getLogger(__name__) - -T = TypeVar("T") -R = TypeVar("R") - -CallableType = TypeVar("CallableType") -ResultType = TypeVar("ResultType") - - -# region Result models -class BatchItemStatus(Enum): - SUCCEEDED = "SUCCEEDED" - FAILED = "FAILED" - STARTED = "STARTED" - - -class CompletionReason(Enum): - ALL_COMPLETED = "ALL_COMPLETED" - MIN_SUCCESSFUL_REACHED = "MIN_SUCCESSFUL_REACHED" - FAILURE_TOLERANCE_EXCEEDED = "FAILURE_TOLERANCE_EXCEEDED" - - -@dataclass(frozen=True) -class SuspendResult: - should_suspend: bool - exception: SuspendExecution | None = None - - @staticmethod - def do_not_suspend() -> SuspendResult: - return SuspendResult(should_suspend=False) - - @staticmethod - def suspend(exception: SuspendExecution) -> SuspendResult: - return SuspendResult(should_suspend=True, exception=exception) - - -@dataclass(frozen=True) -class BatchItem(Generic[R]): - index: int - status: BatchItemStatus - result: R | None = None - error: ErrorObject | None = None - - def to_dict(self) -> dict: - return { - "index": self.index, - "status": self.status.value, - "result": self.result, - "error": self.error.to_dict() if self.error else None, - } - - @classmethod - def from_dict(cls, data: dict) -> BatchItem[R]: - return cls( - index=data["index"], - status=BatchItemStatus(data["status"]), - result=data.get("result"), - error=ErrorObject.from_dict(data["error"]) if data.get("error") else None, - ) - - -@dataclass(frozen=True) -class BatchResult(Generic[R], BatchResultProtocol[R]): # noqa: PYI059 - all: list[BatchItem[R]] - completion_reason: CompletionReason - - @classmethod - def from_dict( - cls, data: dict, completion_config: CompletionConfig | None = None - ) -> BatchResult[R]: - batch_items: list[BatchItem[R]] = [ - BatchItem.from_dict(item) for item in data["all"] - ] - - completion_reason_value = data.get("completionReason") - if 
completion_reason_value is None: - # Infer completion reason from batch item statuses and completion config - # This aligns with the TypeScript implementation that uses completion config - # to accurately reconstruct the completion reason during replay - result = cls.from_items(batch_items, completion_config) - logger.warning( - "Missing completionReason in BatchResult deserialization, " - "inferred '%s' from batch item statuses. " - "This may indicate incomplete serialization data.", - result.completion_reason.value, - ) - return result - - completion_reason = CompletionReason(completion_reason_value) - return cls(batch_items, completion_reason) - - @classmethod - def from_items( - cls, - items: list[BatchItem[R]], - completion_config: CompletionConfig | None = None, - ): - """ - Infer completion reason based on batch item statuses and completion config. - - This follows the same logic as the TypeScript implementation: - - If all items completed: ALL_COMPLETED - - If minSuccessful threshold met and not all completed: MIN_SUCCESSFUL_REACHED - - Otherwise: FAILURE_TOLERANCE_EXCEEDED - """ - - statuses = (item.status for item in items) - counts = Counter(statuses) - succeeded_count = counts.get(BatchItemStatus.SUCCEEDED, 0) - failed_count = counts.get(BatchItemStatus.FAILED, 0) - started_count = counts.get(BatchItemStatus.STARTED, 0) - - completed_count = succeeded_count + failed_count - total_count = started_count + completed_count - - # If all items completed (no started items), it's ALL_COMPLETED - if completed_count == total_count: - completion_reason = CompletionReason.ALL_COMPLETED - elif ( # If we have completion config and minSuccessful threshold is met - completion_config - and (min_successful := completion_config.min_successful) is not None - and succeeded_count >= min_successful - ): - completion_reason = CompletionReason.MIN_SUCCESSFUL_REACHED - else: - # Otherwise, assume failure tolerance was exceeded - completion_reason = CompletionReason.FAILURE_TOLERANCE_EXCEEDED - - return cls(items, completion_reason) - - def to_dict(self) -> dict: - return { - "all": [item.to_dict() for item in self.all], - "completionReason": self.completion_reason.value, - } - - def succeeded(self) -> list[BatchItem[R]]: - return [ - item - for item in self.all - if item.status is BatchItemStatus.SUCCEEDED and item.result is not None - ] - - def failed(self) -> list[BatchItem[R]]: - return [ - item - for item in self.all - if item.status is BatchItemStatus.FAILED and item.error is not None - ] - - def started(self) -> list[BatchItem[R]]: - return [item for item in self.all if item.status is BatchItemStatus.STARTED] - - @property - def status(self) -> BatchItemStatus: - return BatchItemStatus.FAILED if self.has_failure else BatchItemStatus.SUCCEEDED - - @property - def has_failure(self) -> bool: - return any(item.status is BatchItemStatus.FAILED for item in self.all) - - def throw_if_error(self) -> None: - first_error = next( - (item.error for item in self.all if item.status is BatchItemStatus.FAILED), - None, - ) - if first_error: - raise first_error.to_callable_runtime_error() - - def get_results(self) -> list[R]: - return [ - item.result - for item in self.all - if item.status is BatchItemStatus.SUCCEEDED and item.result is not None - ] - - def get_errors(self) -> list[ErrorObject]: - return [ - item.error - for item in self.all - if item.status is BatchItemStatus.FAILED and item.error is not None - ] - - @property - def success_count(self) -> int: - return sum(1 for item in self.all if item.status is 
BatchItemStatus.SUCCEEDED) - - @property - def failure_count(self) -> int: - return sum(1 for item in self.all if item.status is BatchItemStatus.FAILED) - - @property - def started_count(self) -> int: - return sum(1 for item in self.all if item.status is BatchItemStatus.STARTED) - - @property - def total_count(self) -> int: - return len(self.all) - - -# endregion Result models - - -# region concurrency models -@dataclass(frozen=True) -class Executable(Generic[CallableType]): - index: int - func: CallableType - - -class BranchStatus(Enum): - PENDING = "pending" - RUNNING = "running" - COMPLETED = "completed" - SUSPENDED = "suspended" - SUSPENDED_WITH_TIMEOUT = "suspended_with_timeout" - FAILED = "failed" - - -class ExecutableWithState(Generic[CallableType, ResultType]): - """Manages the execution state and lifecycle of an executable.""" - - def __init__(self, executable: Executable[CallableType]): - self.executable = executable - self._status = BranchStatus.PENDING - self._future: Future | None = None - self._suspend_until: float | None = None - self._result: ResultType = None # type: ignore[assignment] - self._is_result_set: bool = False - self._error: Exception | None = None - - @property - def future(self) -> Future: - """Get the future, raising error if not available.""" - if self._future is None: - msg = f"ExecutableWithState was never started. {self.executable.index}" - raise InvalidStateError(msg) - return self._future - - @property - def status(self) -> BranchStatus: - """Get current status.""" - return self._status - - @property - def result(self) -> ResultType: - """Get result if completed.""" - if not self._is_result_set or self._status != BranchStatus.COMPLETED: - msg = f"result not available in status {self._status}" - raise InvalidStateError(msg) - return self._result - - @property - def error(self) -> Exception: - """Get error if failed.""" - if self._error is None or self._status != BranchStatus.FAILED: - msg = f"error not available in status {self._status}" - raise InvalidStateError(msg) - return self._error - - @property - def suspend_until(self) -> float | None: - """Get suspend timestamp.""" - return self._suspend_until - - @property - def is_running(self) -> bool: - """Check if currently running.""" - return self._status is BranchStatus.RUNNING - - @property - def can_resume(self) -> bool: - """Check if can resume from suspension.""" - return self._status is BranchStatus.SUSPENDED or ( - self._status is BranchStatus.SUSPENDED_WITH_TIMEOUT - and self._suspend_until is not None - and time.time() >= self._suspend_until - ) - - @property - def index(self) -> int: - return self.executable.index - - @property - def callable(self) -> CallableType: - return self.executable.func - - # region State transitions - def run(self, future: Future) -> None: - """Transition to RUNNING state with a future.""" - if self._status != BranchStatus.PENDING: - msg = f"Cannot start running from {self._status}" - raise InvalidStateError(msg) - self._status = BranchStatus.RUNNING - self._future = future - - def suspend(self) -> None: - """Transition to SUSPENDED state (indefinite).""" - self._status = BranchStatus.SUSPENDED - self._suspend_until = None - - def suspend_with_timeout(self, timestamp: float) -> None: - """Transition to SUSPENDED_WITH_TIMEOUT state.""" - self._status = BranchStatus.SUSPENDED_WITH_TIMEOUT - self._suspend_until = timestamp - - def complete(self, result: ResultType) -> None: - """Transition to COMPLETED state.""" - self._status = BranchStatus.COMPLETED - self._result = 
result - self._is_result_set = True - - def fail(self, error: Exception) -> None: - """Transition to FAILED state.""" - self._status = BranchStatus.FAILED - self._error = error - - def reset_to_pending(self) -> None: - """Reset to PENDING state for resubmission.""" - self._status = BranchStatus.PENDING - self._future = None - self._suspend_until = None - - # endregion State transitions - - -class ExecutionCounters: - """Thread-safe counters for tracking execution state.""" - - def __init__( - self, - total_tasks: int, - min_successful: int, - tolerated_failure_count: int | None, - tolerated_failure_percentage: float | None, - ): - self.total_tasks: int = total_tasks - self.min_successful: int = min_successful - self.tolerated_failure_count: int | None = tolerated_failure_count - self.tolerated_failure_percentage: float | None = tolerated_failure_percentage - self.success_count: int = 0 - self.failure_count: int = 0 - self._lock = threading.Lock() - - def complete_task(self) -> None: - """Task completed successfully.""" - with self._lock: - self.success_count += 1 - - def fail_task(self) -> None: - """Task failed.""" - with self._lock: - self.failure_count += 1 - - def should_continue(self) -> bool: - """ - Check if we should continue starting new tasks (based on failure tolerance). - Matches TypeScript shouldContinue() logic. - """ - with self._lock: - # If no completion config, only continue if no failures - if ( - self.tolerated_failure_count is None - and self.tolerated_failure_percentage is None - ): - return self.failure_count == 0 - - # Check failure count tolerance - if ( - self.tolerated_failure_count is not None - and self.failure_count > self.tolerated_failure_count - ): - return False - - # Check failure percentage tolerance - if self.tolerated_failure_percentage is not None and self.total_tasks > 0: - failure_percentage = (self.failure_count / self.total_tasks) * 100 - if failure_percentage > self.tolerated_failure_percentage: - return False - - return True - - def is_complete(self) -> bool: - """ - Check if execution should complete (based on completion criteria). - Matches TypeScript isComplete() logic. - """ - with self._lock: - completed_count = self.success_count + self.failure_count - - # All tasks completed - if completed_count == self.total_tasks: - # Complete if no failure tolerance OR no failures OR min successful reached - return ( - ( - self.tolerated_failure_count is None - and self.tolerated_failure_percentage is None - ) - or self.failure_count == 0 - or self.success_count >= self.min_successful - ) - - # when we breach min successful, we've completed - return self.success_count >= self.min_successful - - def should_complete(self) -> bool: - """ - Check if execution should complete. - Combines TypeScript shouldContinue() and isComplete() logic. 
- """ - return self.is_complete() or not self.should_continue() - - def is_all_completed(self) -> bool: - """True if all tasks completed successfully.""" - with self._lock: - return self.success_count == self.total_tasks - - def is_min_successful_reached(self) -> bool: - """True if minimum successful tasks reached.""" - with self._lock: - return self.success_count >= self.min_successful - - def is_failure_tolerance_exceeded(self) -> bool: - """True if failure tolerance was exceeded.""" - with self._lock: - return self._is_failure_condition_reached( - tolerated_count=self.tolerated_failure_count, - tolerated_percentage=self.tolerated_failure_percentage, - failure_count=self.failure_count, - ) - - def _is_failure_condition_reached( - self, - tolerated_count: int | None, - tolerated_percentage: float | None, - failure_count: int, - ) -> bool: - """True if failure conditions are reached (no locking - caller must lock).""" - # Failure count condition - if tolerated_count is not None and failure_count > tolerated_count: - return True - - # Failure percentage condition - if tolerated_percentage is not None and self.total_tasks > 0: - failure_percentage = (failure_count / self.total_tasks) * 100 - if failure_percentage > tolerated_percentage: - return True - - return False - - -# endegion concurrency models - - -# region concurrency logic -class TimerScheduler: - """Manage timed suspend tasks with a background timer thread.""" - - def __init__( - self, resubmit_callback: Callable[[ExecutableWithState], None] - ) -> None: - self.resubmit_callback = resubmit_callback - self._pending_resumes: list[tuple[float, ExecutableWithState]] = [] - self._lock = threading.Lock() - self._shutdown = threading.Event() - self._timer_thread = threading.Thread(target=self._timer_loop, daemon=True) - self._timer_thread.start() - - def __enter__(self) -> Self: - return self - - def __exit__(self, exc_type, exc_val, exc_tb) -> None: - self.shutdown() - - def schedule_resume( - self, exe_state: ExecutableWithState, resume_time: float - ) -> None: - """Schedule a task to resume at the specified time.""" - with self._lock: - heapq.heappush(self._pending_resumes, (resume_time, exe_state)) - - def shutdown(self) -> None: - """Shutdown the timer thread and cancel all pending resumes.""" - self._shutdown.set() - self._timer_thread.join(timeout=1.0) - with self._lock: - self._pending_resumes.clear() - - def _timer_loop(self) -> None: - """Background thread that processes timed resumes.""" - while not self._shutdown.is_set(): - next_resume_time = None - - with self._lock: - if self._pending_resumes: - next_resume_time = self._pending_resumes[0][0] - - if next_resume_time is None: - # No pending resumes, wait a bit and check again - self._shutdown.wait(timeout=0.1) - continue - - current_time = time.time() - if current_time >= next_resume_time: - # Time to resume - with self._lock: - # no branch cover because hard to test reliably - this is a double-safety check if heap mutated - # since the first peek on next_resume_time further up - if ( # pragma: no branch - self._pending_resumes - and self._pending_resumes[0][0] <= current_time - ): - _, exe_state = heapq.heappop(self._pending_resumes) - if exe_state.can_resume: - exe_state.reset_to_pending() - self.resubmit_callback(exe_state) - else: - # Wait until next resume time - wait_time = min(next_resume_time - current_time, 0.1) - self._shutdown.wait(timeout=wait_time) - - -class ConcurrentExecutor(ABC, Generic[CallableType, ResultType]): - """Execute durable operations 
concurrently. This contains the execution logic for Map and Parallel.""" - - def __init__( - self, - executables: list[Executable[CallableType]], - max_concurrency: int | None, - completion_config: CompletionConfig, - sub_type_top: OperationSubType, - sub_type_iteration: OperationSubType, - name_prefix: str, - serdes: SerDes | None, - item_serdes: SerDes | None = None, - summary_generator: SummaryGenerator | None = None, - ): - """Initialize ConcurrentExecutor. - - Args: - summary_generator: Optional function to generate compact summaries for large results. - When the serialized result exceeds 256KB, this generator creates a JSON summary - instead of checkpointing the full result. Used by map/parallel operations to - handle large BatchResult payloads efficiently. Matches TypeScript behavior in - run-in-child-context-handler.ts. - """ - self.executables = executables - self.max_concurrency = max_concurrency - self.completion_config = completion_config - self.sub_type_top = sub_type_top - self.sub_type_iteration = sub_type_iteration - self.name_prefix = name_prefix - self.summary_generator = summary_generator - - # Event-driven state tracking for when the executor is done - self._completion_event = threading.Event() - self._suspend_exception: SuspendExecution | None = None - - # ExecutionCounters will keep track of completion criteria and on-going counters - min_successful = self.completion_config.min_successful or len(self.executables) - tolerated_failure_count = self.completion_config.tolerated_failure_count - tolerated_failure_percentage = ( - self.completion_config.tolerated_failure_percentage - ) - - self.counters: ExecutionCounters = ExecutionCounters( - len(executables), - min_successful, - tolerated_failure_count, - tolerated_failure_percentage, - ) - self.executables_with_state: list[ExecutableWithState] = [] - self.serdes = serdes - self.item_serdes = item_serdes - - @abstractmethod - def execute_item( - self, child_context: DurableContext, executable: Executable[CallableType] - ) -> ResultType: - """Execute a single executable in a child context and return the result.""" - raise NotImplementedError - - def execute( - self, execution_state: ExecutionState, executor_context: DurableContext - ) -> BatchResult[ResultType]: - """Execute items concurrently with event-driven state management.""" - logger.debug( - "▶️ Executing concurrent operation, items: %d", len(self.executables) - ) - - max_workers = self.max_concurrency or len(self.executables) - - self.executables_with_state = [ - ExecutableWithState(executable=exe) for exe in self.executables - ] - self._completion_event.clear() - self._suspend_exception = None - - def resubmitter(executable_with_state: ExecutableWithState) -> None: - """Resubmit a timed suspended task.""" - execution_state.create_checkpoint() - submit_task(executable_with_state) - - with ( - TimerScheduler(resubmitter) as scheduler, - ThreadPoolExecutor(max_workers=max_workers) as thread_executor, - ): - - def submit_task(executable_with_state: ExecutableWithState) -> None: - """Submit task to the thread executor and mark its state as started.""" - future = thread_executor.submit( - self._execute_item_in_child_context, - executor_context, - executable_with_state.executable, - ) - executable_with_state.run(future) - - def on_done(future: Future) -> None: - self._on_task_complete(executable_with_state, future, scheduler) - - future.add_done_callback(on_done) - - # Submit initial tasks - for exe_state in self.executables_with_state: - submit_task(exe_state) - - # 
Wait for completion - self._completion_event.wait() - - # Suspend execution if everything done and at least one of the tasks raised a suspend exception. - if self._suspend_exception: - raise self._suspend_exception - - # Build final result - return self._create_result() - - def should_execution_suspend(self) -> SuspendResult: - """Check if execution should suspend.""" - earliest_timestamp: float = float("inf") - indefinite_suspend_task: ( - ExecutableWithState[CallableType, ResultType] | None - ) = None - - for exe_state in self.executables_with_state: - if exe_state.status in {BranchStatus.PENDING, BranchStatus.RUNNING}: - # Exit here! Still have tasks that can make progress, don't suspend. - return SuspendResult.do_not_suspend() - if exe_state.status is BranchStatus.SUSPENDED_WITH_TIMEOUT: - if ( - exe_state.suspend_until - and exe_state.suspend_until < earliest_timestamp - ): - earliest_timestamp = exe_state.suspend_until - elif exe_state.status is BranchStatus.SUSPENDED: - indefinite_suspend_task = exe_state - - # All tasks are in final states and at least one of them is a suspend. - if earliest_timestamp != float("inf"): - return SuspendResult.suspend( - TimedSuspendExecution( - "All concurrent work complete or suspended pending retry.", - earliest_timestamp, - ) - ) - if indefinite_suspend_task: - return SuspendResult.suspend( - SuspendExecution( - "All concurrent work complete or suspended and pending external callback." - ) - ) - - return SuspendResult.do_not_suspend() - - def _on_task_complete( - self, - exe_state: ExecutableWithState, - future: Future, - scheduler: TimerScheduler, - ) -> None: - """Handle task completion, suspension, or failure.""" - try: - result = future.result() - exe_state.complete(result) - self.counters.complete_task() - except TimedSuspendExecution as tse: - exe_state.suspend_with_timeout(tse.scheduled_timestamp) - scheduler.schedule_resume(exe_state, tse.scheduled_timestamp) - except SuspendExecution: - exe_state.suspend() - # For indefinite suspend, don't schedule resume - except Exception as e: # noqa: BLE001 - exe_state.fail(e) - self.counters.fail_task() - - # Check if execution should complete or suspend - if self.counters.should_complete(): - self._completion_event.set() - else: - suspend_result = self.should_execution_suspend() - if suspend_result.should_suspend: - self._suspend_exception = suspend_result.exception - self._completion_event.set() - - def _create_result(self) -> BatchResult[ResultType]: - """ - Build the final BatchResult. - - When this function executes, we've terminated the upper/parent context for whatever reason. - It follows that our items can be only in 3 states, Completed, Failed and Started (in all of the possible forms). - We tag each branch based on its observed value at the time of completion of the parent / upper context, and pass the - results to BatchResult. - - Any inference wrt completion reason is left up to BatchResult, keeping the logic inference isolated. 
- """ - batch_items: list[BatchItem[ResultType]] = [] - for executable in self.executables_with_state: - match executable.status: - case BranchStatus.COMPLETED: - batch_items.append( - BatchItem( - executable.index, - BatchItemStatus.SUCCEEDED, - executable.result, - ) - ) - case BranchStatus.FAILED: - batch_items.append( - BatchItem( - executable.index, - BatchItemStatus.FAILED, - error=ErrorObject.from_exception(executable.error), - ) - ) - case ( - BranchStatus.PENDING - | BranchStatus.RUNNING - | BranchStatus.SUSPENDED - | BranchStatus.SUSPENDED_WITH_TIMEOUT - ): - batch_items.append( - BatchItem(executable.index, BatchItemStatus.STARTED) - ) - - return BatchResult.from_items(batch_items, self.completion_config) - - def _execute_item_in_child_context( - self, - executor_context: DurableContext, - executable: Executable[CallableType], - ) -> ResultType: - """ - Execute a single item in a derived child context. - - instead of relying on `executor_context.run_in_child_context` - we generate an operation_id for the child, and then call `child_handler` - directly. This avoids the hidden mutation of the context's internal counter. - we can do this because we explicitly control the generation of step_id and do it - using executable.index. - - - invariant: `operation_id` for a given executable is deterministic, - and execution order invariant. - """ - - operation_id = executor_context._create_step_id_for_logical_step( # noqa: SLF001 - executable.index - ) - name = f"{self.name_prefix}{executable.index}" - child_context = executor_context.create_child_context(operation_id) - operation_identifier = OperationIdentifier( - operation_id, - executor_context._parent_id, # noqa: SLF001 - name, - ) - - def run_in_child_handler(): - return self.execute_item(child_context, executable) - - return child_handler( - run_in_child_handler, - child_context.state, - operation_identifier=operation_identifier, - config=ChildConfig( - serdes=self.item_serdes or self.serdes, - sub_type=self.sub_type_iteration, - summary_generator=self.summary_generator, - ), - ) - - def replay(self, execution_state: ExecutionState, executor_context: DurableContext): - """ - Replay rather than re-run children. - - if we are here, then we are in replay_children. - This will pre-generate all the operation ids for the children and collect the checkpointed - results. 
- """ - items: list[BatchItem[ResultType]] = [] - for executable in self.executables: - operation_id = executor_context._create_step_id_for_logical_step( # noqa: SLF001 - executable.index - ) - checkpoint = execution_state.get_checkpoint_result(operation_id) - - result: ResultType | None = None - error = None - status: BatchItemStatus - if checkpoint.is_succeeded(): - status = BatchItemStatus.SUCCEEDED - result = self._execute_item_in_child_context( - executor_context, executable - ) - - elif checkpoint.is_failed(): - error = checkpoint.error - status = BatchItemStatus.FAILED - else: - status = BatchItemStatus.STARTED - - batch_item = BatchItem(executable.index, status, result=result, error=error) - items.append(batch_item) - return BatchResult.from_items(items, self.completion_config) - - -# endregion concurrency logic diff --git a/src/aws_durable_execution_sdk_python/concurrency/__init__.py b/src/aws_durable_execution_sdk_python/concurrency/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/aws_durable_execution_sdk_python/concurrency/executor.py b/src/aws_durable_execution_sdk_python/concurrency/executor.py new file mode 100644 index 0000000..da1a5cd --- /dev/null +++ b/src/aws_durable_execution_sdk_python/concurrency/executor.py @@ -0,0 +1,451 @@ +"""Concurrent executor for parallel and map operations.""" + +from __future__ import annotations + +import heapq +import logging +import threading +import time +from abc import ABC, abstractmethod +from concurrent.futures import Future, ThreadPoolExecutor +from typing import TYPE_CHECKING, Generic, Self, TypeVar + +from aws_durable_execution_sdk_python.concurrency.models import ( + BatchItem, + BatchItemStatus, + BatchResult, + BranchStatus, + Executable, + ExecutableWithState, + ExecutionCounters, + SuspendResult, +) +from aws_durable_execution_sdk_python.config import ChildConfig +from aws_durable_execution_sdk_python.exceptions import ( + OrphanedChildException, + SuspendExecution, + TimedSuspendExecution, +) +from aws_durable_execution_sdk_python.identifier import OperationIdentifier +from aws_durable_execution_sdk_python.lambda_service import ErrorObject +from aws_durable_execution_sdk_python.operation.child import child_handler + +if TYPE_CHECKING: + from collections.abc import Callable + + from aws_durable_execution_sdk_python.config import CompletionConfig + from aws_durable_execution_sdk_python.context import DurableContext + from aws_durable_execution_sdk_python.lambda_service import OperationSubType + from aws_durable_execution_sdk_python.serdes import SerDes + from aws_durable_execution_sdk_python.state import ExecutionState + from aws_durable_execution_sdk_python.types import SummaryGenerator + + +logger = logging.getLogger(__name__) + +T = TypeVar("T") +R = TypeVar("R") + +CallableType = TypeVar("CallableType") +ResultType = TypeVar("ResultType") + + +# region concurrency logic +class TimerScheduler: + """Manage timed suspend tasks with a background timer thread.""" + + def __init__( + self, resubmit_callback: Callable[[ExecutableWithState], None] + ) -> None: + self.resubmit_callback = resubmit_callback + self._pending_resumes: list[tuple[float, ExecutableWithState]] = [] + self._lock = threading.Lock() + self._shutdown = threading.Event() + self._timer_thread = threading.Thread(target=self._timer_loop, daemon=True) + self._timer_thread.start() + + def __enter__(self) -> Self: + return self + + def __exit__(self, exc_type, exc_val, exc_tb) -> None: + self.shutdown() + + def schedule_resume( + self, 
exe_state: ExecutableWithState, resume_time: float + ) -> None: + """Schedule a task to resume at the specified time.""" + with self._lock: + heapq.heappush(self._pending_resumes, (resume_time, exe_state)) + + def shutdown(self) -> None: + """Shutdown the timer thread and cancel all pending resumes.""" + self._shutdown.set() + self._timer_thread.join(timeout=1.0) + with self._lock: + self._pending_resumes.clear() + + def _timer_loop(self) -> None: + """Background thread that processes timed resumes.""" + while not self._shutdown.is_set(): + next_resume_time = None + + with self._lock: + if self._pending_resumes: + next_resume_time = self._pending_resumes[0][0] + + if next_resume_time is None: + # No pending resumes, wait a bit and check again + self._shutdown.wait(timeout=0.1) + continue + + current_time = time.time() + if current_time >= next_resume_time: + # Time to resume + with self._lock: + # no branch cover because hard to test reliably - this is a double-safety check if heap mutated + # since the first peek on next_resume_time further up + if ( # pragma: no branch + self._pending_resumes + and self._pending_resumes[0][0] <= current_time + ): + _, exe_state = heapq.heappop(self._pending_resumes) + if exe_state.can_resume: + exe_state.reset_to_pending() + self.resubmit_callback(exe_state) + else: + # Wait until next resume time + wait_time = min(next_resume_time - current_time, 0.1) + self._shutdown.wait(timeout=wait_time) + + +class ConcurrentExecutor(ABC, Generic[CallableType, ResultType]): + """Execute durable operations concurrently. This contains the execution logic for Map and Parallel.""" + + def __init__( + self, + executables: list[Executable[CallableType]], + max_concurrency: int | None, + completion_config: CompletionConfig, + sub_type_top: OperationSubType, + sub_type_iteration: OperationSubType, + name_prefix: str, + serdes: SerDes | None, + item_serdes: SerDes | None = None, + summary_generator: SummaryGenerator | None = None, + ): + """Initialize ConcurrentExecutor. + + Args: + summary_generator: Optional function to generate compact summaries for large results. + When the serialized result exceeds 256KB, this generator creates a JSON summary + instead of checkpointing the full result. Used by map/parallel operations to + handle large BatchResult payloads efficiently. Matches TypeScript behavior in + run-in-child-context-handler.ts. 
+ """ + self.executables = executables + self.max_concurrency = max_concurrency + self.completion_config = completion_config + self.sub_type_top = sub_type_top + self.sub_type_iteration = sub_type_iteration + self.name_prefix = name_prefix + self.summary_generator = summary_generator + + # Event-driven state tracking for when the executor is done + self._completion_event = threading.Event() + self._suspend_exception: SuspendExecution | None = None + + # ExecutionCounters will keep track of completion criteria and on-going counters + min_successful = self.completion_config.min_successful or len(self.executables) + tolerated_failure_count = self.completion_config.tolerated_failure_count + tolerated_failure_percentage = ( + self.completion_config.tolerated_failure_percentage + ) + + self.counters: ExecutionCounters = ExecutionCounters( + len(executables), + min_successful, + tolerated_failure_count, + tolerated_failure_percentage, + ) + self.executables_with_state: list[ExecutableWithState] = [] + self.serdes = serdes + self.item_serdes = item_serdes + + @abstractmethod + def execute_item( + self, child_context: DurableContext, executable: Executable[CallableType] + ) -> ResultType: + """Execute a single executable in a child context and return the result.""" + raise NotImplementedError + + def execute( + self, execution_state: ExecutionState, executor_context: DurableContext + ) -> BatchResult[ResultType]: + """Execute items concurrently with event-driven state management.""" + logger.debug( + "▶️ Executing concurrent operation, items: %d", len(self.executables) + ) + + max_workers = self.max_concurrency or len(self.executables) + + self.executables_with_state = [ + ExecutableWithState(executable=exe) for exe in self.executables + ] + self._completion_event.clear() + self._suspend_exception = None + + def resubmitter(executable_with_state: ExecutableWithState) -> None: + """Resubmit a timed suspended task.""" + execution_state.create_checkpoint() + submit_task(executable_with_state) + + thread_executor = ThreadPoolExecutor(max_workers=max_workers) + try: + with TimerScheduler(resubmitter) as scheduler: + + def submit_task(executable_with_state: ExecutableWithState) -> Future: + """Submit task to the thread executor and mark its state as started.""" + future = thread_executor.submit( + self._execute_item_in_child_context, + executor_context, + executable_with_state.executable, + ) + executable_with_state.run(future) + + def on_done(future: Future) -> None: + self._on_task_complete(executable_with_state, future, scheduler) + + future.add_done_callback(on_done) + return future + + # Submit initial tasks + futures = [ + submit_task(exe_state) for exe_state in self.executables_with_state + ] + + # Wait for completion + self._completion_event.wait() + + # Cancel futures that haven't started yet + for future in futures: + future.cancel() + + # Suspend execution if everything done and at least one of the tasks raised a suspend exception. + if self._suspend_exception: + raise self._suspend_exception + + finally: + # Shutdown without waiting for running threads for early return when + # completion criteria are met (e.g., min_successful). + # Running threads will continue in background but they raise OrphanedChildException + # on the next attempt to checkpoint. 
+ thread_executor.shutdown(wait=False, cancel_futures=True) + + # Build final result + return self._create_result() + + def should_execution_suspend(self) -> SuspendResult: + """Check if execution should suspend.""" + earliest_timestamp: float = float("inf") + indefinite_suspend_task: ( + ExecutableWithState[CallableType, ResultType] | None + ) = None + + for exe_state in self.executables_with_state: + if exe_state.status in {BranchStatus.PENDING, BranchStatus.RUNNING}: + # Exit here! Still have tasks that can make progress, don't suspend. + return SuspendResult.do_not_suspend() + if exe_state.status is BranchStatus.SUSPENDED_WITH_TIMEOUT: + if ( + exe_state.suspend_until + and exe_state.suspend_until < earliest_timestamp + ): + earliest_timestamp = exe_state.suspend_until + elif exe_state.status is BranchStatus.SUSPENDED: + indefinite_suspend_task = exe_state + + # All tasks are in final states and at least one of them is a suspend. + if earliest_timestamp != float("inf"): + return SuspendResult.suspend( + TimedSuspendExecution( + "All concurrent work complete or suspended pending retry.", + earliest_timestamp, + ) + ) + if indefinite_suspend_task: + return SuspendResult.suspend( + SuspendExecution( + "All concurrent work complete or suspended and pending external callback." + ) + ) + + return SuspendResult.do_not_suspend() + + def _on_task_complete( + self, + exe_state: ExecutableWithState, + future: Future, + scheduler: TimerScheduler, + ) -> None: + """Handle task completion, suspension, or failure.""" + + if future.cancelled(): + exe_state.suspend() + return + + try: + result = future.result() + exe_state.complete(result) + self.counters.complete_task() + except OrphanedChildException: + # Parent already completed and returned. + # State is already RUNNING, which _create_result() marked as STARTED + # Just log and exit - no state change needed + logger.debug( + "Terminating orphaned branch %s without error because parent has completed already", + exe_state.index, + ) + return + except TimedSuspendExecution as tse: + exe_state.suspend_with_timeout(tse.scheduled_timestamp) + scheduler.schedule_resume(exe_state, tse.scheduled_timestamp) + except SuspendExecution: + exe_state.suspend() + # For indefinite suspend, don't schedule resume + except Exception as e: # noqa: BLE001 + exe_state.fail(e) + self.counters.fail_task() + + # Check if execution should complete or suspend + if self.counters.should_complete(): + self._completion_event.set() + else: + suspend_result = self.should_execution_suspend() + if suspend_result.should_suspend: + self._suspend_exception = suspend_result.exception + self._completion_event.set() + + def _create_result(self) -> BatchResult[ResultType]: + """ + Build the final BatchResult. + + When this function executes, we've terminated the upper/parent context for whatever reason. + It follows that our items can be only in 3 states, Completed, Failed and Started (in all of the possible forms). + We tag each branch based on its observed value at the time of completion of the parent / upper context, and pass the + results to BatchResult. + + Any inference wrt completion reason is left up to BatchResult, keeping the logic inference isolated. 
+ """ + batch_items: list[BatchItem[ResultType]] = [] + for executable in self.executables_with_state: + match executable.status: + case BranchStatus.COMPLETED: + batch_items.append( + BatchItem( + executable.index, + BatchItemStatus.SUCCEEDED, + executable.result, + ) + ) + case BranchStatus.FAILED: + batch_items.append( + BatchItem( + executable.index, + BatchItemStatus.FAILED, + error=ErrorObject.from_exception(executable.error), + ) + ) + case ( + BranchStatus.PENDING + | BranchStatus.RUNNING + | BranchStatus.SUSPENDED + | BranchStatus.SUSPENDED_WITH_TIMEOUT + ): + batch_items.append( + BatchItem(executable.index, BatchItemStatus.STARTED) + ) + + return BatchResult.from_items(batch_items, self.completion_config) + + def _execute_item_in_child_context( + self, + executor_context: DurableContext, + executable: Executable[CallableType], + ) -> ResultType: + """ + Execute a single item in a derived child context. + + instead of relying on `executor_context.run_in_child_context` + we generate an operation_id for the child, and then call `child_handler` + directly. This avoids the hidden mutation of the context's internal counter. + we can do this because we explicitly control the generation of step_id and do it + using executable.index. + + + invariant: `operation_id` for a given executable is deterministic, + and execution order invariant. + """ + + operation_id = executor_context._create_step_id_for_logical_step( # noqa: SLF001 + executable.index + ) + name = f"{self.name_prefix}{executable.index}" + child_context = executor_context.create_child_context(operation_id) + operation_identifier = OperationIdentifier( + operation_id, + executor_context._parent_id, # noqa: SLF001 + name, + ) + + def run_in_child_handler(): + return self.execute_item(child_context, executable) + + result: ResultType = child_handler( + run_in_child_handler, + child_context.state, + operation_identifier=operation_identifier, + config=ChildConfig( + serdes=self.item_serdes or self.serdes, + sub_type=self.sub_type_iteration, + summary_generator=self.summary_generator, + ), + ) + child_context.state.track_replay(operation_id=operation_id) + return result + + def replay(self, execution_state: ExecutionState, executor_context: DurableContext): + """ + Replay rather than re-run children. + + if we are here, then we are in replay_children. + This will pre-generate all the operation ids for the children and collect the checkpointed + results. 
+ """ + items: list[BatchItem[ResultType]] = [] + for executable in self.executables: + operation_id = executor_context._create_step_id_for_logical_step( # noqa: SLF001 + executable.index + ) + checkpoint = execution_state.get_checkpoint_result(operation_id) + + result: ResultType | None = None + error = None + status: BatchItemStatus + if checkpoint.is_succeeded(): + status = BatchItemStatus.SUCCEEDED + result = self._execute_item_in_child_context( + executor_context, executable + ) + + elif checkpoint.is_failed(): + error = checkpoint.error + status = BatchItemStatus.FAILED + else: + status = BatchItemStatus.STARTED + + batch_item = BatchItem(executable.index, status, result=result, error=error) + items.append(batch_item) + return BatchResult.from_items(items, self.completion_config) + + +# endregion concurrency logic diff --git a/src/aws_durable_execution_sdk_python/concurrency/models.py b/src/aws_durable_execution_sdk_python/concurrency/models.py new file mode 100644 index 0000000..29ffeaf --- /dev/null +++ b/src/aws_durable_execution_sdk_python/concurrency/models.py @@ -0,0 +1,469 @@ +"""Concurrent executor for parallel and map operations.""" + +from __future__ import annotations + +import logging +import threading +import time +from collections import Counter +from dataclasses import dataclass +from enum import Enum +from typing import TYPE_CHECKING, Generic, TypeVar + +from aws_durable_execution_sdk_python.exceptions import ( + InvalidStateError, + SuspendExecution, +) +from aws_durable_execution_sdk_python.lambda_service import ErrorObject +from aws_durable_execution_sdk_python.types import BatchResult as BatchResultProtocol + +if TYPE_CHECKING: + from concurrent.futures import Future + + from aws_durable_execution_sdk_python.config import CompletionConfig + + +logger = logging.getLogger(__name__) + +T = TypeVar("T") +R = TypeVar("R") + +CallableType = TypeVar("CallableType") +ResultType = TypeVar("ResultType") + + +# region Result models +class BatchItemStatus(Enum): + SUCCEEDED = "SUCCEEDED" + FAILED = "FAILED" + STARTED = "STARTED" + + +class CompletionReason(Enum): + ALL_COMPLETED = "ALL_COMPLETED" + MIN_SUCCESSFUL_REACHED = "MIN_SUCCESSFUL_REACHED" + FAILURE_TOLERANCE_EXCEEDED = "FAILURE_TOLERANCE_EXCEEDED" + + +@dataclass(frozen=True) +class SuspendResult: + should_suspend: bool + exception: SuspendExecution | None = None + + @staticmethod + def do_not_suspend() -> SuspendResult: + return SuspendResult(should_suspend=False) + + @staticmethod + def suspend(exception: SuspendExecution) -> SuspendResult: + return SuspendResult(should_suspend=True, exception=exception) + + +@dataclass(frozen=True) +class BatchItem(Generic[R]): + index: int + status: BatchItemStatus + result: R | None = None + error: ErrorObject | None = None + + def to_dict(self) -> dict: + return { + "index": self.index, + "status": self.status.value, + "result": self.result, + "error": self.error.to_dict() if self.error else None, + } + + @classmethod + def from_dict(cls, data: dict) -> BatchItem[R]: + return cls( + index=data["index"], + status=BatchItemStatus(data["status"]), + result=data.get("result"), + error=ErrorObject.from_dict(data["error"]) if data.get("error") else None, + ) + + +@dataclass(frozen=True) +class BatchResult(Generic[R], BatchResultProtocol[R]): # noqa: PYI059 + all: list[BatchItem[R]] + completion_reason: CompletionReason + + @classmethod + def from_dict( + cls, data: dict, completion_config: CompletionConfig | None = None + ) -> BatchResult[R]: + batch_items: list[BatchItem[R]] = [ + 
BatchItem.from_dict(item) for item in data["all"] + ] + + completion_reason_value = data.get("completionReason") + if completion_reason_value is None: + # Infer completion reason from batch item statuses and completion config + # This aligns with the TypeScript implementation that uses completion config + # to accurately reconstruct the completion reason during replay + result = cls.from_items(batch_items, completion_config) + logger.warning( + "Missing completionReason in BatchResult deserialization, " + "inferred '%s' from batch item statuses. " + "This may indicate incomplete serialization data.", + result.completion_reason.value, + ) + return result + + completion_reason = CompletionReason(completion_reason_value) + return cls(batch_items, completion_reason) + + @classmethod + def from_items( + cls, + items: list[BatchItem[R]], + completion_config: CompletionConfig | None = None, + ): + """ + Infer completion reason based on batch item statuses and completion config. + + This follows the same logic as the TypeScript implementation: + - If all items completed: ALL_COMPLETED + - If minSuccessful threshold met and not all completed: MIN_SUCCESSFUL_REACHED + - Otherwise: FAILURE_TOLERANCE_EXCEEDED + """ + + statuses = (item.status for item in items) + counts = Counter(statuses) + succeeded_count = counts.get(BatchItemStatus.SUCCEEDED, 0) + failed_count = counts.get(BatchItemStatus.FAILED, 0) + started_count = counts.get(BatchItemStatus.STARTED, 0) + + completed_count = succeeded_count + failed_count + total_count = started_count + completed_count + + # If all items completed (no started items), it's ALL_COMPLETED + if completed_count == total_count: + completion_reason = CompletionReason.ALL_COMPLETED + elif ( # If we have completion config and minSuccessful threshold is met + completion_config + and (min_successful := completion_config.min_successful) is not None + and succeeded_count >= min_successful + ): + completion_reason = CompletionReason.MIN_SUCCESSFUL_REACHED + else: + # Otherwise, assume failure tolerance was exceeded + completion_reason = CompletionReason.FAILURE_TOLERANCE_EXCEEDED + + return cls(items, completion_reason) + + def to_dict(self) -> dict: + return { + "all": [item.to_dict() for item in self.all], + "completionReason": self.completion_reason.value, + } + + def succeeded(self) -> list[BatchItem[R]]: + return [ + item + for item in self.all + if item.status is BatchItemStatus.SUCCEEDED and item.result is not None + ] + + def failed(self) -> list[BatchItem[R]]: + return [ + item + for item in self.all + if item.status is BatchItemStatus.FAILED and item.error is not None + ] + + def started(self) -> list[BatchItem[R]]: + return [item for item in self.all if item.status is BatchItemStatus.STARTED] + + @property + def status(self) -> BatchItemStatus: + return BatchItemStatus.FAILED if self.has_failure else BatchItemStatus.SUCCEEDED + + @property + def has_failure(self) -> bool: + return any(item.status is BatchItemStatus.FAILED for item in self.all) + + def throw_if_error(self) -> None: + first_error = next( + (item.error for item in self.all if item.status is BatchItemStatus.FAILED), + None, + ) + if first_error: + raise first_error.to_callable_runtime_error() + + def get_results(self) -> list[R]: + return [ + item.result + for item in self.all + if item.status is BatchItemStatus.SUCCEEDED and item.result is not None + ] + + def get_errors(self) -> list[ErrorObject]: + return [ + item.error + for item in self.all + if item.status is BatchItemStatus.FAILED and item.error 
is not None + ] + + @property + def success_count(self) -> int: + return sum(1 for item in self.all if item.status is BatchItemStatus.SUCCEEDED) + + @property + def failure_count(self) -> int: + return sum(1 for item in self.all if item.status is BatchItemStatus.FAILED) + + @property + def started_count(self) -> int: + return sum(1 for item in self.all if item.status is BatchItemStatus.STARTED) + + @property + def total_count(self) -> int: + return len(self.all) + + +# endregion Result models + + +# region concurrency models +@dataclass(frozen=True) +class Executable(Generic[CallableType]): + index: int + func: CallableType + + +class BranchStatus(Enum): + PENDING = "pending" + RUNNING = "running" + COMPLETED = "completed" + SUSPENDED = "suspended" + SUSPENDED_WITH_TIMEOUT = "suspended_with_timeout" + FAILED = "failed" + + +class ExecutableWithState(Generic[CallableType, ResultType]): + """Manages the execution state and lifecycle of an executable.""" + + def __init__(self, executable: Executable[CallableType]): + self.executable = executable + self._status = BranchStatus.PENDING + self._future: Future | None = None + self._suspend_until: float | None = None + self._result: ResultType = None # type: ignore[assignment] + self._is_result_set: bool = False + self._error: Exception | None = None + + @property + def future(self) -> Future: + """Get the future, raising error if not available.""" + if self._future is None: + msg = f"ExecutableWithState was never started. {self.executable.index}" + raise InvalidStateError(msg) + return self._future + + @property + def status(self) -> BranchStatus: + """Get current status.""" + return self._status + + @property + def result(self) -> ResultType: + """Get result if completed.""" + if not self._is_result_set or self._status != BranchStatus.COMPLETED: + msg = f"result not available in status {self._status}" + raise InvalidStateError(msg) + return self._result + + @property + def error(self) -> Exception: + """Get error if failed.""" + if self._error is None or self._status != BranchStatus.FAILED: + msg = f"error not available in status {self._status}" + raise InvalidStateError(msg) + return self._error + + @property + def suspend_until(self) -> float | None: + """Get suspend timestamp.""" + return self._suspend_until + + @property + def is_running(self) -> bool: + """Check if currently running.""" + return self._status is BranchStatus.RUNNING + + @property + def can_resume(self) -> bool: + """Check if can resume from suspension.""" + return self._status is BranchStatus.SUSPENDED or ( + self._status is BranchStatus.SUSPENDED_WITH_TIMEOUT + and self._suspend_until is not None + and time.time() >= self._suspend_until + ) + + @property + def index(self) -> int: + return self.executable.index + + @property + def callable(self) -> CallableType: + return self.executable.func + + # region State transitions + def run(self, future: Future) -> None: + """Transition to RUNNING state with a future.""" + if self._status != BranchStatus.PENDING: + msg = f"Cannot start running from {self._status}" + raise InvalidStateError(msg) + self._status = BranchStatus.RUNNING + self._future = future + + def suspend(self) -> None: + """Transition to SUSPENDED state (indefinite).""" + self._status = BranchStatus.SUSPENDED + self._suspend_until = None + + def suspend_with_timeout(self, timestamp: float) -> None: + """Transition to SUSPENDED_WITH_TIMEOUT state.""" + self._status = BranchStatus.SUSPENDED_WITH_TIMEOUT + self._suspend_until = timestamp + + def complete(self, result: 
ResultType) -> None: + """Transition to COMPLETED state.""" + self._status = BranchStatus.COMPLETED + self._result = result + self._is_result_set = True + + def fail(self, error: Exception) -> None: + """Transition to FAILED state.""" + self._status = BranchStatus.FAILED + self._error = error + + def reset_to_pending(self) -> None: + """Reset to PENDING state for resubmission.""" + self._status = BranchStatus.PENDING + self._future = None + self._suspend_until = None + + # endregion State transitions + + +class ExecutionCounters: + """Thread-safe counters for tracking execution state.""" + + def __init__( + self, + total_tasks: int, + min_successful: int, + tolerated_failure_count: int | None, + tolerated_failure_percentage: float | None, + ): + self.total_tasks: int = total_tasks + self.min_successful: int = min_successful + self.tolerated_failure_count: int | None = tolerated_failure_count + self.tolerated_failure_percentage: float | None = tolerated_failure_percentage + self.success_count: int = 0 + self.failure_count: int = 0 + self._lock = threading.Lock() + + def complete_task(self) -> None: + """Task completed successfully.""" + with self._lock: + self.success_count += 1 + + def fail_task(self) -> None: + """Task failed.""" + with self._lock: + self.failure_count += 1 + + def should_continue(self) -> bool: + """ + Check if we should continue starting new tasks (based on failure tolerance). + Matches TypeScript shouldContinue() logic. + """ + with self._lock: + # If no completion config, only continue if no failures + if ( + self.tolerated_failure_count is None + and self.tolerated_failure_percentage is None + ): + return self.failure_count == 0 + + # Check failure count tolerance + if ( + self.tolerated_failure_count is not None + and self.failure_count > self.tolerated_failure_count + ): + return False + + # Check failure percentage tolerance + if self.tolerated_failure_percentage is not None and self.total_tasks > 0: + failure_percentage = (self.failure_count / self.total_tasks) * 100 + if failure_percentage > self.tolerated_failure_percentage: + return False + + return True + + def is_complete(self) -> bool: + """ + Check if execution should complete (based on completion criteria). + Matches TypeScript isComplete() logic. + """ + with self._lock: + completed_count = self.success_count + self.failure_count + + # All tasks completed + if completed_count == self.total_tasks: + return True + + # once we reach min_successful, we've completed + return self.success_count >= self.min_successful + + def should_complete(self) -> bool: + """ + Check if execution should complete. + Combines TypeScript shouldContinue() and isComplete() logic.
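+ Example (illustrative): with total_tasks=10, min_successful=8 and tolerated_failure_count=1, a second failure makes should_continue() False, so should_complete() returns True even though is_complete() is still False.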
+ """ + return self.is_complete() or not self.should_continue() + + def is_all_completed(self) -> bool: + """True if all tasks completed successfully.""" + with self._lock: + return self.success_count == self.total_tasks + + def is_min_successful_reached(self) -> bool: + """True if minimum successful tasks reached.""" + with self._lock: + return self.success_count >= self.min_successful + + def is_failure_tolerance_exceeded(self) -> bool: + """True if failure tolerance was exceeded.""" + with self._lock: + return self._is_failure_condition_reached( + tolerated_count=self.tolerated_failure_count, + tolerated_percentage=self.tolerated_failure_percentage, + failure_count=self.failure_count, + ) + + def _is_failure_condition_reached( + self, + tolerated_count: int | None, + tolerated_percentage: float | None, + failure_count: int, + ) -> bool: + """True if failure conditions are reached (no locking - caller must lock).""" + # Failure count condition + if tolerated_count is not None and failure_count > tolerated_count: + return True + + # Failure percentage condition + if tolerated_percentage is not None and self.total_tasks > 0: + failure_percentage = (failure_count / self.total_tasks) * 100 + if failure_percentage > tolerated_percentage: + return True + + return False + + +# endegion concurrency models diff --git a/src/aws_durable_execution_sdk_python/config.py b/src/aws_durable_execution_sdk_python/config.py index 4f4c5b5..548b6c1 100644 --- a/src/aws_durable_execution_sdk_python/config.py +++ b/src/aws_durable_execution_sdk_python/config.py @@ -7,6 +7,8 @@ from enum import Enum, StrEnum from typing import TYPE_CHECKING, Generic, TypeVar +from aws_durable_execution_sdk_python.exceptions import ValidationError + P = TypeVar("P") # Payload type R = TypeVar("R") # Result type T = TypeVar("T") @@ -25,6 +27,42 @@ Numeric = int | float # deliberately leaving off complex +@dataclass(frozen=True) +class Duration: + """Represents a duration stored as total seconds.""" + + seconds: int = 0 + + def __post_init__(self): + if self.seconds < 0: + msg = "Duration seconds must be positive" + raise ValidationError(msg) + + def to_seconds(self) -> int: + """Convert the duration to total seconds.""" + return self.seconds + + @classmethod + def from_seconds(cls, value: float) -> Duration: + """Create a Duration from total seconds.""" + return cls(seconds=int(value)) + + @classmethod + def from_minutes(cls, value: float) -> Duration: + """Create a Duration from minutes.""" + return cls(seconds=int(value * 60)) + + @classmethod + def from_hours(cls, value: float) -> Duration: + """Create a Duration from hours.""" + return cls(seconds=int(value * 3600)) + + @classmethod + def from_days(cls, value: float) -> Duration: + """Create a Duration from days.""" + return cls(seconds=int(value * 86400)) + + @dataclass(frozen=True) class BatchedInput(Generic[T, U]): batch_input: T @@ -340,22 +378,61 @@ class MapConfig: summary_generator: SummaryGenerator | None = None -@dataclass +@dataclass(frozen=True) class InvokeConfig(Generic[P, R]): + """ + Configuration for invoke operations. + + This class configures how function invocations are executed, including + timeout behavior, serialization, and tenant isolation. + + Args: + timeout: Maximum duration to wait for the invoked function to complete. + Default is no timeout. Use this to prevent long-running invocations + from blocking execution indefinitely. + + serdes_payload: Custom serialization/deserialization for the payload + sent to the invoked function. 
Defaults to DEFAULT_JSON_SERDES when + not set. + + serdes_result: Custom serialization/deserialization for the result + returned from the invoked function. Defaults to DEFAULT_JSON_SERDES when + not set. + + tenant_id: Optional tenant identifier for multi-tenant isolation. + If provided, the invocation will be scoped to this tenant. + """ + # retry_strategy: Callable[[Exception, int], RetryDecision] | None = None - timeout_seconds: int = 0 + timeout: Duration = field(default_factory=Duration) serdes_payload: SerDes[P] | None = None serdes_result: SerDes[R] | None = None + tenant_id: str | None = None + + @property + def timeout_seconds(self) -> int: + """Get timeout in seconds.""" + return self.timeout.to_seconds() @dataclass(frozen=True) class CallbackConfig: """Configuration for callbacks.""" - timeout_seconds: int = 0 - heartbeat_timeout_seconds: int = 0 + timeout: Duration = field(default_factory=Duration) + heartbeat_timeout: Duration = field(default_factory=Duration) serdes: SerDes | None = None + @property + def timeout_seconds(self) -> int: + """Get timeout in seconds.""" + return self.timeout.to_seconds() + + @property + def heartbeat_timeout_seconds(self) -> int: + """Get heartbeat timeout in seconds.""" + return self.heartbeat_timeout.to_seconds() + @dataclass(frozen=True) class WaitForCallbackConfig(CallbackConfig): @@ -387,23 +464,35 @@ class JitterStrategy(StrEnum): Jitter is meant to be used to spread operations across time. + Based on AWS Architecture Blog: https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/ + members: :NONE: No jitter; use the exact calculated delay :FULL: Full jitter; random delay between 0 and calculated delay - :HALF: Half jitter; random delay between 0.5x and 1.0x of the calculated delay + :HALF: Equal jitter; random delay between 0.5x and 1.0x of the calculated delay """ NONE = "NONE" FULL = "FULL" HALF = "HALF" - def compute_jitter(self, delay) -> float: + def apply_jitter(self, delay: float) -> float: + """Apply jitter to a delay value and return the final delay. 
+ + Args: + delay: The base delay value to apply jitter to + + Returns: + The final delay after applying jitter strategy + """ match self: case JitterStrategy.NONE: - return 0 + return delay case JitterStrategy.HALF: - return delay * (random.random() * 0.5 + 0.5) # noqa: S311 + # Equal jitter: delay/2 + random(0, delay/2) + return delay / 2 + random.random() * (delay / 2) # noqa: S311 case _: # default is FULL + # Full jitter: random(0, delay) return random.random() * delay # noqa: S311 diff --git a/src/aws_durable_execution_sdk_python/context.py b/src/aws_durable_execution_sdk_python/context.py index 2938ca9..8efaed0 100644 --- a/src/aws_durable_execution_sdk_python/context.py +++ b/src/aws_durable_execution_sdk_python/context.py @@ -8,6 +8,7 @@ BatchedInput, CallbackConfig, ChildConfig, + Duration, InvokeConfig, MapConfig, ParallelConfig, @@ -23,25 +24,30 @@ from aws_durable_execution_sdk_python.lambda_service import OperationSubType from aws_durable_execution_sdk_python.logger import Logger, LogInfo from aws_durable_execution_sdk_python.operation.callback import ( - create_callback_handler, + CallbackOperationExecutor, wait_for_callback_handler, ) from aws_durable_execution_sdk_python.operation.child import child_handler -from aws_durable_execution_sdk_python.operation.invoke import invoke_handler +from aws_durable_execution_sdk_python.operation.invoke import InvokeOperationExecutor from aws_durable_execution_sdk_python.operation.map import map_handler from aws_durable_execution_sdk_python.operation.parallel import parallel_handler -from aws_durable_execution_sdk_python.operation.step import step_handler -from aws_durable_execution_sdk_python.operation.wait import wait_handler +from aws_durable_execution_sdk_python.operation.step import StepOperationExecutor +from aws_durable_execution_sdk_python.operation.wait import WaitOperationExecutor from aws_durable_execution_sdk_python.operation.wait_for_condition import ( - wait_for_condition_handler, + WaitForConditionOperationExecutor, +) +from aws_durable_execution_sdk_python.serdes import ( + PassThroughSerDes, + SerDes, + deserialize, ) -from aws_durable_execution_sdk_python.serdes import SerDes, deserialize from aws_durable_execution_sdk_python.state import ExecutionState # noqa: TCH001 from aws_durable_execution_sdk_python.threading import OrderedCounter from aws_durable_execution_sdk_python.types import ( BatchResult, LoggerInterface, StepContext, + WaitForCallbackContext, WaitForConditionCheckContext, ) from aws_durable_execution_sdk_python.types import Callback as CallbackProtocol @@ -65,6 +71,8 @@ logger = logging.getLogger(__name__) +PASS_THROUGH_SERDES: SerDes[Any] = PassThroughSerDes() + def durable_step( func: Callable[Concatenate[StepContext, Params], T], @@ -96,6 +104,52 @@ def function_with_arguments(child_context: DurableContext): return wrapper +def durable_wait_for_callback( + func: Callable[Concatenate[str, WaitForCallbackContext, Params], T], +) -> Callable[Params, Callable[[str, WaitForCallbackContext], T]]: + """Wrap your callable into a wait_for_callback submitter function. + + This decorator allows you to define a submitter function with additional + parameters that will be bound when called. 
+ + Args: + func: A callable that takes callback_id, context, and additional parameters + + Returns: + A wrapper function that binds the additional parameters and returns + a submitter function compatible with wait_for_callback + + Example: + @durable_wait_for_callback + def submit_to_external_system( + callback_id: str, + context: WaitForCallbackContext, + task_name: str, + priority: int + ): + context.logger.info(f"Submitting {task_name} with callback {callback_id}") + external_api.submit_task( + task_name=task_name, + priority=priority, + callback_id=callback_id + ) + + # Usage in durable handler: + result = context.wait_for_callback( + submit_to_external_system("my_task", priority=5) + ) + """ + + def wrapper(*args, **kwargs): + def submitter_with_arguments(callback_id: str, context: WaitForCallbackContext): + return func(callback_id, context, *args, **kwargs) + + submitter_with_arguments._original_name = func.__name__ # noqa: SLF001 + return submitter_with_arguments + + return wrapper + + class Callback(Generic[T], CallbackProtocol[T]): # noqa: PYI059 """A future that will block on result() until callback_id returns.""" @@ -128,7 +182,7 @@ def result(self) -> T | None: if not checkpointed_result.is_existent(): msg = "Callback operation must exist" - raise CallbackError(msg) + raise CallbackError(message=msg, callback_id=self.callback_id) if ( checkpointed_result.is_failed() @@ -136,14 +190,19 @@ def result(self) -> T | None: or checkpointed_result.is_timed_out() or checkpointed_result.is_stopped() ): - checkpointed_result.raise_callable_error() + msg = ( + checkpointed_result.error.message + if checkpointed_result.error and checkpointed_result.error.message + else "Callback failed" + ) + raise CallbackError(message=msg, callback_id=self.callback_id) if checkpointed_result.is_succeeded(): if checkpointed_result.result is None: return None # type: ignore return deserialize( - serdes=self.serdes, + serdes=self.serdes if self.serdes is not None else PASS_THROUGH_SERDES, data=checkpointed_result.result, operation_id=self.operation_id, durable_execution_arn=self.state.durable_execution_arn, @@ -169,7 +228,8 @@ def __init__( self._step_counter: OrderedCounter = OrderedCounter() log_info = LogInfo( - execution_arn=state.durable_execution_arn, parent_id=parent_id + execution_state=state, + parent_id=parent_id, ) self._log_info = log_info self.logger: Logger = logger or Logger.from_log_info( @@ -198,7 +258,8 @@ def create_child_context(self, parent_id: str) -> DurableContext: parent_id=parent_id, logger=self.logger.with_log_info( LogInfo( - execution_arn=self.state.durable_execution_arn, parent_id=parent_id + execution_state=self.state, + parent_id=parent_id, ) ), ) @@ -262,20 +323,22 @@ def create_callback( if not config: config = CallbackConfig() operation_id: str = self._create_step_id() - callback_id: str = create_callback_handler( + executor: CallbackOperationExecutor = CallbackOperationExecutor( state=self.state, operation_identifier=OperationIdentifier( operation_id=operation_id, parent_id=self._parent_id, name=name ), config=config, ) - - return Callback( + callback_id: str = executor.process() + result: Callback = Callback( callback_id=callback_id, operation_id=operation_id, state=self.state, serdes=config.serdes, ) + self.state.track_replay(operation_id=operation_id) + return result def invoke( self, @@ -295,17 +358,23 @@ def invoke( Returns: The result of the invoked function """ - return invoke_handler( + if not config: + config = InvokeConfig[P, R]() + operation_id = 
self._create_step_id() + executor: InvokeOperationExecutor[R] = InvokeOperationExecutor( function_name=function_name, payload=payload, state=self.state, operation_identifier=OperationIdentifier( - operation_id=self._create_step_id(), + operation_id=operation_id, parent_id=self._parent_id, name=name, ), config=config, ) + result: R = executor.process() + self.state.track_replay(operation_id=operation_id) + return result def map( self, @@ -337,15 +406,21 @@ def map_in_child_context() -> BatchResult[R]: operation_identifier=operation_identifier, ) - return child_handler( + result: BatchResult[R] = child_handler( func=map_in_child_context, state=self.state, operation_identifier=operation_identifier, config=ChildConfig( sub_type=OperationSubType.MAP, - serdes=config.serdes if config is not None else None, + serdes=getattr(config, "serdes", None), + # child_handler should only know the parent's serdes; + # the item serdes will be passed when we actually execute + # the branch within its own child_handler. + item_serdes=None, ), ) + self.state.track_replay(operation_id=operation_id) + return result def parallel( self, @@ -374,15 +449,21 @@ def parallel_in_child_context() -> BatchResult[T]: operation_identifier=operation_identifier, ) - return child_handler( + result: BatchResult[T] = child_handler( func=parallel_in_child_context, state=self.state, operation_identifier=operation_identifier, config=ChildConfig( sub_type=OperationSubType.PARALLEL, - serdes=config.serdes if config is not None else None, + serdes=getattr(config, "serdes", None), + # child_handler should only know the parent's serdes; + # the item serdes will be passed when we actually execute + # the branch within its own child_handler. + item_serdes=None, ), ) + self.state.track_replay(operation_id=operation_id) + return result def run_in_child_context( self, @@ -409,7 +490,7 @@ def run_in_child_context( def callable_with_child_context(): return func(self.create_child_context(parent_id=operation_id)) - return child_handler( + result: T = child_handler( func=callable_with_child_context, state=self.state, operation_identifier=OperationIdentifier( @@ -417,6 +498,8 @@ def callable_with_child_context(): ), config=config, ) + self.state.track_replay(operation_id=operation_id) + return result def step( self, @@ -426,42 +509,52 @@ def step( ) -> T: step_name = self._resolve_step_name(name, func) logger.debug("Step name: %s", step_name) - - return step_handler( + if not config: + config = StepConfig() + operation_id = self._create_step_id() + executor: StepOperationExecutor[T] = StepOperationExecutor( func=func, config=config, state=self.state, operation_identifier=OperationIdentifier( - operation_id=self._create_step_id(), + operation_id=operation_id, parent_id=self._parent_id, name=step_name, ), context_logger=self.logger, ) + result: T = executor.process() + self.state.track_replay(operation_id=operation_id) + return result - def wait(self, seconds: int, name: str | None = None) -> None: + def wait(self, duration: Duration, name: str | None = None) -> None: """Wait for a specified amount of time.
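+ For example, context.wait(Duration.from_minutes(5)) waits durably for 300 seconds.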
Args: - seconds: Time to wait in seconds + duration: Duration to wait name: Optional name for the wait step """ + seconds = duration.to_seconds() if seconds < 1: - msg = "seconds must be an integer greater than 0" + msg = "duration must be at least 1 second" raise ValidationError(msg) - wait_handler( - seconds=seconds, + operation_id = self._create_step_id() + wait_seconds = duration.seconds + executor: WaitOperationExecutor = WaitOperationExecutor( + seconds=wait_seconds, state=self.state, operation_identifier=OperationIdentifier( - operation_id=self._create_step_id(), + operation_id=operation_id, parent_id=self._parent_id, name=name, ), ) + executor.process() + self.state.track_replay(operation_id=operation_id) def wait_for_callback( self, - submitter: Callable[[str], None], + submitter: Callable[[str, WaitForCallbackContext], None], name: str | None = None, config: WaitForCallbackConfig | None = None, ) -> Any: @@ -499,17 +592,23 @@ def wait_for_condition( msg = "`config` is required for wait_for_condition" raise ValidationError(msg) - return wait_for_condition_handler( - check=check, - config=config, - state=self.state, - operation_identifier=OperationIdentifier( - operation_id=self._create_step_id(), - parent_id=self._parent_id, - name=name, - ), - context_logger=self.logger, + operation_id = self._create_step_id() + executor: WaitForConditionOperationExecutor[T] = ( + WaitForConditionOperationExecutor( + check=check, + config=config, + state=self.state, + operation_identifier=OperationIdentifier( + operation_id=operation_id, + parent_id=self._parent_id, + name=name, + ), + context_logger=self.logger, + ) ) + result: T = executor.process() + self.state.track_replay(operation_id=operation_id) + return result # endregion Operations diff --git a/src/aws_durable_execution_sdk_python/exceptions.py b/src/aws_durable_execution_sdk_python/exceptions.py index dcaa2c1..72f0aa0 100644 --- a/src/aws_durable_execution_sdk_python/exceptions.py +++ b/src/aws_durable_execution_sdk_python/exceptions.py @@ -8,12 +8,29 @@ import time from dataclasses import dataclass from enum import Enum -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Self, TypedDict + +BAD_REQUEST_ERROR: int = 400 +TOO_MANY_REQUESTS_ERROR: int = 429 +SERVICE_ERROR: int = 500 if TYPE_CHECKING: import datetime +class AwsErrorObj(TypedDict): + Code: str | None + Message: str | None + + +class AwsErrorMetadata(TypedDict): + RequestId: str | None + HostId: str | None + HTTPStatusCode: int | None + HTTPHeaders: str | None + RetryAttempts: str | None + + class TerminationReason(Enum): """Reasons why a durable execution terminated.""" @@ -69,12 +86,35 @@ def __init__(self, message: str, callback_id: str | None = None): self.callback_id = callback_id -class CheckpointFailedError(InvocationError): - """Error when checkpoint operation fails.""" +class BotoClientError(InvocationError): + def __init__( + self, + message: str, + error: AwsErrorObj | None = None, + response_metadata: AwsErrorMetadata | None = None, + termination_reason=TerminationReason.INVOCATION_ERROR, + ): + super().__init__(message=message, termination_reason=termination_reason) + self.error: AwsErrorObj | None = error + self.response_metadata: AwsErrorMetadata | None = response_metadata - def __init__(self, message: str, step_id: str | None = None): - super().__init__(message, TerminationReason.CHECKPOINT_FAILED) - self.step_id = step_id + @classmethod + def from_exception(cls, exception: Exception) -> Self: + response = getattr(exception, "response", {}) + 
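# botocore's ClientError exposes a "response" mapping with "Error" and "ResponseMetadata" keys; getattr with a {} default keeps this safe for exceptions that are not botocore client errors. +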
response_metadata = response.get("ResponseMetadata") + error = response.get("Error") + return cls( + message=str(exception), error=error, response_metadata=response_metadata + ) + + def build_logger_extras(self) -> dict: + extras: dict = {} + # preserve PascalCase to be consistent with other languages + if error := self.error: + extras["Error"] = error + if response_metadata := self.response_metadata: + extras["ResponseMetadata"] = response_metadata + return extras class NonDeterministicExecutionError(ExecutionError): @@ -85,21 +125,85 @@ def __init__(self, message: str, step_id: str | None = None): self.step_id = step_id -class CheckpointError(CheckpointFailedError): +class CheckpointErrorCategory(Enum): + INVOCATION = "INVOCATION" + EXECUTION = "EXECUTION" + + +class CheckpointError(BotoClientError): """Failure to checkpoint. Will terminate the lambda.""" - def __init__(self, message: str): - super().__init__(message) + def __init__( + self, + message: str, + error_category: CheckpointErrorCategory, + error: AwsErrorObj | None = None, + response_metadata: AwsErrorMetadata | None = None, + ): + super().__init__( + message, + error, + response_metadata, + termination_reason=TerminationReason.CHECKPOINT_FAILED, + ) + self.error_category: CheckpointErrorCategory = error_category @classmethod def from_exception(cls, exception: Exception) -> CheckpointError: - return cls(message=str(exception)) + base = BotoClientError.from_exception(exception) + metadata: AwsErrorMetadata | None = base.response_metadata + error: AwsErrorObj | None = base.error + error_category: CheckpointErrorCategory = CheckpointErrorCategory.INVOCATION + + # An InvalidParameterValueException whose message starts with "Invalid Checkpoint Token" is an InvocationError + # all other 4xx errors are Execution Errors and should be retried + # all 5xx errors are Invocation Errors + status_code: int | None = (metadata and metadata.get("HTTPStatusCode")) or None + if ( + status_code + # if we are in 4xx range (except 429) and is not an InvalidParameterValueException with Invalid Checkpoint Token + # then it's an execution error + and status_code < SERVICE_ERROR + and status_code >= BAD_REQUEST_ERROR + and status_code != TOO_MANY_REQUESTS_ERROR + and error + and ( + # is not InvalidParam => Execution + (error.get("Code", "") or "") != "InvalidParameterValueException" + # is not Invalid Token => Execution + or not (error.get("Message") or "").startswith( + "Invalid Checkpoint Token" + ) + ) + ): + error_category = CheckpointErrorCategory.EXECUTION + return CheckpointError(str(exception), error_category, error, metadata) + + def is_retriable(self): + return self.error_category == CheckpointErrorCategory.EXECUTION class ValidationError(DurableExecutionsError): """Incorrect arguments to a Durable Function operation.""" +class GetExecutionStateError(BotoClientError): + """Raised when failing to retrieve execution state.""" + + def __init__( + self, + message: str, + error: AwsErrorObj | None = None, + response_metadata: AwsErrorMetadata | None = None, + ): + super().__init__( + message, + error, + response_metadata, + termination_reason=TerminationReason.INVOCATION_ERROR, + ) + + class InvalidStateError(DurableExecutionsError): """Raised when an operation is attempted on an object in an invalid state.""" @@ -268,3 +372,32 @@ def __str__(self) -> str: class SerDesError(DurableExecutionsError): """Raised when serialization fails.""" + + +class OrphanedChildException(BaseException): + """Raised when a child operation attempts to checkpoint
after its parent context has completed. + + This exception inherits from BaseException (not Exception) so that user-space doesn't + accidentally catch it with broad exception handlers like 'except Exception'. + + This exception will happen when a parallel branch or map item tries to create a checkpoint + after its parent context (i.e., the parallel/map operation) has already completed due to meeting + completion criteria (e.g., min_successful reached, failure tolerance exceeded). + + Although you cannot cancel running futures in user-space, this will at least terminate the + child operation on the next checkpoint attempt, preventing subsequent operations in the + child scope from executing. + + Attributes: + operation_id: Operation ID of the orphaned child + """ + + def __init__(self, message: str, operation_id: str): + """Initialize OrphanedChildException. + + Args: + message: Human-readable error message + operation_id: Operation ID of the orphaned child (required) + """ + super().__init__(message) + self.operation_id = operation_id diff --git a/src/aws_durable_execution_sdk_python/execution.py b/src/aws_durable_execution_sdk_python/execution.py index 1a47408..6f4e438 100644 --- a/src/aws_durable_execution_sdk_python/execution.py +++ b/src/aws_durable_execution_sdk_python/execution.py @@ -1,5 +1,7 @@ from __future__ import annotations +import contextlib +import functools import json import logging from concurrent.futures import ThreadPoolExecutor @@ -7,9 +9,10 @@ from enum import Enum from typing import TYPE_CHECKING, Any -from aws_durable_execution_sdk_python.context import DurableContext, ExecutionState +from aws_durable_execution_sdk_python.context import DurableContext from aws_durable_execution_sdk_python.exceptions import ( BackgroundThreadError, + BotoClientError, CheckpointError, DurableExecutionsError, ExecutionError, @@ -24,10 +27,13 @@ OperationType, OperationUpdate, ) +from aws_durable_execution_sdk_python.state import ExecutionState, ReplayStatus if TYPE_CHECKING: from collections.abc import Callable, MutableMapping + import boto3 # type: ignore + from aws_durable_execution_sdk_python.types import LambdaContext @@ -53,10 +59,15 @@ def from_dict(input_dict: MutableMapping[str, Any]) -> InitialExecutionState: next_marker=input_dict.get("NextMarker", ""), ) - def get_execution_operation(self) -> Operation: - if len(self.operations) < 1: + def get_execution_operation(self) -> Operation | None: + if not self.operations: + # Due to payload size limitations we may have an empty operations list. + # This will only happen when loading the initial page of results and is + # expected behaviour. We don't fail, but instead return None + # as the execution operation does not exist. msg: str = "No durable operations found in initial execution state." - raise DurableExecutionsError(msg) + logger.debug(msg) + return None candidate = self.operations[0] if candidate.operation_type is not OperationType.EXECUTION: @@ -66,11 +77,13 @@ def get_execution_operation(self) -> Operation: return candidate def get_input_payload(self) -> str | None: - # TODO: are these None checks necessary? i.e will there always be execution_details with input_payload - if execution_details := self.get_execution_operation().execution_details: - return execution_details.input_payload - - return None + # It is possible that the backend will not provide an execution operation + # for the initial page of results.
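+ # get_execution_operation() returns None in that case rather than raising, so both guards below fall through to returning None.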
+ if not (operations := self.get_execution_operation()): + return None + if not (execution_details := operations.execution_details): + return None + return execution_details.input_payload def to_dict(self) -> MutableMapping[str, Any]: return { @@ -84,7 +97,6 @@ class DurableExecutionInvocationInput: durable_execution_arn: str checkpoint_token: str initial_execution_state: InitialExecutionState - is_local_runner: bool @staticmethod def from_dict( @@ -96,7 +108,6 @@ def from_dict( initial_execution_state=InitialExecutionState.from_dict( input_dict.get("InitialExecutionState", {}) ), - is_local_runner=input_dict.get("LocalRunner", False), ) def to_dict(self) -> MutableMapping[str, Any]: @@ -104,7 +115,6 @@ def to_dict(self) -> MutableMapping[str, Any]: "DurableExecutionArn": self.durable_execution_arn, "CheckpointToken": self.checkpoint_token, "InitialExecutionState": self.initial_execution_state.to_dict(), - "LocalRunner": self.is_local_runner, } @@ -126,7 +136,6 @@ def from_durable_execution_invocation_input( durable_execution_arn=invocation_input.durable_execution_arn, checkpoint_token=invocation_input.checkpoint_token, initial_execution_state=invocation_input.initial_execution_state, - is_local_runner=invocation_input.is_local_runner, service_client=service_client, ) @@ -191,8 +200,15 @@ def create_succeeded(cls, result: str) -> DurableExecutionInvocationOutput: def durable_execution( - func: Callable[[Any, DurableContext], Any], + func: Callable[[Any, DurableContext], Any] | None = None, + *, + boto3_client: boto3.client | None = None, ) -> Callable[[Any, LambdaContext], Any]: + # Decorator called with parameters + if func is None: + logger.debug("Decorator called with parameters") + return functools.partial(durable_execution, boto3_client=boto3_client) + logger.debug("Starting durable execution handler...") def wrapper(event: Any, context: LambdaContext) -> MutableMapping[str, Any]: @@ -205,13 +221,23 @@ def wrapper(event: Any, context: LambdaContext) -> MutableMapping[str, Any]: invocation_input = event service_client = invocation_input.service_client else: - logger.debug("durableExecutionArn: %s", event.get("DurableExecutionArn")) - invocation_input = DurableExecutionInvocationInput.from_dict(event) + try: + logger.debug( + "durableExecutionArn: %s", event.get("DurableExecutionArn") + ) + invocation_input = DurableExecutionInvocationInput.from_dict(event) + except (KeyError, TypeError, AttributeError) as e: + msg = ( + "Unexpected payload provided to start the durable execution. " + "Check your resource configuration to confirm durable execution is enabled."
+ ) + raise ExecutionError(msg) from e + # Use custom client if provided, otherwise initialize from environment service_client = ( - LambdaClient.initialize_local_runner_client() - if invocation_input.is_local_runner - else LambdaClient.initialize_from_env() + LambdaClient(client=boto3_client) + if boto3_client is not None + else LambdaClient.initialize_client() ) raw_input_payload: str | None = ( @@ -236,6 +262,10 @@ def wrapper(event: Any, context: LambdaContext) -> MutableMapping[str, Any]: initial_checkpoint_token=invocation_input.checkpoint_token, operations={}, service_client=service_client, + # If there are operations other than the initial EXECUTION one, current state is in replay mode + replay_status=ReplayStatus.REPLAY + if len(invocation_input.initial_execution_state.operations) > 1 + else ReplayStatus.NEW, ) execution_state.fetch_paginated_operations( @@ -249,9 +279,12 @@ def wrapper(event: Any, context: LambdaContext) -> MutableMapping[str, Any]: ) # Use ThreadPoolExecutor for concurrent execution of user code and background checkpoint processing - with ThreadPoolExecutor( - max_workers=2, thread_name_prefix="dex-handler" - ) as executor: + with ( + ThreadPoolExecutor( + max_workers=2, thread_name_prefix="dex-handler" + ) as executor, + contextlib.closing(execution_state) as execution_state, + ): # Thread 1: Run background checkpoint processing executor.submit(execution_state.checkpoint_batches_forever) @@ -276,7 +309,6 @@ def wrapper(event: Any, context: LambdaContext) -> MutableMapping[str, Any]: invocation_input.durable_execution_arn, ) serialized_result = json.dumps(result) - # large response handling here. Remember if checkpointing to complete, NOT to include # payload in response if ( @@ -295,18 +327,16 @@ def wrapper(event: Any, context: LambdaContext) -> MutableMapping[str, Any]: # Must ensure the result is persisted before returning to Lambda. # Large results exceed Lambda response limits and must be stored durably # before the execution completes. 
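+ # (Illustrative: with Lambda's ~6 MB response limit, an oversized result is persisted via a synchronous checkpoint and an empty payload is returned; smaller results are returned inline with no extra checkpoint call.)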
- execution_state.create_checkpoint_sync(success_operation) - - # Stop background checkpointing thread - execution_state.stop_checkpointing() - + try: + execution_state.create_checkpoint( + success_operation, is_sync=True + ) + except CheckpointError as e: + return handle_checkpoint_error(e).to_dict() return DurableExecutionInvocationOutput.create_succeeded( result="" ).to_dict() - - # Stop background checkpointing thread - execution_state.stop_checkpointing() - return DurableExecutionInvocationOutput.create_succeeded( result=serialized_result ).to_dict() @@ -314,31 +344,37 @@ def wrapper(event: Any, context: LambdaContext) -> MutableMapping[str, Any]: except BackgroundThreadError as bg_error: # Background checkpoint system failed - propagated through CompletionEvent # Do not attempt to checkpoint anything, just terminate immediately - logger.exception("Checkpoint processing failed") - execution_state.stop_checkpointing() - # Raise the original exception + if isinstance(bg_error.source_exception, BotoClientError): + logger.exception( + "Checkpoint processing failed", + extra=bg_error.source_exception.build_logger_extras(), + ) + else: + logger.exception("Checkpoint processing failed") + # handle the original exception + if isinstance(bg_error.source_exception, CheckpointError): + return handle_checkpoint_error(bg_error.source_exception).to_dict() raise bg_error.source_exception from bg_error except SuspendExecution: # User code suspended - stop background checkpointing thread logger.debug("Suspending execution...") - execution_state.stop_checkpointing() return DurableExecutionInvocationOutput( status=InvocationStatus.PENDING ).to_dict() - except CheckpointError: + except CheckpointError as e: # Checkpoint system is broken - stop background thread and exit immediately - execution_state.stop_checkpointing() - logger.exception("Checkpoint system failed") - raise # Terminate Lambda immediately + logger.exception( + "Checkpoint system failed", + extra=e.build_logger_extras(), + ) + return handle_checkpoint_error(e).to_dict() except InvocationError: - execution_state.stop_checkpointing() logger.exception("Invocation error. Must terminate.") # Throw the error to trigger Lambda retry raise except ExecutionError as e: - execution_state.stop_checkpointing() logger.exception("Execution error. Must terminate without retry.") return DurableExecutionInvocationOutput( status=InvocationStatus.FAILED, @@ -347,15 +383,46 @@ def wrapper(event: Any, context: LambdaContext) -> MutableMapping[str, Any]: except Exception as e: # all user-space errors go here logger.exception("Execution failed") - failed_operation = OperationUpdate.create_execution_fail( - error=ErrorObject.from_exception(e) - ) - # TODO: can optimize, if not too large can just return response rather than checkpoint - execution_state.create_checkpoint_sync(failed_operation) - execution_state.stop_checkpointing() - return DurableExecutionInvocationOutput( - status=InvocationStatus.FAILED + result = DurableExecutionInvocationOutput( + status=InvocationStatus.FAILED, error=ErrorObject.from_exception(e) ).to_dict() + serialized_result = json.dumps(result) + + if ( + serialized_result + and len(serialized_result) > LAMBDA_RESPONSE_SIZE_LIMIT + ): + logger.debug( + "Response size (%s bytes) exceeds Lambda limit (%s bytes).
Checkpointing result.", + len(serialized_result), + LAMBDA_RESPONSE_SIZE_LIMIT, + ) + failed_operation = OperationUpdate.create_execution_fail( + error=ErrorObject.from_exception(e) + ) + + # Checkpoint large result with blocking (is_sync=True, default). + # Must ensure the result is persisted before returning to Lambda. + # Large results exceed Lambda response limits and must be stored durably + # before the execution completes. + try: + execution_state.create_checkpoint_sync(failed_operation) + except CheckpointError as e: + return handle_checkpoint_error(e).to_dict() + return DurableExecutionInvocationOutput( + status=InvocationStatus.FAILED + ).to_dict() + + return result + return wrapper + + +def handle_checkpoint_error(error: CheckpointError) -> DurableExecutionInvocationOutput: + if error.is_retriable(): + raise error from None # Terminate Lambda immediately and have it be retried + return DurableExecutionInvocationOutput( + status=InvocationStatus.FAILED, error=ErrorObject.from_exception(error) + ) diff --git a/src/aws_durable_execution_sdk_python/lambda_service.py b/src/aws_durable_execution_sdk_python/lambda_service.py index 97841c1..b907391 100644 --- a/src/aws_durable_execution_sdk_python/lambda_service.py +++ b/src/aws_durable_execution_sdk_python/lambda_service.py @@ -2,17 +2,17 @@ import datetime import logging -import os from dataclasses import dataclass, field from enum import Enum -from pathlib import Path from typing import TYPE_CHECKING, Any, Protocol, TypeAlias import boto3 # type: ignore +from botocore.config import Config # type: ignore from aws_durable_execution_sdk_python.exceptions import ( CallableRuntimeError, CheckpointError, + GetExecutionStateError, ) if TYPE_CHECKING: @@ -20,10 +20,10 @@ from aws_durable_execution_sdk_python.identifier import OperationIdentifier -ReplayChildren: TypeAlias = bool # noqa UP040 ignore due to python3.11 minimum version -OperationPayload: TypeAlias = str # noqa UP040 ignore due to python3.11 minimum version -TimeoutSeconds: TypeAlias = int # noqa UP040 ignore due to python3.11 minimum version - +# Replace with `type` it when dropping support to Python 3.11 +ReplayChildren: TypeAlias = bool +OperationPayload: TypeAlias = str +TimeoutSeconds: TypeAlias = int logger = logging.getLogger(__name__) @@ -85,7 +85,7 @@ class OperationSubType(Enum): PARALLEL_BRANCH = "ParallelBranch" WAIT_FOR_CALLBACK = "WaitForCallback" WAIT_FOR_CONDITION = "WaitForCondition" - INVOKE = "Invoke" + CHAINED_INVOKE = "ChainedInvoke" @dataclass(frozen=True) @@ -301,17 +301,22 @@ class ChainedInvokeOptions: """ function_name: str + tenant_id: str | None = None @classmethod def from_dict(cls, data: MutableMapping[str, Any]) -> ChainedInvokeOptions: return cls( function_name=data["FunctionName"], + tenant_id=data.get("TenantId"), ) def to_dict(self) -> MutableMapping[str, Any]: result: MutableMapping[str, Any] = { "FunctionName": self.function_name, } + if self.tenant_id is not None: + result["TenantId"] = self.tenant_id + return result @@ -494,7 +499,7 @@ def create_context_fail( def create_execution_succeed(cls, payload: str) -> OperationUpdate: """Create an instance of OperationUpdate for type: EXECUTION, action: SUCCEED.""" return cls( - operation_id=f"execution-result-{datetime.datetime.now(tz=datetime.UTC)}", + operation_id=f"execution-result-{int(datetime.datetime.now(tz=datetime.UTC).timestamp() * 1000)}", operation_type=OperationType.EXECUTION, action=OperationAction.SUCCEED, payload=payload, @@ -504,7 +509,7 @@ def create_execution_succeed(cls, 
payload: str) -> OperationUpdate: def create_execution_fail(cls, error: ErrorObject) -> OperationUpdate: """Create an instance of OperationUpdate for type: EXECUTION, action: FAIL.""" return cls( - operation_id=f"execution-result-{datetime.datetime.now(tz=datetime.UTC)}", + operation_id=f"execution-result-{int(datetime.datetime.now(tz=datetime.UTC).timestamp() * 1000)}", operation_type=OperationType.EXECUTION, action=OperationAction.FAIL, error=error, @@ -591,7 +596,7 @@ def create_invoke_start( operation_id=identifier.operation_id, parent_id=identifier.parent_id, operation_type=OperationType.CHAINED_INVOKE, - sub_type=OperationSubType.INVOKE, + sub_type=OperationSubType.CHAINED_INVOKE, action=OperationAction.START, name=identifier.name, payload=payload, @@ -744,7 +749,7 @@ def from_dict(cls, data: MutableMapping[str, Any]) -> Operation: callback_details = CallbackDetails.from_dict(callback_details_input) chained_invoke_details = None - if chained_invoke_details := data.get("chained_invoke_details"): + if chained_invoke_details := data.get("ChainedInvokeDetails"): chained_invoke_details = ChainedInvokeDetails.from_dict( chained_invoke_details ) @@ -936,55 +941,16 @@ def __init__(self, client: Any) -> None: self.client = client @staticmethod - def load_preview_botocore_models() -> None: - """ - Load boto3 models from the Python path for custom preview client. - """ - os.environ["AWS_DATA_PATH"] = str( - Path(__file__).parent.joinpath("botocore", "data") - ) - - @staticmethod - def initialize_local_runner_client() -> LambdaClient: - endpoint = os.getenv( - "DURABLE_LOCAL_RUNNER_ENDPOINT", "http://host.docker.internal:5000/" - ) - region = os.getenv("DURABLE_LOCAL_RUNNER_REGION", "us-west-2") - - # The local runner client needs execute-api as the signing service name, - # so we have a second `lambdainternal-local` boto model with this. - LambdaClient.load_preview_botocore_models() + def initialize_client() -> LambdaClient: client = boto3.client( - "lambdainternal-local", - endpoint_url=endpoint, - region_name=region, - ) - - logger.debug( - "Initialized lambda client with endpoint: '%s', region: '%s'", - endpoint, - region, + "lambda", + config=Config( + connect_timeout=5, + read_timeout=50, + ), ) return LambdaClient(client=client) - @staticmethod - def initialize_from_env() -> LambdaClient: - LambdaClient.load_preview_botocore_models() - - """ - TODO - we can remove this when were using the actual lambda client, - but we need this with the preview model because boto won't match against lambdainternal. 
- """ - endpoint_url = os.getenv("AWS_ENDPOINT_URL_LAMBDA", None) - if not endpoint_url: - client = boto3.client( - "lambdainternal", - ) - else: - client = boto3.client("lambdainternal", endpoint_url=endpoint_url) - - return LambdaClient(client=client) - def checkpoint( self, durable_execution_arn: str, @@ -1007,8 +973,11 @@ def checkpoint( return CheckpointOutput.from_dict(result) except Exception as e: - logger.exception("Failed to checkpoint.") - raise CheckpointError.from_exception(e) from e + checkpoint_error = CheckpointError.from_exception(e) + logger.exception( + "Failed to checkpoint.", extra=checkpoint_error.build_logger_extras() + ) + raise checkpoint_error from None def get_execution_state( self, @@ -1017,13 +986,20 @@ def get_execution_state( next_marker: str, max_items: int = 1000, ) -> StateOutput: - result: MutableMapping[str, Any] = self.client.get_durable_execution_state( - DurableExecutionArn=durable_execution_arn, - CheckpointToken=checkpoint_token, - Marker=next_marker, - MaxItems=max_items, - ) - return StateOutput.from_dict(result) + try: + result: MutableMapping[str, Any] = self.client.get_durable_execution_state( + DurableExecutionArn=durable_execution_arn, + CheckpointToken=checkpoint_token, + Marker=next_marker, + MaxItems=max_items, + ) + return StateOutput.from_dict(result) + except Exception as e: + error = GetExecutionStateError.from_exception(e) + logger.exception( + "Failed to get execution state.", extra=error.build_logger_extras() + ) + raise error from None # endregion client diff --git a/src/aws_durable_execution_sdk_python/logger.py b/src/aws_durable_execution_sdk_python/logger.py index f68b9b8..1ad68a9 100644 --- a/src/aws_durable_execution_sdk_python/logger.py +++ b/src/aws_durable_execution_sdk_python/logger.py @@ -8,26 +8,32 @@ from aws_durable_execution_sdk_python.types import LoggerInterface if TYPE_CHECKING: - from collections.abc import Mapping, MutableMapping + from collections.abc import Callable, Mapping, MutableMapping + from aws_durable_execution_sdk_python.context import ExecutionState from aws_durable_execution_sdk_python.identifier import OperationIdentifier @dataclass(frozen=True) class LogInfo: - execution_arn: str + execution_state: ExecutionState parent_id: str | None = None + operation_id: str | None = None name: str | None = None attempt: int | None = None @classmethod def from_operation_identifier( - cls, execution_arn: str, op_id: OperationIdentifier, attempt: int | None = None + cls, + execution_state: ExecutionState, + op_id: OperationIdentifier, + attempt: int | None = None, ) -> LogInfo: """Create new log info from an execution arn, OperationIdentifier and attempt.""" return cls( - execution_arn=execution_arn, + execution_state=execution_state, parent_id=op_id.parent_id, + operation_id=op_id.operation_id, name=op_id.name, attempt=attempt, ) @@ -35,8 +41,9 @@ def from_operation_identifier( def with_parent_id(self, parent_id: str) -> LogInfo: """Clone the log info with a new parent id.""" return LogInfo( - execution_arn=self.execution_arn, + execution_state=self.execution_state, parent_id=parent_id, + operation_id=self.operation_id, name=self.name, attempt=self.attempt, ) @@ -44,22 +51,33 @@ def with_parent_id(self, parent_id: str) -> LogInfo: class Logger(LoggerInterface): def __init__( - self, logger: LoggerInterface, default_extra: Mapping[str, object] + self, + logger: LoggerInterface, + default_extra: Mapping[str, object], + execution_state: ExecutionState, ) -> None: self._logger = logger self._default_extra = 
default_extra + self._execution_state = execution_state @classmethod def from_log_info(cls, logger: LoggerInterface, info: LogInfo) -> Logger: """Create a new logger with the given LogInfo.""" - extra: MutableMapping[str, object] = {"execution_arn": info.execution_arn} + extra: MutableMapping[str, object] = { + "executionArn": info.execution_state.durable_execution_arn + } if info.parent_id: - extra["parent_id"] = info.parent_id + extra["parentId"] = info.parent_id if info.name: - extra["name"] = info.name - if info.attempt: - extra["attempt"] = info.attempt - return cls(logger, extra) + # Use 'operationName' instead of 'name' as the key because the stdlib LogRecord reserves the 'name' attribute + extra["operationName"] = info.name + if info.attempt is not None: + extra["attempt"] = info.attempt + 1 + if info.operation_id: + extra["operationId"] = info.operation_id + return cls( + logger=logger, default_extra=extra, execution_state=info.execution_state + ) def with_log_info(self, info: LogInfo) -> Logger: """Clone the existing logger with new LogInfo.""" @@ -75,29 +93,39 @@ def get_logger(self) -> LoggerInterface: def debug( self, msg: object, *args: object, extra: Mapping[str, object] | None = None ) -> None: - merged_extra = {**self._default_extra, **(extra or {})} - self._logger.debug(msg, *args, extra=merged_extra) + self._log(self._logger.debug, msg, *args, extra=extra) def info( self, msg: object, *args: object, extra: Mapping[str, object] | None = None ) -> None: - merged_extra = {**self._default_extra, **(extra or {})} - self._logger.info(msg, *args, extra=merged_extra) + self._log(self._logger.info, msg, *args, extra=extra) def warning( self, msg: object, *args: object, extra: Mapping[str, object] | None = None ) -> None: - merged_extra = {**self._default_extra, **(extra or {})} - self._logger.warning(msg, *args, extra=merged_extra) + self._log(self._logger.warning, msg, *args, extra=extra) def error( self, msg: object, *args: object, extra: Mapping[str, object] | None = None ) -> None: - merged_extra = {**self._default_extra, **(extra or {})} - self._logger.error(msg, *args, extra=merged_extra) + self._log(self._logger.error, msg, *args, extra=extra) def exception( self, msg: object, *args: object, extra: Mapping[str, object] | None = None ) -> None: + self._log(self._logger.exception, msg, *args, extra=extra) + + def _log( + self, + log_func: Callable, + msg: object, + *args: object, + extra: Mapping[str, object] | None = None, + ): + if not self._should_log(): + return merged_extra = {**self._default_extra, **(extra or {})} - self._logger.exception(msg, *args, extra=merged_extra) + log_func(msg, *args, extra=merged_extra) + + def _should_log(self) -> bool: + return not self._execution_state.is_replaying() diff --git a/src/aws_durable_execution_sdk_python/operation/base.py b/src/aws_durable_execution_sdk_python/operation/base.py new file mode 100644 index 0000000..5836cda --- /dev/null +++ b/src/aws_durable_execution_sdk_python/operation/base.py @@ -0,0 +1,187 @@ +"""Base classes for operation executors with checkpoint response handling.""" + +from __future__ import annotations + +from abc import ABC, abstractmethod +from dataclasses import dataclass +from typing import TYPE_CHECKING, Generic, TypeVar + +from aws_durable_execution_sdk_python.exceptions import InvalidStateError + +if TYPE_CHECKING: + from aws_durable_execution_sdk_python.state import CheckpointedResult + +T = TypeVar("T") + + +@dataclass(frozen=True) +class CheckResult(Generic[T]): + """Result of checking
operation checkpoint status. + + Encapsulates the outcome of checking an operation's status and determines + the next action in the operation execution flow. + + IMPORTANT: Do not construct directly. Use factory methods: + - create_is_ready_to_execute(checkpoint) - operation ready to execute + - create_started() - checkpoint created, check status again + - create_completed(result) - terminal result available + + Attributes: + is_ready_to_execute: True if the operation is ready to execute its logic + has_checkpointed_result: True if a terminal result is already available + checkpointed_result: Checkpoint data for execute() + deserialized_result: Final result when operation completed + """ + + is_ready_to_execute: bool + has_checkpointed_result: bool + checkpointed_result: CheckpointedResult | None = None + deserialized_result: T | None = None + + @classmethod + def create_is_ready_to_execute( + cls, checkpoint: CheckpointedResult + ) -> CheckResult[T]: + """Create a CheckResult indicating the operation is ready to execute. + + Args: + checkpoint: The checkpoint data to pass to execute() + + Returns: + CheckResult with is_ready_to_execute=True + """ + return cls( + is_ready_to_execute=True, + has_checkpointed_result=False, + checkpointed_result=checkpoint, + ) + + @classmethod + def create_started(cls) -> CheckResult[T]: + """Create a CheckResult signaling that a checkpoint was created. + + Signals that process() should verify checkpoint status again to detect + if the operation completed already during checkpoint creation. + + Returns: + CheckResult indicating process() should check status again + """ + return cls(is_ready_to_execute=False, has_checkpointed_result=False) + + @classmethod + def create_completed(cls, result: T) -> CheckResult[T]: + """Create a CheckResult with a terminal result already deserialized. + + Args: + result: The final deserialized result + + Returns: + CheckResult with has_checkpointed_result=True and deserialized_result set + """ + return cls( + is_ready_to_execute=False, + has_checkpointed_result=True, + deserialized_result=result, + ) + + +class OperationExecutor(ABC, Generic[T]): + """Base class for durable operations with checkpoint response handling. + + Provides a framework for implementing operations that check status after + creating START checkpoints to handle synchronous completion, avoiding + unnecessary execution or suspension. + + The common pattern: + 1. Check operation status + 2. Create START checkpoint if needed + 3. Check status again (detects synchronous completion) + 4. Execute operation logic when ready + + Subclasses must implement: + - check_result_status(): Check status, create checkpoint if needed, return next action + - execute(): Execute the operation logic with checkpoint data + """ + + @abstractmethod + def check_result_status(self) -> CheckResult[T]: + """Check operation status and create START checkpoint if needed. + + Called twice by process() when creating synchronous checkpoints: once before + and once after, to detect if the operation completed immediately. + + This method should: + 1. Get the current checkpoint result + 2. Check for terminal statuses (SUCCEEDED, FAILED, etc.) and handle them + 3. Check for pending statuses and suspend if needed + 4. Create a START checkpoint if the operation hasn't started + 5. 
Return a CheckResult indicating the next action + + Returns: + CheckResult indicating whether to: + - Return a terminal result (has_checkpointed_result=True) + - Execute operation logic (is_ready_to_execute=True) + - Check status again (neither flag set - checkpoint was just created) + + Raises: + Operation-specific exceptions for terminal failure states + SuspendExecution for pending states + """ + ... # pragma: no cover + + @abstractmethod + def execute(self, checkpointed_result: CheckpointedResult) -> T: + """Execute operation logic with checkpoint data. + + This method is called when the operation is ready to execute its core logic. + It receives the checkpoint data that was returned by check_result_status(). + + Args: + checkpointed_result: The checkpoint data containing operation state + + Returns: + The result of executing the operation + + Raises: + May raise operation-specific errors during execution + """ + ... # pragma: no cover + + def process(self) -> T: + """Process operation with checkpoint response handling. + + Orchestrates the double-check pattern: + 1. Check status (handles replay and existing checkpoints) + 2. If checkpoint was just created, check status again (detects synchronous completion) + 3. Return terminal result if available + 4. Execute operation logic if ready + 5. Raise error for invalid states + + Returns: + The final result of the operation + + Raises: + InvalidStateError: If the check result is in an invalid state + May raise operation-specific errors from check_result_status() or execute() + """ + # Check 1: Entry (handles replay and existing checkpoints) + result = self.check_result_status() + + # If checkpoint was created, verify checkpoint response for immediate status change + if not result.is_ready_to_execute and not result.has_checkpointed_result: + result = self.check_result_status() + + # Return terminal result if available (can be None for operations that return None) + if result.has_checkpointed_result: + return result.deserialized_result # type: ignore[return-value] + + # Execute operation logic + if result.is_ready_to_execute: + if result.checkpointed_result is None: + msg = "CheckResult is marked ready to execute but checkpointed result is not set." 
+ raise InvalidStateError(msg) + return self.execute(result.checkpointed_result) + + # Invalid state - neither terminal nor ready to execute + msg = "Invalid CheckResult state: neither terminal nor ready to execute" + raise InvalidStateError(msg) diff --git a/src/aws_durable_execution_sdk_python/operation/callback.py b/src/aws_durable_execution_sdk_python/operation/callback.py index 4fe2a1e..67c51eb 100644 --- a/src/aws_durable_execution_sdk_python/operation/callback.py +++ b/src/aws_durable_execution_sdk_python/operation/callback.py @@ -10,6 +10,11 @@ CallbackOptions, OperationUpdate, ) +from aws_durable_execution_sdk_python.operation.base import ( + CheckResult, + OperationExecutor, +) +from aws_durable_execution_sdk_python.types import WaitForCallbackContext if TYPE_CHECKING: from collections.abc import Callable @@ -23,69 +28,128 @@ CheckpointedResult, ExecutionState, ) - from aws_durable_execution_sdk_python.types import Callback, DurableContext - - -def create_callback_handler( - state: ExecutionState, - operation_identifier: OperationIdentifier, - config: CallbackConfig | None = None, -) -> str: - """Create the callback checkpoint and return the callback id.""" - callback_options: CallbackOptions = ( - CallbackOptions( - timeout_seconds=config.timeout_seconds, - heartbeat_timeout_seconds=config.heartbeat_timeout_seconds, - ) - if config - else CallbackOptions() + from aws_durable_execution_sdk_python.types import ( + Callback, + DurableContext, + StepContext, ) - checkpointed_result: CheckpointedResult = state.get_checkpoint_result( - operation_identifier.operation_id - ) - if checkpointed_result.is_failed(): - # have to throw the exact same error on replay as the checkpointed failure - checkpointed_result.raise_callable_error() - - if ( - checkpointed_result.is_started() - or checkpointed_result.is_succeeded() - or checkpointed_result.is_timed_out() + +class CallbackOperationExecutor(OperationExecutor[str]): + """Executor for callback operations. + + Checks operation status after creating START checkpoints to handle operations + that complete synchronously, avoiding unnecessary execution or suspension. + + Unlike other operations, callbacks NEVER execute logic - they only create + checkpoints and return callback IDs. + + CRITICAL: Errors are deferred to Callback.result() for deterministic replay. + create_callback() always returns the callback_id, even for FAILED callbacks. + """ + + def __init__( + self, + state: ExecutionState, + operation_identifier: OperationIdentifier, + config: CallbackConfig | None, ): - # callback id should already exist + """Initialize the callback operation executor. + + Args: + state: The execution state + operation_identifier: The operation identifier + config: The callback configuration (optional) + """ + self.state = state + self.operation_identifier = operation_identifier + self.config = config + + def check_result_status(self) -> CheckResult[str]: + """Check operation status and create START checkpoint if needed. + + Called twice by process() when creating synchronous checkpoints: once before + and once after, to detect if the operation completed immediately. + + CRITICAL: This method does NOT raise on FAILED status. Errors are deferred + to Callback.result() to ensure deterministic replay. Code between + create_callback() and callback.result() must always execute. 
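# --- Editor's sketch (not part of the diff) -----------------------------------
# A minimal, runnable illustration of the double-check pattern that
# OperationExecutor.process() implements above. Every name here (ToyCheck,
# ToyExecutor) is a hypothetical stand-in, not an SDK API; the real CheckResult
# and CheckpointedResult carry more state than these simplified versions.
from dataclasses import dataclass


@dataclass
class ToyCheck:
    ready: bool = False
    done: bool = False
    value: str | None = None


class ToyExecutor:
    """Resolves synchronously on checkpoint creation to exercise the second check."""

    def __init__(self) -> None:
        self.checkpoint_exists = False

    def check_result_status(self) -> ToyCheck:
        if self.checkpoint_exists:
            # Pretend the backend completed the operation while checkpointing.
            return ToyCheck(done=True, value="resolved-synchronously")
        self.checkpoint_exists = True  # "create START checkpoint"
        return ToyCheck()  # neither flag set: caller should check again

    def process(self) -> str:
        result = self.check_result_status()
        if not result.ready and not result.done:
            result = self.check_result_status()  # the second check
        if result.done:
            return result.value or ""
        raise RuntimeError("invalid state: neither terminal nor ready")


assert ToyExecutor().process() == "resolved-synchronously"
# -------------------------------------------------------------------------------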
+ + Returns: + CheckResult.create_is_ready_to_execute() for any existing status (including FAILED) + or CheckResult.create_started() after creating checkpoint + + Raises: + CallbackError: If callback_details are missing from checkpoint + """ + checkpointed_result: CheckpointedResult = self.state.get_checkpoint_result( + self.operation_identifier.operation_id + ) + + # CRITICAL: Do NOT raise on FAILED - defer error to Callback.result() + # If checkpoint exists (any status including FAILED), return ready to execute + # The execute() method will extract the callback_id + if checkpointed_result.is_existent(): + if ( + not checkpointed_result.operation + or not checkpointed_result.operation.callback_details + ): + msg = f"Missing callback details for operation: {self.operation_identifier.operation_id}" + raise CallbackError(msg) + + return CheckResult.create_is_ready_to_execute(checkpointed_result) + + # Create START checkpoint + callback_options: CallbackOptions = ( + CallbackOptions( + timeout_seconds=self.config.timeout_seconds, + heartbeat_timeout_seconds=self.config.heartbeat_timeout_seconds, + ) + if self.config + else CallbackOptions() + ) + + create_callback_operation: OperationUpdate = OperationUpdate.create_callback( + identifier=self.operation_identifier, + callback_options=callback_options, + ) + + # Checkpoint callback START with blocking (is_sync=True, default). + # Must wait for the API to generate and return the callback ID before proceeding. + # The callback ID is needed immediately by the caller to pass to external systems. + self.state.create_checkpoint(operation_update=create_callback_operation) + + # Signal to process() to check status again for immediate response + return CheckResult.create_started() + + def execute(self, checkpointed_result: CheckpointedResult) -> str: + """Execute callback operation by extracting the callback_id. + + Callbacks don't execute logic - they just extract and return the callback_id + from the checkpoint data. + + Args: + checkpointed_result: The checkpoint data containing callback_details + + Returns: + The callback_id from the checkpoint + + Raises: + CallbackError: If callback_details are missing (should never happen) + """ if ( not checkpointed_result.operation or not checkpointed_result.operation.callback_details ): - msg = f"Missing callback details for operation: {operation_identifier.operation_id}" + msg = f"Missing callback details for operation: {self.operation_identifier.operation_id}" raise CallbackError(msg) return checkpointed_result.operation.callback_details.callback_id - create_callback_operation = OperationUpdate.create_callback( - identifier=operation_identifier, - callback_options=callback_options, - ) - # Checkpoint callback START with blocking (is_sync=True, default). - # Must wait for the API to generate and return the callback ID before proceeding. - # The callback ID is needed immediately by the caller to pass to external systems. 
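# --- Editor's sketch (not part of the diff) -----------------------------------
# Why the callback START checkpoint blocks: the caller usually hands the fresh
# callback_id to an external system immediately. A hypothetical submitter might
# look like the following; submit_for_approval, queue_client, and queue_url are
# illustrative placeholders (queue_client is assumed to be a boto3 SQS client).
import json


def submit_for_approval(callback_id: str, queue_client, queue_url: str) -> None:
    # An external worker later completes the callback using this id, so the id
    # must already be persisted (hence is_sync=True) before we enqueue it.
    queue_client.send_message(
        QueueUrl=queue_url,
        MessageBody=json.dumps({"callback_id": callback_id, "action": "approve"}),
    )
# -------------------------------------------------------------------------------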
- state.create_checkpoint(operation_update=create_callback_operation) - - result: CheckpointedResult = state.get_checkpoint_result( - operation_identifier.operation_id - ) - - if not result.operation or not result.operation.callback_details: - msg = f"Missing callback details for operation: {operation_identifier.operation_id}" - raise CallbackError(msg) - - return result.operation.callback_details.callback_id - def wait_for_callback_handler( context: DurableContext, - submitter: Callable[[str], None], + submitter: Callable[[str, WaitForCallbackContext], None], name: str | None = None, config: WaitForCallbackConfig | None = None, ) -> Any: @@ -98,8 +162,10 @@ def wait_for_callback_handler( name=f"{name_with_space}create callback id", config=config ) - def submitter_step(step_context): # noqa: ARG001 - return submitter(callback.callback_id) + def submitter_step(step_context: StepContext): + return submitter( + callback.callback_id, WaitForCallbackContext(logger=step_context.logger) + ) step_config = ( StepConfig( diff --git a/src/aws_durable_execution_sdk_python/operation/child.py b/src/aws_durable_execution_sdk_python/operation/child.py index 07d0a08..04819d4 100644 --- a/src/aws_durable_execution_sdk_python/operation/child.py +++ b/src/aws_durable_execution_sdk_python/operation/child.py @@ -16,13 +16,20 @@ OperationSubType, OperationUpdate, ) +from aws_durable_execution_sdk_python.operation.base import ( + CheckResult, + OperationExecutor, +) from aws_durable_execution_sdk_python.serdes import deserialize, serialize if TYPE_CHECKING: from collections.abc import Callable from aws_durable_execution_sdk_python.identifier import OperationIdentifier - from aws_durable_execution_sdk_python.state import ExecutionState + from aws_durable_execution_sdk_python.state import ( + CheckpointedResult, + ExecutionState, + ) logger = logging.getLogger(__name__) @@ -32,131 +39,239 @@ CHECKPOINT_SIZE_LIMIT = 256 * 1024 -def child_handler( - func: Callable[[], T], - state: ExecutionState, - operation_identifier: OperationIdentifier, - config: ChildConfig | None, -) -> T: - logger.debug( - "▶️ Executing child context for id: %s, name: %s", - operation_identifier.operation_id, - operation_identifier.name, - ) +class ChildOperationExecutor(OperationExecutor[T]): + """Executor for child context operations. - if not config: - config = ChildConfig() + Checks operation status after creating START checkpoints to handle operations + that complete synchronously, avoiding unnecessary execution or suspension. + + Handles large payload scenarios with ReplayChildren mode. 
+ """ - checkpointed_result = state.get_checkpoint_result(operation_identifier.operation_id) - if ( - checkpointed_result.is_succeeded() - and not checkpointed_result.is_replay_children() + def __init__( + self, + func: Callable[[], T], + state: ExecutionState, + operation_identifier: OperationIdentifier, + config: ChildConfig, ): - logger.debug( - "Child context already completed, skipping execution for id: %s, name: %s", - operation_identifier.operation_id, - operation_identifier.name, - ) - if checkpointed_result.result is None: - return None # type: ignore - return deserialize( - serdes=config.serdes, - data=checkpointed_result.result, - operation_id=operation_identifier.operation_id, - durable_execution_arn=state.durable_execution_arn, - ) - if checkpointed_result.is_failed(): - checkpointed_result.raise_callable_error() - sub_type = config.sub_type or OperationSubType.RUN_IN_CHILD_CONTEXT - - if not checkpointed_result.is_existent(): - start_operation = OperationUpdate.create_context_start( - identifier=operation_identifier, - sub_type=sub_type, + """Initialize the child operation executor. + + Args: + func: The child context function to execute + state: The execution state + operation_identifier: The operation identifier + config: The child configuration + """ + self.func = func + self.state = state + self.operation_identifier = operation_identifier + self.config = config + self.sub_type = config.sub_type or OperationSubType.RUN_IN_CHILD_CONTEXT + + def check_result_status(self) -> CheckResult[T]: + """Check operation status and create START checkpoint if needed. + + Called twice by process() when creating synchronous checkpoints: once before + and once after, to detect if the operation completed immediately. + + Returns: + CheckResult indicating the next action to take + + Raises: + CallableRuntimeError: For FAILED operations + """ + checkpointed_result: CheckpointedResult = self.state.get_checkpoint_result( + self.operation_identifier.operation_id ) - # Checkpoint child context START with non-blocking (is_sync=False). - # This is a fire-and-forget operation for performance - we don't need to wait for - # persistence before executing the child context. The START checkpoint is purely - # for observability and tracking the operation hierarchy. - state.create_checkpoint(operation_update=start_operation, is_sync=False) - - try: - raw_result: T = func() - if checkpointed_result.is_replay_children(): + + # Terminal success without replay_children - deserialize and return + if ( + checkpointed_result.is_succeeded() + and not checkpointed_result.is_replay_children() + ): logger.debug( - "ReplayChildren mode: Executed child context again on replay due to large payload. Exiting child context without creating another checkpoint. id: %s, name: %s", - operation_identifier.operation_id, - operation_identifier.name, + "Child context already completed, skipping execution for id: %s, name: %s", + self.operation_identifier.operation_id, + self.operation_identifier.name, ) - return raw_result - serialized_result: str = serialize( - serdes=config.serdes, - value=raw_result, - operation_id=operation_identifier.operation_id, - durable_execution_arn=state.durable_execution_arn, - ) - # Summary Generator Logic: - # When the serialized result exceeds 256KB, we use ReplayChildren mode to avoid - # checkpointing large payloads. Instead, we checkpoint a compact summary and mark - # the operation for replay. This matches the TypeScript implementation behavior. 
- # - # See TypeScript reference: - # - aws-durable-execution-sdk-js/src/handlers/run-in-child-context-handler/run-in-child-context-handler.ts (lines ~200-220) - # - # The summary generator creates a JSON summary with metadata (type, counts, status) - # instead of the full BatchResult. During replay, the child context is re-executed - # to reconstruct the full result rather than deserializing from the checkpoint. - replay_children: bool = False - if len(serialized_result) > CHECKPOINT_SIZE_LIMIT: - logger.debug( - "Large payload detected, using ReplayChildren mode: id: %s, name: %s, payload_size: %d, limit: %d", - operation_identifier.operation_id, - operation_identifier.name, - len(serialized_result), - CHECKPOINT_SIZE_LIMIT, + if checkpointed_result.result is None: + return CheckResult.create_completed(None) # type: ignore + + result: T = deserialize( + serdes=self.config.serdes, + data=checkpointed_result.result, + operation_id=self.operation_identifier.operation_id, + durable_execution_arn=self.state.durable_execution_arn, ) - replay_children = True - # Use summary generator if provided, otherwise use empty string (matches TypeScript) - serialized_result = ( - config.summary_generator(raw_result) if config.summary_generator else "" + return CheckResult.create_completed(result) + + # Terminal success with replay_children - re-execute + if ( + checkpointed_result.is_succeeded() + and checkpointed_result.is_replay_children() + ): + return CheckResult.create_is_ready_to_execute(checkpointed_result) + + # Terminal failure + if checkpointed_result.is_failed(): + checkpointed_result.raise_callable_error() + + # Create START checkpoint if not exists + if not checkpointed_result.is_existent(): + start_operation: OperationUpdate = OperationUpdate.create_context_start( + identifier=self.operation_identifier, + sub_type=self.sub_type, + ) + # Checkpoint child context START with non-blocking (is_sync=False). + # This is a fire-and-forget operation for performance - we don't need to wait for + # persistence before executing the child context. The START checkpoint is purely + # for observability and tracking the operation hierarchy. + self.state.create_checkpoint( + operation_update=start_operation, is_sync=False ) - success_operation = OperationUpdate.create_context_succeed( - identifier=operation_identifier, - payload=serialized_result, - sub_type=sub_type, - context_options=ContextOptions(replay_children=replay_children), - ) - # Checkpoint child context SUCCEED with blocking (is_sync=True, default). - # Must ensure the child context result is persisted before returning to the parent. - # This guarantees the result is durable and child operations won't be re-executed on replay - # (unless replay_children=True for large payloads). - state.create_checkpoint(operation_update=success_operation) + # Ready to execute (checkpoint exists or was just created) + return CheckResult.create_is_ready_to_execute(checkpointed_result) + + def execute(self, checkpointed_result: CheckpointedResult) -> T: + """Execute child context function with error handling and large payload support. 
+ Args: + checkpointed_result: The checkpoint data containing operation state + + Returns: + The result of executing the child context function + + Raises: + SuspendExecution: Re-raised without checkpointing + InvocationError: Re-raised after checkpointing FAIL + CallableRuntimeError: Raised for other exceptions after checkpointing FAIL + """ logger.debug( - "✅ Successfully completed child context for id: %s, name: %s", - operation_identifier.operation_id, - operation_identifier.name, + "▶️ Executing child context for id: %s, name: %s", + self.operation_identifier.operation_id, + self.operation_identifier.name, ) - return raw_result # noqa: TRY300 - except SuspendExecution: - # Don't checkpoint SuspendExecution - let it bubble up - raise - except Exception as e: - error_object = ErrorObject.from_exception(e) - fail_operation = OperationUpdate.create_context_fail( - identifier=operation_identifier, error=error_object, sub_type=sub_type - ) - # Checkpoint child context FAIL with blocking (is_sync=True, default). - # Must ensure the failure state is persisted before raising the exception. - # This guarantees the error is durable and child operations won't be re-executed on replay. - state.create_checkpoint(operation_update=fail_operation) - - # InvocationError and its derivatives can be retried - # When we encounter an invocation error (in all of its forms), we bubble that - # error upwards (with the checkpoint in place) such that we reach the - # execution handler at the very top, which will then induce a retry from the - # dataplane. - if isinstance(e, InvocationError): + + try: + raw_result: T = self.func() + + # If in replay_children mode, return without checkpointing + if checkpointed_result.is_replay_children(): + logger.debug( + "ReplayChildren mode: Executed child context again on replay due to large payload. Exiting child context without creating another checkpoint. id: %s, name: %s", + self.operation_identifier.operation_id, + self.operation_identifier.name, + ) + return raw_result + + # Serialize result + serialized_result: str = serialize( + serdes=self.config.serdes, + value=raw_result, + operation_id=self.operation_identifier.operation_id, + durable_execution_arn=self.state.durable_execution_arn, + ) + + # Check payload size and use ReplayChildren mode if needed + # Summary Generator Logic: + # When the serialized result exceeds 256KB, we use ReplayChildren mode to avoid + # checkpointing large payloads. Instead, we checkpoint a compact summary and mark + # the operation for replay. This matches the TypeScript implementation behavior. + # + # See TypeScript reference: + # - aws-durable-execution-sdk-js/src/handlers/run-in-child-context-handler/run-in-child-context-handler.ts (lines ~200-220) + # + # The summary generator creates a JSON summary with metadata (type, counts, status) + # instead of the full BatchResult. During replay, the child context is re-executed + # to reconstruct the full result rather than deserializing from the checkpoint. 
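# --- Editor's sketch (not part of the diff) -----------------------------------
# The ReplayChildren decision described in the comment above, reduced to its
# essentials. The helper name and tuple return are editorial inventions; the
# real logic is inlined in ChildOperationExecutor.execute() below.
from collections.abc import Callable
from typing import Any

SIZE_LIMIT = 256 * 1024  # mirrors CHECKPOINT_SIZE_LIMIT (256 KB)


def choose_payload(
    serialized: str,
    raw_result: Any,
    summary_generator: Callable[[Any], str] | None,
) -> tuple[str, bool]:
    """Return (payload_to_checkpoint, replay_children)."""
    if len(serialized) <= SIZE_LIMIT:
        return serialized, False
    # Too large: checkpoint a compact summary (or "") and mark the operation
    # so the child context is re-executed on replay instead of deserialized.
    return (summary_generator(raw_result) if summary_generator else ""), True


assert choose_payload("x" * 10, None, None) == ("x" * 10, False)
assert choose_payload("x" * (SIZE_LIMIT + 1), None, None) == ("", True)
# -------------------------------------------------------------------------------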
+ replay_children: bool = False + if len(serialized_result) > CHECKPOINT_SIZE_LIMIT: + logger.debug( + "Large payload detected, using ReplayChildren mode: id: %s, name: %s, payload_size: %d, limit: %d", + self.operation_identifier.operation_id, + self.operation_identifier.name, + len(serialized_result), + CHECKPOINT_SIZE_LIMIT, + ) + replay_children = True + # Use summary generator if provided, otherwise use empty string (matches TypeScript) + serialized_result = ( + self.config.summary_generator(raw_result) + if self.config.summary_generator + else "" + ) + + # Checkpoint SUCCEED + success_operation: OperationUpdate = OperationUpdate.create_context_succeed( + identifier=self.operation_identifier, + payload=serialized_result, + sub_type=self.sub_type, + context_options=ContextOptions(replay_children=replay_children), + ) + # Checkpoint child context SUCCEED with blocking (is_sync=True, default). + # Must ensure the child context result is persisted before returning to the parent. + # This guarantees the result is durable and child operations won't be re-executed on replay + # (unless replay_children=True for large payloads). + self.state.create_checkpoint(operation_update=success_operation) + + logger.debug( + "✅ Successfully completed child context for id: %s, name: %s", + self.operation_identifier.operation_id, + self.operation_identifier.name, + ) + return raw_result # noqa: TRY300 + except SuspendExecution: + # Don't checkpoint SuspendExecution - let it bubble up raise - raise error_object.to_callable_runtime_error() from e + except Exception as e: + error_object = ErrorObject.from_exception(e) + fail_operation: OperationUpdate = OperationUpdate.create_context_fail( + identifier=self.operation_identifier, + error=error_object, + sub_type=self.sub_type, + ) + # Checkpoint child context FAIL with blocking (is_sync=True, default). + # Must ensure the failure state is persisted before raising the exception. + # This guarantees the error is durable and child operations won't be re-executed on replay. + self.state.create_checkpoint(operation_update=fail_operation) + + # InvocationError and its derivatives can be retried + # When we encounter an invocation error (in all of its forms), we bubble that + # error upwards (with the checkpoint in place) such that we reach the + # execution handler at the very top, which will then induce a retry from the + # dataplane. + if isinstance(e, InvocationError): + raise + raise error_object.to_callable_runtime_error() from e + + +def child_handler( + func: Callable[[], T], + state: ExecutionState, + operation_identifier: OperationIdentifier, + config: ChildConfig | None, +) -> T: + """Public API for child context operations - maintains existing signature. + + This function creates a ChildOperationExecutor and delegates to its process() method, + maintaining backward compatibility with existing code that calls child_handler. 
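# --- Editor's sketch (not part of the diff) -----------------------------------
# The error-routing contract of ChildOperationExecutor.execute() as a compact
# skeleton. The exception classes are local stand-ins for the SDK's
# SuspendExecution, InvocationError, and CallableRuntimeError.
class Suspend(Exception): ...


class Invocation(Exception): ...


class CallableRuntime(Exception): ...


def run_child(func, checkpoint_fail):
    try:
        return func()
    except Suspend:
        raise  # control flow, never checkpointed
    except Exception as e:
        checkpoint_fail(e)  # FAIL checkpoint is always persisted first
        if isinstance(e, Invocation):
            raise  # bubbles up so the dataplane can retry the invocation
        raise CallableRuntime(str(e)) from e  # deterministic error on replay
# -------------------------------------------------------------------------------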
+ + Args: + func: The child context function to execute + state: The execution state + operation_identifier: The operation identifier + config: The child configuration (optional) + + Returns: + The result of executing the child context + + Raises: + May raise operation-specific errors during execution + """ + if not config: + config = ChildConfig() + + executor = ChildOperationExecutor(func, state, operation_identifier, config) + return executor.process() diff --git a/src/aws_durable_execution_sdk_python/operation/invoke.py b/src/aws_durable_execution_sdk_python/operation/invoke.py index 1c752de..9288c98 100644 --- a/src/aws_durable_execution_sdk_python/operation/invoke.py +++ b/src/aws_durable_execution_sdk_python/operation/invoke.py @@ -5,18 +5,31 @@ import logging from typing import TYPE_CHECKING, TypeVar -from aws_durable_execution_sdk_python.config import InvokeConfig from aws_durable_execution_sdk_python.exceptions import ExecutionError from aws_durable_execution_sdk_python.lambda_service import ( ChainedInvokeOptions, OperationUpdate, ) -from aws_durable_execution_sdk_python.serdes import deserialize, serialize + +# Import base classes for operation executor pattern +from aws_durable_execution_sdk_python.operation.base import ( + CheckResult, + OperationExecutor, +) +from aws_durable_execution_sdk_python.serdes import ( + DEFAULT_JSON_SERDES, + deserialize, + serialize, +) from aws_durable_execution_sdk_python.suspend import suspend_with_optional_resume_delay if TYPE_CHECKING: + from aws_durable_execution_sdk_python.config import InvokeConfig from aws_durable_execution_sdk_python.identifier import OperationIdentifier - from aws_durable_execution_sdk_python.state import ExecutionState + from aws_durable_execution_sdk_python.state import ( + CheckpointedResult, + ExecutionState, + ) P = TypeVar("P") # Payload type R = TypeVar("R") # Result type @@ -24,88 +37,136 @@ logger = logging.getLogger(__name__) -def invoke_handler( - function_name: str, - payload: P, - state: ExecutionState, - operation_identifier: OperationIdentifier, - config: InvokeConfig[P, R] | None, -) -> R: - """Invoke another Durable Function.""" - logger.debug( - "🔗 Invoke %s (%s)", - operation_identifier.name or function_name, - operation_identifier.operation_id, - ) +class InvokeOperationExecutor(OperationExecutor[R]): + """Executor for invoke operations. + + Checks operation status after creating START checkpoints to handle operations + that complete synchronously, avoiding unnecessary execution or suspension. - if not config: - config = InvokeConfig[P, R]() + The invoke operation never actually "executes" in the traditional sense - + it always suspends to wait for the async invocation to complete. + """ - # Check if we have existing step data - checkpointed_result = state.get_checkpoint_result(operation_identifier.operation_id) + def __init__( + self, + function_name: str, + payload: P, + state: ExecutionState, + operation_identifier: OperationIdentifier, + config: InvokeConfig[P, R], + ): + """Initialize the invoke operation executor. 
+ + Args: + function_name: Name of the function to invoke + payload: The payload to pass to the invoked function + state: The execution state + operation_identifier: The operation identifier + config: Configuration for the invoke operation + """ + self.function_name = function_name + self.payload = payload + self.state = state + self.operation_identifier = operation_identifier + self.config = config + + def check_result_status(self) -> CheckResult[R]: + """Check operation status and create START checkpoint if needed. + + Called twice by process() when creating synchronous checkpoints: once before + and once after, to detect if the operation completed immediately. + + Returns: + CheckResult indicating the next action to take + + Raises: + CallableRuntimeError: For FAILED, TIMED_OUT, or STOPPED operations + SuspendExecution: For STARTED operations waiting for completion + """ + checkpointed_result: CheckpointedResult = self.state.get_checkpoint_result( + self.operation_identifier.operation_id + ) + + # Terminal success - deserialize and return + if checkpointed_result.is_succeeded(): + if checkpointed_result.result is None: + return CheckResult.create_completed(None) # type: ignore - if checkpointed_result.is_succeeded(): - # Return persisted result - no need to check for errors in successful operations + result: R = deserialize( + serdes=self.config.serdes_result or DEFAULT_JSON_SERDES, + data=checkpointed_result.result, + operation_id=self.operation_identifier.operation_id, + durable_execution_arn=self.state.durable_execution_arn, + ) + return CheckResult.create_completed(result) + + # Terminal failures if ( - checkpointed_result.operation - and checkpointed_result.operation.chained_invoke_details - and checkpointed_result.operation.chained_invoke_details.result + checkpointed_result.is_failed() + or checkpointed_result.is_timed_out() + or checkpointed_result.is_stopped() ): - return deserialize( - serdes=config.serdes_result, - data=checkpointed_result.operation.chained_invoke_details.result, - operation_id=operation_identifier.operation_id, - durable_execution_arn=state.durable_execution_arn, + checkpointed_result.raise_callable_error() + + # Still running - ready to suspend + if checkpointed_result.is_started(): + logger.debug( + "⏳ Invoke %s still in progress, will suspend", + self.operation_identifier.name or self.function_name, ) - return None # type: ignore + return CheckResult.create_is_ready_to_execute(checkpointed_result) + + # Create START checkpoint if not exists + if not checkpointed_result.is_existent(): + serialized_payload: str = serialize( + serdes=self.config.serdes_payload or DEFAULT_JSON_SERDES, + value=self.payload, + operation_id=self.operation_identifier.operation_id, + durable_execution_arn=self.state.durable_execution_arn, + ) + start_operation: OperationUpdate = OperationUpdate.create_invoke_start( + identifier=self.operation_identifier, + payload=serialized_payload, + chained_invoke_options=ChainedInvokeOptions( + function_name=self.function_name, + tenant_id=self.config.tenant_id, + ), + )
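# --- Editor's sketch (not part of the diff) -----------------------------------
# The payload/result round-trip implied above: the payload is serialized before
# the START checkpoint, and the result is deserialized on a later invocation
# once the chained invoke has SUCCEEDED. A minimal stand-in for the default
# JSON serdes (the real DEFAULT_JSON_SERDES is richer than this):
import json


def serialize_payload(payload: object) -> str:
    return json.dumps(payload)


def deserialize_result(data: str) -> object:
    return json.loads(data)


checkpointed = serialize_payload({"order_id": 42})
assert deserialize_result(checkpointed) == {"order_id": 42}
# -------------------------------------------------------------------------------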
+ # Checkpoint invoke START with blocking (is_sync=True). + # Must ensure the chained invocation is recorded before suspending execution. + self.state.create_checkpoint(operation_update=start_operation, is_sync=True) - if ( - checkpointed_result.is_failed() - or checkpointed_result.is_timed_out() - or checkpointed_result.is_stopped() - ): - # Operation failed, throw the exact same error on replay as the checkpointed failure - checkpointed_result.raise_callable_error() - - if checkpointed_result.is_started() or checkpointed_result.is_pending(): - # Operation is still running, suspend until completion - logger.debug( - "⏳ Invoke %s still in progress, suspending", - operation_identifier.name or function_name, - ) - msg = f"Invoke {operation_identifier.operation_id} still in progress" - suspend_with_optional_resume_delay(msg, config.timeout_seconds) - - serialized_payload: str = serialize( - serdes=config.serdes_payload, - value=payload, - operation_id=operation_identifier.operation_id, - durable_execution_arn=state.durable_execution_arn, - ) + logger.debug( + "🚀 Invoke %s started, will check for immediate response", + self.operation_identifier.name or self.function_name, + ) - # the backend will do the invoke once it gets this checkpoint - start_operation: OperationUpdate = OperationUpdate.create_invoke_start( - identifier=operation_identifier, - payload=serialized_payload, - chained_invoke_options=ChainedInvokeOptions(function_name=function_name), - ) + # Signal to process() that checkpoint was created - to recheck status for permission errors, etc. + # before proceeding. + return CheckResult.create_started() - # Checkpoint invoke START with blocking (is_sync=True, default). - # Must ensure the chained invocation is recorded before suspending execution. - # This guarantees the invoke operation is durable and will be tracked by the backend. - state.create_checkpoint(operation_update=start_operation) + # Ready to suspend (checkpoint exists but not in a terminal or started state) + return CheckResult.create_is_ready_to_execute(checkpointed_result) - logger.debug( - "🚀 Invoke %s started, suspending for async execution", - operation_identifier.name or function_name, - ) + def execute(self, _checkpointed_result: CheckpointedResult) -> R: + """Execute invoke operation by suspending to wait for async completion. - # Suspend so invoke executes asynchronously without consuming cpu here - msg = ( - f"Invoke {operation_identifier.operation_id} started, suspending for completion" - ) - suspend_with_optional_resume_delay(msg, config.timeout_seconds) - # This line should never be reached since suspend_with_optional_resume_delay always raises - # if it is ever reached, we will crash in a non-retryable manner via ExecutionError - msg = "suspend_with_optional_resume_delay should have raised an exception, but did not." - raise ExecutionError(msg) from None + The invoke operation doesn't execute synchronously - it suspends and + the backend executes the invoked function asynchronously. + + Args: + _checkpointed_result: The checkpoint data (unused, but required by the interface) + + Returns: + Never returns - always suspends + + Raises: + SuspendExecution: Always raised via suspend_with_optional_resume_delay + ExecutionError: If suspend doesn't raise (should never happen) + """ + msg: str = f"Invoke {self.operation_identifier.operation_id} started, suspending for completion" + suspend_with_optional_resume_delay(msg, self.config.timeout_seconds) + # This line should never be reached since suspend_with_optional_resume_delay always raises + error_msg: str = "suspend_with_optional_resume_delay should have raised an exception, but did not."
+ raise ExecutionError(error_msg) from None diff --git a/src/aws_durable_execution_sdk_python/operation/map.py b/src/aws_durable_execution_sdk_python/operation/map.py index ed76bb4..2551b48 100644 --- a/src/aws_durable_execution_sdk_python/operation/map.py +++ b/src/aws_durable_execution_sdk_python/operation/map.py @@ -7,9 +7,9 @@ from collections.abc import Callable, Sequence from typing import TYPE_CHECKING, Generic, TypeVar -from aws_durable_execution_sdk_python.concurrency import ( +from aws_durable_execution_sdk_python.concurrency.executor import ConcurrentExecutor +from aws_durable_execution_sdk_python.concurrency.models import ( BatchResult, - ConcurrentExecutor, Executable, ) from aws_durable_execution_sdk_python.config import MapConfig @@ -82,6 +82,7 @@ def from_items( name_prefix="map-item-", serdes=config.serdes, summary_generator=config.summary_generator, + item_serdes=config.item_serdes, ) def execute_item(self, child_context, executable: Executable[Callable]) -> R: diff --git a/src/aws_durable_execution_sdk_python/operation/parallel.py b/src/aws_durable_execution_sdk_python/operation/parallel.py index e81499f..5046c75 100644 --- a/src/aws_durable_execution_sdk_python/operation/parallel.py +++ b/src/aws_durable_execution_sdk_python/operation/parallel.py @@ -7,12 +7,13 @@ from collections.abc import Callable, Sequence from typing import TYPE_CHECKING, TypeVar -from aws_durable_execution_sdk_python.concurrency import ConcurrentExecutor, Executable +from aws_durable_execution_sdk_python.concurrency.executor import ConcurrentExecutor +from aws_durable_execution_sdk_python.concurrency.models import Executable from aws_durable_execution_sdk_python.config import ParallelConfig from aws_durable_execution_sdk_python.lambda_service import OperationSubType if TYPE_CHECKING: - from aws_durable_execution_sdk_python.concurrency import BatchResult + from aws_durable_execution_sdk_python.concurrency.models import BatchResult from aws_durable_execution_sdk_python.context import DurableContext from aws_durable_execution_sdk_python.identifier import OperationIdentifier from aws_durable_execution_sdk_python.serdes import SerDes @@ -69,6 +70,7 @@ def from_callables( name_prefix="parallel-branch-", serdes=config.serdes, summary_generator=config.summary_generator, + item_serdes=config.item_serdes, ) def execute_item(self, child_context, executable: Executable[Callable]) -> R: # noqa: PLR6301 diff --git a/src/aws_durable_execution_sdk_python/operation/step.py b/src/aws_durable_execution_sdk_python/operation/step.py index c80b18b..eb49c9b 100644 --- a/src/aws_durable_execution_sdk_python/operation/step.py +++ b/src/aws_durable_execution_sdk_python/operation/step.py @@ -11,6 +11,7 @@ ) from aws_durable_execution_sdk_python.exceptions import ( ExecutionError, + InvalidStateError, StepInterruptedError, ) from aws_durable_execution_sdk_python.lambda_service import ( @@ -18,6 +19,10 @@ OperationUpdate, ) from aws_durable_execution_sdk_python.logger import Logger, LogInfo +from aws_durable_execution_sdk_python.operation.base import ( + CheckResult, + OperationExecutor, +) from aws_durable_execution_sdk_python.retries import RetryDecision, RetryPresets from aws_durable_execution_sdk_python.serdes import deserialize, serialize from aws_durable_execution_sdk_python.suspend import ( @@ -40,230 +45,314 @@ T = TypeVar("T") -def step_handler( - func: Callable[[StepContext], T], - state: ExecutionState, - operation_identifier: OperationIdentifier, - config: StepConfig | None, - context_logger: Logger, -) -> T: - 
logger.debug( - "▶️ Executing step for id: %s, name: %s", - operation_identifier.operation_id, - operation_identifier.name, - ) - - if not config: - config = StepConfig() - - checkpointed_result: CheckpointedResult = state.get_checkpoint_result( - operation_identifier.operation_id - ) - if checkpointed_result.is_succeeded(): - logger.debug( - "Step already completed, skipping execution for id: %s, name: %s", - operation_identifier.operation_id, - operation_identifier.name, - ) - if checkpointed_result.result is None: - return None # type: ignore - - return deserialize( - serdes=config.serdes, - data=checkpointed_result.result, - operation_id=operation_identifier.operation_id, - durable_execution_arn=state.durable_execution_arn, - ) +class StepOperationExecutor(OperationExecutor[T]): + """Executor for step operations. - if checkpointed_result.is_failed(): - # have to throw the exact same error on replay as the checkpointed failure - checkpointed_result.raise_callable_error() - - if checkpointed_result.is_pending(): - scheduled_timestamp = checkpointed_result.get_next_attempt_timestamp() - # normally, we'd ensure that a suspension here would be for > 0 seconds; - # however, this is coming from a checkpoint, and we can trust that it is a correct target timestamp. - suspend_with_optional_resume_timestamp( - msg=f"Retry scheduled for {operation_identifier.name or operation_identifier.operation_id} will retry at timestamp {scheduled_timestamp}", - datetime_timestamp=scheduled_timestamp, - ) + Checks operation status after creating START checkpoints to handle operations + that complete synchronously, avoiding unnecessary execution or suspension. + """ - if ( - checkpointed_result.is_started() - and config.step_semantics is StepSemantics.AT_MOST_ONCE_PER_RETRY + def __init__( + self, + func: Callable[[StepContext], T], + config: StepConfig, + state: ExecutionState, + operation_identifier: OperationIdentifier, + context_logger: Logger, ): - # step was previously interrupted - msg = f"Step operation_id={operation_identifier.operation_id} name={operation_identifier.name} was previously interrupted" - retry_handler( - StepInterruptedError(msg), - state, - operation_identifier, - config, - checkpointed_result, + """Initialize the step operation executor. + + Args: + func: The step function to execute + config: The step configuration + state: The execution state + operation_identifier: The operation identifier + context_logger: The logger for the step context + """ + self.func = func + self.config = config + self.state = state + self.operation_identifier = operation_identifier + self.context_logger = context_logger + self._checkpoint_created = False # Track if we created the checkpoint + + def check_result_status(self) -> CheckResult[T]: + """Check operation status and create START checkpoint if needed. + + Called twice by process() when creating synchronous checkpoints: once before + and once after, to detect if the operation completed immediately. 
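# --- Editor's sketch (not part of the diff) -----------------------------------
# How step semantics map to START-checkpoint synchronization in the method
# below. The enum is a local stand-in mirroring the SDK's StepSemantics.
from enum import Enum, auto


class ToyStepSemantics(Enum):
    AT_MOST_ONCE_PER_RETRY = auto()  # block until START is persisted
    AT_LEAST_ONCE_PER_RETRY = auto()  # fire-and-forget START, run immediately


def start_checkpoint_is_sync(semantics: ToyStepSemantics) -> bool:
    # AT_MOST_ONCE must not run user code before START is durable; otherwise
    # an interruption could let the step execute twice across invocations.
    return semantics is ToyStepSemantics.AT_MOST_ONCE_PER_RETRY


assert start_checkpoint_is_sync(ToyStepSemantics.AT_MOST_ONCE_PER_RETRY)
assert not start_checkpoint_is_sync(ToyStepSemantics.AT_LEAST_ONCE_PER_RETRY)
# -------------------------------------------------------------------------------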
+ + Returns: + CheckResult indicating the next action to take + + Raises: + CallableRuntimeError: For FAILED operations + StepInterruptedError: For interrupted AT_MOST_ONCE operations + SuspendExecution: For PENDING operations waiting for retry + """ + checkpointed_result: CheckpointedResult = self.state.get_checkpoint_result( + self.operation_identifier.operation_id ) - checkpointed_result.raise_callable_error() + # Terminal success - deserialize and return + if checkpointed_result.is_succeeded(): + logger.debug( + "Step already completed, skipping execution for id: %s, name: %s", + self.operation_identifier.operation_id, + self.operation_identifier.name, + ) + if checkpointed_result.result is None: + return CheckResult.create_completed(None) # type: ignore + + result: T = deserialize( + serdes=self.config.serdes, + data=checkpointed_result.result, + operation_id=self.operation_identifier.operation_id, + durable_execution_arn=self.state.durable_execution_arn, + ) + return CheckResult.create_completed(result) + + # Terminal failure + if checkpointed_result.is_failed(): + # Have to throw the exact same error on replay as the checkpointed failure + checkpointed_result.raise_callable_error() + + # Pending retry + if checkpointed_result.is_pending(): + scheduled_timestamp = checkpointed_result.get_next_attempt_timestamp() + # Normally, we'd ensure that a suspension here would be for > 0 seconds; + # however, this is coming from a checkpoint, and we can trust that it is a correct target timestamp. + suspend_with_optional_resume_timestamp( + msg=f"Retry scheduled for {self.operation_identifier.name or self.operation_identifier.operation_id} will retry at timestamp {scheduled_timestamp}", + datetime_timestamp=scheduled_timestamp, + ) - if not ( - checkpointed_result.is_started() - and config.step_semantics is StepSemantics.AT_LEAST_ONCE_PER_RETRY - ): - # Do not checkpoint start for started & AT_LEAST_ONCE execution - # Checkpoint start for the other - start_operation: OperationUpdate = OperationUpdate.create_step_start( - identifier=operation_identifier, - ) - # Checkpoint START operation with appropriate synchronization: - # - AtMostOncePerRetry: Use blocking checkpoint (is_sync=True) to prevent duplicate execution. - # The step must not execute until the START checkpoint is persisted, ensuring exactly-once semantics. - # - AtLeastOncePerRetry: Use non-blocking checkpoint (is_sync=False) for performance optimization. - # The step can execute immediately without waiting for checkpoint persistence, allowing at-least-once semantics. - is_sync: bool = config.step_semantics is StepSemantics.AT_MOST_ONCE_PER_RETRY - state.create_checkpoint(operation_update=start_operation, is_sync=is_sync) - - attempt: int = 0 - if checkpointed_result.operation and checkpointed_result.operation.step_details: - attempt = checkpointed_result.operation.step_details.attempt - - step_context = StepContext( - logger=context_logger.with_log_info( - LogInfo.from_operation_identifier( - execution_arn=state.durable_execution_arn, - op_id=operation_identifier, - attempt=attempt, + # Handle interrupted AT_MOST_ONCE (replay scenario only) + # This check only applies on REPLAY when a new Lambda invocation starts after interruption. + # A STARTED checkpoint with AT_MOST_ONCE on entry means the previous invocation + # was interrupted and it should NOT re-execute. 
+ # + # This check is skipped on fresh executions because: + # - First call (fresh): checkpoint doesn't exist → is_started() returns False → skip this check + # - After creating sync checkpoint and refreshing: if status is STARTED, we return + # ready_to_execute directly, so process() never calls check_result_status() again + if ( + checkpointed_result.is_started() + and self.config.step_semantics is StepSemantics.AT_MOST_ONCE_PER_RETRY + ): + # Step was previously interrupted in a prior invocation - handle retry + msg: str = f"Step operation_id={self.operation_identifier.operation_id} name={self.operation_identifier.name} was previously interrupted" + self.retry_handler(StepInterruptedError(msg), checkpointed_result) + checkpointed_result.raise_callable_error() + + # Ready to execute if STARTED + AT_LEAST_ONCE + if ( + checkpointed_result.is_started() + and self.config.step_semantics is StepSemantics.AT_LEAST_ONCE_PER_RETRY + ): + return CheckResult.create_is_ready_to_execute(checkpointed_result) + + # Create START checkpoint if not exists + if not checkpointed_result.is_existent(): + start_operation: OperationUpdate = OperationUpdate.create_step_start( + identifier=self.operation_identifier, + ) + # Checkpoint START operation with appropriate synchronization: + # - AtMostOncePerRetry: Use blocking checkpoint (is_sync=True) to prevent duplicate execution. + # The step must not execute until the START checkpoint is persisted, ensuring exactly-once semantics. + # - AtLeastOncePerRetry: Use non-blocking checkpoint (is_sync=False) for performance optimization. + # The step can execute immediately without waiting for checkpoint persistence, allowing at-least-once semantics. + is_sync: bool = ( + self.config.step_semantics is StepSemantics.AT_MOST_ONCE_PER_RETRY + ) + self.state.create_checkpoint( + operation_update=start_operation, is_sync=is_sync ) - ) - ) - try: - # this is the actual code provided by the caller to execute durably inside the step - raw_result: T = func(step_context) - serialized_result: str = serialize( - serdes=config.serdes, - value=raw_result, - operation_id=operation_identifier.operation_id, - durable_execution_arn=state.durable_execution_arn, - ) - success_operation: OperationUpdate = OperationUpdate.create_step_succeed( - identifier=operation_identifier, - payload=serialized_result, + # After creating sync checkpoint, check the status + if is_sync: + # Refresh checkpoint result to check for immediate response + refreshed_result: CheckpointedResult = self.state.get_checkpoint_result( + self.operation_identifier.operation_id + ) + + # START checkpoint only returns STARTED status + # Any errors would be thrown as runtime exceptions during checkpoint creation + if not refreshed_result.is_started(): + # This should never happen - defensive check + error_msg: str = f"Unexpected status after START checkpoint: {refreshed_result.status}" + raise InvalidStateError(error_msg) + + # If we reach here, status must be STARTED - ready to execute + return CheckResult.create_is_ready_to_execute(refreshed_result) + + # Ready to execute + return CheckResult.create_is_ready_to_execute(checkpointed_result) + + def execute(self, checkpointed_result: CheckpointedResult) -> T: + """Execute step function with error handling and retry logic. 
+ + Args: + checkpointed_result: The checkpoint data containing operation state + + Returns: + The result of executing the step function + + Raises: + ExecutionError: For fatal errors that should not be retried + May raise other exceptions that will be handled by retry_handler + """ + attempt: int = 0 + if checkpointed_result.operation and checkpointed_result.operation.step_details: + attempt = checkpointed_result.operation.step_details.attempt + + step_context: StepContext = StepContext( + logger=self.context_logger.with_log_info( + LogInfo.from_operation_identifier( + execution_state=self.state, + op_id=self.operation_identifier, + attempt=attempt, + ) + ) ) - # Checkpoint SUCCEED operation with blocking (is_sync=True, default). - # Must ensure the success state is persisted before returning the result to the caller. - # This guarantees the step result is durable and won't be lost if Lambda terminates. - state.create_checkpoint(operation_update=success_operation) - - logger.debug( - "✅ Successfully completed step for id: %s, name: %s", - operation_identifier.operation_id, - operation_identifier.name, - ) - return raw_result # noqa: TRY300 - except Exception as e: - if isinstance(e, ExecutionError): - # no retry on fatal - e.g checkpoint exception - logger.debug( - "💥 Fatal error for id: %s, name: %s", - operation_identifier.operation_id, - operation_identifier.name, + try: + # This is the actual code provided by the caller to execute durably inside the step + raw_result: T = self.func(step_context) + serialized_result: str = serialize( + serdes=self.config.serdes, + value=raw_result, + operation_id=self.operation_identifier.operation_id, + durable_execution_arn=self.state.durable_execution_arn, ) - # this bubbles up to execution.durable_execution, where it will exit with FAILED - raise - - logger.exception( - "❌ failed step for id: %s, name: %s", - operation_identifier.operation_id, - operation_identifier.name, - ) - retry_handler(e, state, operation_identifier, config, checkpointed_result) - # if we've failed to raise an exception from the retry_handler, then we are in a - # weird state, and should crash terminate the execution - msg = "retry handler should have raised an exception, but did not." - raise ExecutionError(msg) from None + success_operation: OperationUpdate = OperationUpdate.create_step_succeed( + identifier=self.operation_identifier, + payload=serialized_result, + ) + # Checkpoint SUCCEED operation with blocking (is_sync=True, default). + # Must ensure the success state is persisted before returning the result to the caller. + # This guarantees the step result is durable and won't be lost if Lambda terminates. + self.state.create_checkpoint(operation_update=success_operation) -# TODO: I don't much like this func, needs refactor. Messy grab-bag of args, refine. 
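# --- Editor's sketch (not part of the diff) -----------------------------------
# The retry bookkeeping performed by retry_handler() below, in isolation. The
# dataclass and helper are editorial stand-ins for the SDK's RetryDecision.
from dataclasses import dataclass


@dataclass
class ToyRetryDecision:
    should_retry: bool
    delay_seconds: int = 0


def next_delay(decision: ToyRetryDecision) -> int | None:
    """Return the clamped delay to checkpoint, or None when retries are exhausted."""
    if not decision.should_retry:
        return None
    # The model requires a minimum delay of 1 second; it is enforced here
    # rather than inside the generic suspension helpers.
    return max(1, decision.delay_seconds)


assert next_delay(ToyRetryDecision(True, 0)) == 1
assert next_delay(ToyRetryDecision(True, 30)) == 30
assert next_delay(ToyRetryDecision(False)) is None
# -------------------------------------------------------------------------------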
-def retry_handler( - error: Exception, - state: ExecutionState, - operation_identifier: OperationIdentifier, - config: StepConfig, - checkpointed_result: CheckpointedResult, -): - """Checkpoint and suspend for replay if retry required, otherwise raise error.""" - error_object = ErrorObject.from_exception(error) + logger.debug( + "✅ Successfully completed step for id: %s, name: %s", + self.operation_identifier.operation_id, + self.operation_identifier.name, + ) + return raw_result # noqa: TRY300 + except Exception as e: + if isinstance(e, ExecutionError): + # No retry on fatal - e.g. checkpoint exception + logger.debug( + "💥 Fatal error for id: %s, name: %s", + self.operation_identifier.operation_id, + self.operation_identifier.name, + ) + # This bubbles up to execution.durable_execution, where it will exit with FAILED + raise + + logger.exception( + "❌ failed step for id: %s, name: %s", + self.operation_identifier.operation_id, + self.operation_identifier.name, + ) - retry_strategy = config.retry_strategy or RetryPresets.default() + self.retry_handler(e, checkpointed_result) + # If we've failed to raise an exception from the retry_handler, then we are in a + # weird state, and should crash-terminate the execution + msg = "retry handler should have raised an exception, but did not." + raise ExecutionError(msg) from None - retry_attempt: int = ( - checkpointed_result.operation.step_details.attempt - if ( - checkpointed_result.operation and checkpointed_result.operation.step_details - ) - else 0 - ) - retry_decision: RetryDecision = retry_strategy(error, retry_attempt + 1) - - if retry_decision.should_retry: - logger.debug( - "Retrying step for id: %s, name: %s, attempt: %s", - operation_identifier.operation_id, - operation_identifier.name, - retry_attempt + 1, + def retry_handler( + self, + error: Exception, + checkpointed_result: CheckpointedResult, + ): + """Checkpoint and suspend for replay if retry required, otherwise raise error. + + Args: + error: The exception that occurred during step execution + checkpointed_result: The checkpoint data containing operation state + + Raises: + SuspendExecution: If retry is scheduled + StepInterruptedError: If the error is a StepInterruptedError + CallableRuntimeError: If retry is exhausted or error is not retryable + """ + error_object = ErrorObject.from_exception(error) + + retry_strategy = self.config.retry_strategy or RetryPresets.default() + + retry_attempt: int = ( + checkpointed_result.operation.step_details.attempt + if ( + checkpointed_result.operation + and checkpointed_result.operation.step_details + ) + else 0 ) + retry_decision: RetryDecision = retry_strategy(error, retry_attempt + 1) - # because we are issuing a retry and create an OperationUpdate - # we enforce a minimum delay second of 1, to match model behaviour. - # we localize enforcement and keep it outside suspension methods as: - # a) those are used throughout the codebase, e.g. in wait(..) <- enforcement is done in context - # b) they shouldn't know model specific details <- enforcement is done above - # and c) this "issue" arises from retry-decision and we shouldn't push it down - delay_seconds = retry_decision.delay_seconds - if delay_seconds < 1: - logger.warning( - ( - "Retry delay_seconds step for id: %s, name: %s," - "attempt: %s is %d < 1. Setting to minimum of 1 seconds."
- ), - operation_identifier.operation_id, - operation_identifier.name, + if retry_decision.should_retry: + logger.debug( + "Retrying step for id: %s, name: %s, attempt: %s", + self.operation_identifier.operation_id, + self.operation_identifier.name, retry_attempt + 1, - delay_seconds, ) - delay_seconds = 1 - retry_operation: OperationUpdate = OperationUpdate.create_step_retry( - identifier=operation_identifier, - error=error_object, - next_attempt_delay_seconds=delay_seconds, - ) + # because we are issuing a retry and creating an OperationUpdate + # we enforce a minimum delay of 1 second, to match model behaviour. + # we localize enforcement and keep it outside suspension methods as: + # a) those are used throughout the codebase, e.g. in wait(..) <- enforcement is done in context + # b) they shouldn't know model specific details <- enforcement is done above + # and c) this "issue" arises from retry-decision and we shouldn't push it down + delay_seconds = retry_decision.delay_seconds + if delay_seconds < 1: + logger.warning( + ( + "Retry delay_seconds for step id: %s, name: %s, " + "attempt: %s is %d < 1. Setting to minimum of 1 second." + ), + self.operation_identifier.operation_id, + self.operation_identifier.name, + retry_attempt + 1, + delay_seconds, + ) + delay_seconds = 1 + + retry_operation: OperationUpdate = OperationUpdate.create_step_retry( + identifier=self.operation_identifier, + error=error_object, + next_attempt_delay_seconds=delay_seconds, + ) - # Checkpoint RETRY operation with blocking (is_sync=True, default). - # Must ensure retry state is persisted before suspending execution. - # This guarantees the retry attempt count and next attempt timestamp are durable. - state.create_checkpoint(operation_update=retry_operation) - - suspend_with_optional_resume_delay( - msg=( - f"Retry scheduled for {operation_identifier.operation_id}" - f"in {retry_decision.delay_seconds} seconds" - ), - delay_seconds=delay_seconds, - ) + # Checkpoint RETRY operation with blocking (is_sync=True, default). + # Must ensure retry state is persisted before suspending execution. + # This guarantees the retry attempt count and next attempt timestamp are durable. + self.state.create_checkpoint(operation_update=retry_operation) - # no retry - fail_operation: OperationUpdate = OperationUpdate.create_step_fail( - identifier=operation_identifier, error=error_object - ) + suspend_with_optional_resume_delay( + msg=( + f"Retry scheduled for {self.operation_identifier.operation_id} " + f"in {delay_seconds} seconds" + ), + delay_seconds=delay_seconds, + ) + + # no retry + fail_operation: OperationUpdate = OperationUpdate.create_step_fail( + identifier=self.operation_identifier, error=error_object + ) - # Checkpoint FAIL operation with blocking (is_sync=True, default). - # Must ensure the failure state is persisted before raising the exception. - # This guarantees the error is durable and the step won't be retried on replay.
+ self.state.create_checkpoint(operation_update=fail_operation) - if isinstance(error, StepInterruptedError): - raise error + if isinstance(error, StepInterruptedError): + raise error - raise error_object.to_callable_runtime_error() + raise error_object.to_callable_runtime_error() diff --git a/src/aws_durable_execution_sdk_python/operation/wait.py b/src/aws_durable_execution_sdk_python/operation/wait.py index 90d0880..fc16e66 100644 --- a/src/aws_durable_execution_sdk_python/operation/wait.py +++ b/src/aws_durable_execution_sdk_python/operation/wait.py @@ -6,6 +6,10 @@ from typing import TYPE_CHECKING from aws_durable_execution_sdk_python.lambda_service import OperationUpdate, WaitOptions +from aws_durable_execution_sdk_python.operation.base import ( + CheckResult, + OperationExecutor, +) from aws_durable_execution_sdk_python.suspend import suspend_with_optional_resume_delay if TYPE_CHECKING: @@ -18,36 +22,90 @@ logger = logging.getLogger(__name__) -def wait_handler( - seconds: int, state: ExecutionState, operation_identifier: OperationIdentifier -) -> None: - logger.debug( - "Wait requested for id: %s, name: %s", - operation_identifier.operation_id, - operation_identifier.name, - ) +class WaitOperationExecutor(OperationExecutor[None]): + """Executor for wait operations. - checkpointed_result: CheckpointedResult = state.get_checkpoint_result( - operation_identifier.operation_id - ) + Checks operation status after creating START checkpoints to handle operations + that complete synchronously, avoiding unnecessary execution or suspension. + """ - if checkpointed_result.is_succeeded(): - logger.debug( - "Wait already completed, skipping wait for id: %s, name: %s", - operation_identifier.operation_id, - operation_identifier.name, - ) - return + def __init__( + self, + seconds: int, + state: ExecutionState, + operation_identifier: OperationIdentifier, + ): + """Initialize the wait operation executor. + + Args: + seconds: Number of seconds to wait + state: The execution state + operation_identifier: The operation identifier + """ + self.seconds = seconds + self.state = state + self.operation_identifier = operation_identifier - if not checkpointed_result.is_existent(): - operation = OperationUpdate.create_wait_start( - identifier=operation_identifier, - wait_options=WaitOptions(wait_seconds=seconds), + def check_result_status(self) -> CheckResult[None]: + """Check operation status and create START checkpoint if needed. + + Called twice by process() when creating synchronous checkpoints: once before + and once after, to detect if the operation completed immediately. + + Returns: + CheckResult indicating the next action to take + + Raises: + SuspendExecution: When wait timer has not completed + """ + checkpointed_result: CheckpointedResult = self.state.get_checkpoint_result( + self.operation_identifier.operation_id ) - # Checkpoint wait START with blocking (is_sync=True, default). - # Must ensure the wait operation and scheduled timestamp are persisted before suspending. - # This guarantees the wait will resume at the correct time on the next invocation. 
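# --- Editor's sketch (not part of the diff) -----------------------------------
# Suspension is modeled as an exception that unwinds the handler so the
# platform can resume the execution later. A toy re-implementation under that
# assumption (the class here is a stand-in for the SDK's SuspendExecution):
class ToySuspendExecution(Exception):
    def __init__(self, msg: str, delay_seconds: int | None = None) -> None:
        super().__init__(msg)
        self.delay_seconds = delay_seconds


def toy_suspend(msg: str, delay_seconds: int | None) -> None:
    raise ToySuspendExecution(msg, delay_seconds)  # always raises, never returns


try:
    toy_suspend("Wait for 30 seconds", 30)
except ToySuspendExecution as e:
    assert e.delay_seconds == 30
# -------------------------------------------------------------------------------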
- state.create_checkpoint(operation_update=operation) - msg = f"Wait for {seconds} seconds" - suspend_with_optional_resume_delay(msg, seconds) # throws suspend + # Terminal success - wait completed + if checkpointed_result.is_succeeded(): + logger.debug( + "Wait already completed, skipping wait for id: %s, name: %s", + self.operation_identifier.operation_id, + self.operation_identifier.name, + ) + return CheckResult.create_completed(None) + + # Create START checkpoint if not exists + if not checkpointed_result.is_existent(): + operation: OperationUpdate = OperationUpdate.create_wait_start( + identifier=self.operation_identifier, + wait_options=WaitOptions(wait_seconds=self.seconds), + ) + # Checkpoint wait START with blocking (is_sync=True, default). + # Must ensure the wait operation and scheduled timestamp are persisted before suspending. + # This guarantees the wait will resume at the correct time on the next invocation. + self.state.create_checkpoint(operation_update=operation, is_sync=True) + + logger.debug( + "Wait checkpoint created for id: %s, name: %s, will check for immediate response", + self.operation_identifier.operation_id, + self.operation_identifier.name, + ) + + # Signal to process() that checkpoint was created - which will re-run this check_result_status + # check from the top + return CheckResult.create_started() + + # Ready to suspend (checkpoint exists) + return CheckResult.create_is_ready_to_execute(checkpointed_result) + + def execute(self, _checkpointed_result: CheckpointedResult) -> None: + """Execute wait by suspending. + + Wait operations 'execute' by suspending execution until the timer completes. + This method never returns normally - it always suspends. + + Args: + _checkpointed_result: The checkpoint data (unused for wait) + + Raises: + SuspendExecution: Always suspends to wait for timer completion + """ + msg: str = f"Wait for {self.seconds} seconds" + suspend_with_optional_resume_delay(msg, self.seconds) # throws suspend diff --git a/src/aws_durable_execution_sdk_python/operation/wait_for_condition.py b/src/aws_durable_execution_sdk_python/operation/wait_for_condition.py index bb2d6e7..d1c2b4f 100644 --- a/src/aws_durable_execution_sdk_python/operation/wait_for_condition.py +++ b/src/aws_durable_execution_sdk_python/operation/wait_for_condition.py @@ -13,6 +13,10 @@ OperationUpdate, ) from aws_durable_execution_sdk_python.logger import LogInfo +from aws_durable_execution_sdk_python.operation.base import ( + CheckResult, + OperationExecutor, +) from aws_durable_execution_sdk_python.serdes import deserialize, serialize from aws_durable_execution_sdk_python.suspend import ( suspend_with_optional_resume_delay, @@ -40,196 +44,239 @@ logger = logging.getLogger(__name__) -def wait_for_condition_handler( - check: Callable[[T, WaitForConditionCheckContext], T], - config: WaitForConditionConfig[T], - state: ExecutionState, - operation_identifier: OperationIdentifier, - context_logger: Logger, -) -> T: - """Handle wait_for_condition operation. +class WaitForConditionOperationExecutor(OperationExecutor[T]): + """Executor for wait_for_condition operations. - wait_for_condition creates a STEP checkpoint. + Checks operation status after creating START checkpoints to handle operations + that complete synchronously, avoiding unnecessary execution or suspension. 
""" - logger.debug( - "▶️ Executing wait_for_condition for id: %s, name: %s", - operation_identifier.operation_id, - operation_identifier.name, - ) - checkpointed_result: CheckpointedResult = state.get_checkpoint_result( - operation_identifier.operation_id - ) - - # Check if already completed - if checkpointed_result.is_succeeded(): - logger.debug( - "wait_for_condition already completed for id: %s, name: %s", - operation_identifier.operation_id, - operation_identifier.name, - ) - if checkpointed_result.result is None: - return None # type: ignore - return deserialize( - serdes=config.serdes, - data=checkpointed_result.result, - operation_id=operation_identifier.operation_id, - durable_execution_arn=state.durable_execution_arn, + def __init__( + self, + check: Callable[[T, WaitForConditionCheckContext], T], + config: WaitForConditionConfig[T], + state: ExecutionState, + operation_identifier: OperationIdentifier, + context_logger: Logger, + ): + """Initialize the wait_for_condition executor. + + Args: + check: The check function to evaluate the condition + config: Configuration for the wait_for_condition operation + state: The execution state + operation_identifier: The operation identifier + context_logger: Logger for the operation context + """ + self.check = check + self.config = config + self.state = state + self.operation_identifier = operation_identifier + self.context_logger = context_logger + + def check_result_status(self) -> CheckResult[T]: + """Check operation status and create START checkpoint if needed. + + Called twice by process() when creating synchronous checkpoints: once before + and once after, to detect if the operation completed immediately. + + Returns: + CheckResult indicating the next action to take + + Raises: + CallableRuntimeError: For FAILED operations + SuspendExecution: For PENDING operations waiting for retry + """ + checkpointed_result = self.state.get_checkpoint_result( + self.operation_identifier.operation_id ) - if checkpointed_result.is_failed(): - checkpointed_result.raise_callable_error() + # Check if already completed + if checkpointed_result.is_succeeded(): + logger.debug( + "wait_for_condition already completed for id: %s, name: %s", + self.operation_identifier.operation_id, + self.operation_identifier.name, + ) + if checkpointed_result.result is None: + return CheckResult.create_completed(None) # type: ignore + result = deserialize( + serdes=self.config.serdes, + data=checkpointed_result.result, + operation_id=self.operation_identifier.operation_id, + durable_execution_arn=self.state.durable_execution_arn, + ) + return CheckResult.create_completed(result) + + # Terminal failure + if checkpointed_result.is_failed(): + checkpointed_result.raise_callable_error() + + # Pending retry + if checkpointed_result.is_pending(): + scheduled_timestamp = checkpointed_result.get_next_attempt_timestamp() + suspend_with_optional_resume_timestamp( + msg=f"wait_for_condition {self.operation_identifier.name or self.operation_identifier.operation_id} will retry at timestamp {scheduled_timestamp}", + datetime_timestamp=scheduled_timestamp, + ) - if checkpointed_result.is_pending(): - scheduled_timestamp = checkpointed_result.get_next_attempt_timestamp() - suspend_with_optional_resume_timestamp( - msg=f"wait_for_condition {operation_identifier.name or operation_identifier.operation_id} will retry at timestamp {scheduled_timestamp}", - datetime_timestamp=scheduled_timestamp, - ) + # Create START checkpoint if not started + if not checkpointed_result.is_started(): + 
start_operation = OperationUpdate.create_wait_for_condition_start( + identifier=self.operation_identifier, + ) + # Checkpoint wait_for_condition START with non-blocking (is_sync=False). + # This is purely for observability - we don't need to wait for persistence before + # executing the check function. The START checkpoint just records that polling began. + self.state.create_checkpoint( + operation_update=start_operation, is_sync=False + ) + # For async checkpoint, no immediate response possible + # Proceed directly to execute with current checkpoint data + + # Ready to execute check function + return CheckResult.create_is_ready_to_execute(checkpointed_result) + + def execute(self, checkpointed_result: CheckpointedResult) -> T: + """Execute check function and handle decision. + + Args: + checkpointed_result: The checkpoint data - attempt: int = 1 - if checkpointed_result.is_started_or_ready(): - # This is a retry - get state from previous checkpoint - if checkpointed_result.result: + Returns: + The final state when condition is met + + Raises: + Suspends if condition not met + Raises error if check function fails + """ + # Determine current state from checkpoint + if checkpointed_result.is_started_or_ready() and checkpointed_result.result: try: current_state = deserialize( - serdes=config.serdes, + serdes=self.config.serdes, data=checkpointed_result.result, - operation_id=operation_identifier.operation_id, - durable_execution_arn=state.durable_execution_arn, + operation_id=self.operation_identifier.operation_id, + durable_execution_arn=self.state.durable_execution_arn, ) except Exception: - # default to initial state if there's an error getting checkpointed state + # Default to initial state if there's an error getting checkpointed state logger.exception( "⚠️ wait_for_condition failed to deserialize state for id: %s, name: %s. Using initial state.", - operation_identifier.operation_id, - operation_identifier.name, + self.operation_identifier.operation_id, + self.operation_identifier.name, ) - current_state = config.initial_state + current_state = self.config.initial_state else: - current_state = config.initial_state + current_state = self.config.initial_state - # at this point operation has to exist. Nonetheless, just in case somehow it's not there. + # Get attempt number + attempt: int = 1 if checkpointed_result.operation and checkpointed_result.operation.step_details: attempt = checkpointed_result.operation.step_details.attempt - else: - # First execution - current_state = config.initial_state - - # Checkpoint START for observability. - if not checkpointed_result.is_started(): - start_operation: OperationUpdate = ( - OperationUpdate.create_wait_for_condition_start( - identifier=operation_identifier, - ) - ) - # Checkpoint wait_for_condition START with non-blocking (is_sync=False). - # This is purely for observability - we don't need to wait for persistence before - # executing the check function. The START checkpoint just records that polling began. 
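The `is_sync=False` START checkpoint above trades durability for latency. A rough stand-alone illustration of the difference (the real SDK routes writes through its checkpoint batcher; `persist` here is a placeholder):

```python
from concurrent.futures import ThreadPoolExecutor

_pool = ThreadPoolExecutor(max_workers=1)


def persist(record: dict) -> None:
    ...  # imagine a network call to the checkpoint service


def create_checkpoint(record: dict, is_sync: bool = True) -> None:
    future = _pool.submit(persist, record)
    if is_sync:
        # Durability barrier: block until written, and surface any failure here.
        future.result()


create_checkpoint({"op": "start"}, is_sync=False)  # fire-and-forget, observability only
create_checkpoint({"op": "retry"})                 # must be durable before suspending
_pool.shutdown(wait=True)
```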
- state.create_checkpoint(operation_update=start_operation, is_sync=False) - - try: - # Execute the check function with the injected logger - check_context = WaitForConditionCheckContext( - logger=context_logger.with_log_info( - LogInfo.from_operation_identifier( - execution_arn=state.durable_execution_arn, - op_id=operation_identifier, - attempt=attempt, + + try: + # Execute the check function with the injected logger + check_context = WaitForConditionCheckContext( + logger=self.context_logger.with_log_info( + LogInfo.from_operation_identifier( + execution_state=self.state, + op_id=self.operation_identifier, + attempt=attempt, + ) ) ) - ) - new_state = check(current_state, check_context) + new_state = self.check(current_state, check_context) - # Check if condition is met with the wait strategy - decision: WaitForConditionDecision = config.wait_strategy(new_state, attempt) - - serialized_state = serialize( - serdes=config.serdes, - value=new_state, - operation_id=operation_identifier.operation_id, - durable_execution_arn=state.durable_execution_arn, - ) - - logger.debug( - "wait_for_condition check completed: %s, name: %s, attempt: %s", - operation_identifier.operation_id, - operation_identifier.name, - attempt, - ) + # Check if condition is met with the wait strategy + decision: WaitForConditionDecision = self.config.wait_strategy( + new_state, attempt + ) - if not decision.should_continue: - # Condition is met - complete successfully - success_operation = OperationUpdate.create_wait_for_condition_succeed( - identifier=operation_identifier, - payload=serialized_state, + serialized_state = serialize( + serdes=self.config.serdes, + value=new_state, + operation_id=self.operation_identifier.operation_id, + durable_execution_arn=self.state.durable_execution_arn, ) - # Checkpoint SUCCEED operation with blocking (is_sync=True, default). - # Must ensure the final state is persisted before returning to the caller. - # This guarantees the condition result is durable and won't be re-evaluated on replay. - state.create_checkpoint(operation_update=success_operation) logger.debug( - "✅ wait_for_condition completed for id: %s, name: %s", - operation_identifier.operation_id, - operation_identifier.name, - ) - return new_state - - # Condition not met - schedule retry - # we enforce a minimum delay second of 1, to match model behaviour. - # we localize enforcement and keep it outside suspension methods as: - # a) those are used throughout the codebase, e.g. in wait(..) <- enforcement is done in context - # b) they shouldn't know model specific details <- enforcement is done above - # and c) this "issue" arises from retry-decision and shouldn't be chased deeper. - delay_seconds = decision.delay_seconds - if delay_seconds is not None and delay_seconds < 1: - logger.warning( - ( - "WaitDecision delay_seconds step for id: %s, name: %s," - "is %d < 1. Setting to minimum of 1 seconds." 
-                ),
-                operation_identifier.operation_id,
-                operation_identifier.name,
-                delay_seconds,
+                "wait_for_condition check completed: %s, name: %s, attempt: %s",
+                self.operation_identifier.operation_id,
+                self.operation_identifier.name,
+                attempt,
             )
-            delay_seconds = 1

-        retry_operation = OperationUpdate.create_wait_for_condition_retry(
-            identifier=operation_identifier,
-            payload=serialized_state,
-            next_attempt_delay_seconds=delay_seconds,
-        )
+            if not decision.should_continue:
+                # Condition is met - complete successfully
+                success_operation = OperationUpdate.create_wait_for_condition_succeed(
+                    identifier=self.operation_identifier,
+                    payload=serialized_state,
+                )
+                # Checkpoint SUCCEED operation with blocking (is_sync=True, default).
+                # Must ensure the final state is persisted before returning to the caller.
+                # This guarantees the condition result is durable and won't be re-evaluated on replay.
+                self.state.create_checkpoint(operation_update=success_operation)
+
+                logger.debug(
+                    "✅ wait_for_condition completed for id: %s, name: %s",
+                    self.operation_identifier.operation_id,
+                    self.operation_identifier.name,
+                )
+                return new_state
+
+            # Condition not met - schedule retry
+            # We enforce a minimum delay of 1 second, to match model behaviour.
+            delay_seconds = decision.delay_seconds
+            if delay_seconds is not None and delay_seconds < 1:
+                logger.warning(
+                    (
+                        "WaitDecision delay_seconds for step id: %s, name: %s "
+                        "is %d < 1. Setting to minimum of 1 second."
+                    ),
+                    self.operation_identifier.operation_id,
+                    self.operation_identifier.name,
+                    delay_seconds,
+                )
+                delay_seconds = 1

-        # Checkpoint RETRY operation with blocking (is_sync=True, default).
-        # Must ensure the current state and next attempt timestamp are persisted before suspending.
-        # This guarantees the polling state is durable and will resume correctly on the next invocation.
-        state.create_checkpoint(operation_update=retry_operation)
+            retry_operation = OperationUpdate.create_wait_for_condition_retry(
+                identifier=self.operation_identifier,
+                payload=serialized_state,
+                next_attempt_delay_seconds=delay_seconds,
+            )

-        suspend_with_optional_resume_delay(
-            msg=f"wait_for_condition {operation_identifier.name or operation_identifier.operation_id} will retry in {decision.delay_seconds} seconds",
-            delay_seconds=decision.delay_seconds,
-        )
+            # Checkpoint RETRY operation with blocking (is_sync=True, default).
+            # Must ensure the current state and next attempt timestamp are persisted before suspending.
+            # This guarantees the polling state is durable and will resume correctly on the next invocation.
+            self.state.create_checkpoint(operation_update=retry_operation)

-    except Exception as e:
-        # Mark as failed - waitForCondition doesn't have its own retry logic for errors
-        # If the check function throws, it's considered a failure
-        logger.exception(
-            "❌ wait_for_condition failed for id: %s, name: %s",
-            operation_identifier.operation_id,
-            operation_identifier.name,
-        )
+            suspend_with_optional_resume_delay(
+                msg=f"wait_for_condition {self.operation_identifier.name or self.operation_identifier.operation_id} will retry in {delay_seconds} seconds",
+                delay_seconds=delay_seconds,
+            )
+
+        except Exception as e:
+            # Mark as failed - waitForCondition doesn't have its own retry logic for errors
+            # If the check function throws, it's considered a failure
+            logger.exception(
+                "❌ wait_for_condition failed for id: %s, name: %s",
+                self.operation_identifier.operation_id,
+                self.operation_identifier.name,
+            )

-        fail_operation = OperationUpdate.create_wait_for_condition_fail(
-            identifier=operation_identifier,
-            error=ErrorObject.from_exception(e),
+            fail_operation = OperationUpdate.create_wait_for_condition_fail(
+                identifier=self.operation_identifier,
+                error=ErrorObject.from_exception(e),
+            )
+            # Checkpoint FAIL operation with blocking (is_sync=True, default).
+            # Must ensure the failure state is persisted before raising the exception.
+            # This guarantees the error is durable and the condition won't be re-evaluated on replay.
+            self.state.create_checkpoint(operation_update=fail_operation)
+            raise
+
+        msg: str = (
+            "wait_for_condition should never reach this point"  # pragma: no cover
+        )
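For context on how a caller drives this executor: the condition is re-evaluated until the user-supplied wait strategy stops polling. A runnable sketch of such a strategy, with local stand-ins mirroring the `Duration` and `WaitForConditionDecision` shapes used elsewhere in this diff:

```python
from dataclasses import dataclass


@dataclass
class Duration:
    """Stand-in mirroring the Duration factories used in this diff."""

    seconds: int = 0

    @classmethod
    def from_seconds(cls, s: int) -> "Duration":
        return cls(seconds=s)

    def to_seconds(self) -> int:
        return self.seconds


@dataclass
class WaitForConditionDecision:
    should_continue: bool
    delay: Duration

    @classmethod
    def continue_waiting(cls, delay: Duration) -> "WaitForConditionDecision":
        return cls(True, delay)

    @classmethod
    def stop_polling(cls) -> "WaitForConditionDecision":
        return cls(False, Duration())


def wait_strategy(state: dict, attempt: int) -> WaitForConditionDecision:
    # Stop once the (hypothetical) job reports done, or after 10 attempts.
    if state.get("done") or attempt >= 10:
        return WaitForConditionDecision.stop_polling()
    return WaitForConditionDecision.continue_waiting(Duration.from_seconds(10))


print(wait_strategy({"done": False}, 1).delay.to_seconds())  # 10
print(wait_strategy({"done": True}, 2).should_continue)      # False
```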
- state.create_checkpoint(operation_update=fail_operation) - raise - - msg: str = "wait_for_condition should never reach this point" # pragma: no cover - raise ExecutionError(msg) # pragma: no cover + raise ExecutionError(msg) # pragma: no cover diff --git a/src/aws_durable_execution_sdk_python/retries.py b/src/aws_durable_execution_sdk_python/retries.py index 4b8e885..5a09db2 100644 --- a/src/aws_durable_execution_sdk_python/retries.py +++ b/src/aws_durable_execution_sdk_python/retries.py @@ -7,82 +7,113 @@ from dataclasses import dataclass, field from typing import TYPE_CHECKING -from aws_durable_execution_sdk_python.config import JitterStrategy +from aws_durable_execution_sdk_python.config import Duration, JitterStrategy if TYPE_CHECKING: from collections.abc import Callable Numeric = int | float +# Default pattern that matches all error messages +_DEFAULT_RETRYABLE_ERROR_PATTERN = re.compile(r".*") + @dataclass class RetryDecision: """Decision about whether to retry a step and with what delay.""" should_retry: bool - delay_seconds: int + delay: Duration + + @property + def delay_seconds(self) -> int: + """Get delay in seconds.""" + return self.delay.to_seconds() @classmethod - def retry(cls, delay_seconds: int) -> RetryDecision: + def retry(cls, delay: Duration) -> RetryDecision: """Create a retry decision.""" - return cls(should_retry=True, delay_seconds=delay_seconds) + return cls(should_retry=True, delay=delay) @classmethod def no_retry(cls) -> RetryDecision: """Create a no-retry decision.""" - return cls(should_retry=False, delay_seconds=0) + return cls(should_retry=False, delay=Duration()) @dataclass class RetryStrategyConfig: max_attempts: int = 3 - initial_delay_seconds: int = 5 - max_delay_seconds: int = 300 # 5 minutes + initial_delay: Duration = field(default_factory=lambda: Duration.from_seconds(5)) + max_delay: Duration = field( + default_factory=lambda: Duration.from_minutes(5) + ) # 5 minutes backoff_rate: Numeric = 2.0 jitter_strategy: JitterStrategy = field(default=JitterStrategy.FULL) - retryable_errors: list[str | re.Pattern] = field( - default_factory=lambda: [re.compile(r".*")] - ) - retryable_error_types: list[type[Exception]] = field(default_factory=list) + retryable_errors: list[str | re.Pattern] | None = None + retryable_error_types: list[type[Exception]] | None = None + + @property + def initial_delay_seconds(self) -> int: + """Get initial delay in seconds.""" + return self.initial_delay.to_seconds() + + @property + def max_delay_seconds(self) -> int: + """Get max delay in seconds.""" + return self.max_delay.to_seconds() def create_retry_strategy( - config: RetryStrategyConfig, + config: RetryStrategyConfig | None = None, ) -> Callable[[Exception, int], RetryDecision]: if config is None: config = RetryStrategyConfig() + # Apply default retryableErrors only if user didn't specify either filter + should_use_default_errors: bool = ( + config.retryable_errors is None and config.retryable_error_types is None + ) + + retryable_errors: list[str | re.Pattern] = ( + config.retryable_errors + if config.retryable_errors is not None + else ([_DEFAULT_RETRYABLE_ERROR_PATTERN] if should_use_default_errors else []) + ) + retryable_error_types: list[type[Exception]] = config.retryable_error_types or [] + def retry_strategy(error: Exception, attempts_made: int) -> RetryDecision: # Check if we've exceeded max attempts if attempts_made >= config.max_attempts: return RetryDecision.no_retry() # Check if error is retryable based on error message - is_retryable_error_message = any( + 
is_retryable_error_message: bool = any( pattern.search(str(error)) if isinstance(pattern, re.Pattern) else pattern in str(error) - for pattern in config.retryable_errors + for pattern in retryable_errors ) # Check if error is retryable based on error type - is_retryable_error_type = any( - isinstance(error, error_type) for error_type in config.retryable_error_types + is_retryable_error_type: bool = any( + isinstance(error, error_type) for error_type in retryable_error_types ) if not is_retryable_error_message and not is_retryable_error_type: return RetryDecision.no_retry() # Calculate delay with exponential backoff - delay = min( + base_delay: float = min( config.initial_delay_seconds * (config.backoff_rate ** (attempts_made - 1)), config.max_delay_seconds, ) - delay_with_jitter = delay + config.jitter_strategy.compute_jitter(delay) - delay_with_jitter = math.ceil(delay_with_jitter) - final_delay = max(1, delay_with_jitter) + # Apply jitter to get final delay + delay_with_jitter: float = config.jitter_strategy.apply_jitter(base_delay) + # Round up and ensure minimum of 1 second + final_delay: int = max(1, math.ceil(delay_with_jitter)) - return RetryDecision.retry(round(final_delay)) + return RetryDecision.retry(Duration(seconds=final_delay)) return retry_strategy @@ -101,8 +132,8 @@ def default(cls) -> Callable[[Exception, int], RetryDecision]: return create_retry_strategy( RetryStrategyConfig( max_attempts=6, - initial_delay_seconds=5, - max_delay_seconds=60, + initial_delay=Duration.from_seconds(5), + max_delay=Duration.from_minutes(1), backoff_rate=2, jitter_strategy=JitterStrategy.FULL, ) @@ -123,8 +154,8 @@ def resource_availability(cls) -> Callable[[Exception, int], RetryDecision]: return create_retry_strategy( RetryStrategyConfig( max_attempts=5, - initial_delay_seconds=5, - max_delay_seconds=300, + initial_delay=Duration.from_seconds(5), + max_delay=Duration.from_minutes(5), backoff_rate=2, ) ) @@ -135,8 +166,8 @@ def critical(cls) -> Callable[[Exception, int], RetryDecision]: return create_retry_strategy( RetryStrategyConfig( max_attempts=10, - initial_delay_seconds=1, - max_delay_seconds=60, + initial_delay=Duration.from_seconds(1), + max_delay=Duration.from_minutes(1), backoff_rate=1.5, jitter_strategy=JitterStrategy.NONE, ) diff --git a/src/aws_durable_execution_sdk_python/serdes.py b/src/aws_durable_execution_sdk_python/serdes.py index e979a72..b3b704a 100644 --- a/src/aws_durable_execution_sdk_python/serdes.py +++ b/src/aws_durable_execution_sdk_python/serdes.py @@ -32,6 +32,7 @@ from enum import StrEnum from typing import Any, Generic, Protocol, TypeVar +from aws_durable_execution_sdk_python.concurrency.models import BatchResult from aws_durable_execution_sdk_python.exceptions import ( DurableExecutionsError, ExecutionError, @@ -62,6 +63,7 @@ class TypeTag(StrEnum): TUPLE = "t" LIST = "l" DICT = "m" + BATCH_RESULT = "br" @dataclass(frozen=True) @@ -206,7 +208,14 @@ def dispatcher(self): def encode(self, obj: Any) -> EncodedValue: """Encode container using dispatcher for recursive elements.""" + match obj: + case BatchResult(): + # Encode BatchResult as dict with special tag + return EncodedValue( + TypeTag.BATCH_RESULT, + self._wrap(obj.to_dict(), self.dispatcher).value, + ) case list(): return EncodedValue( TypeTag.LIST, [self._wrap(v, self.dispatcher) for v in obj] @@ -230,7 +239,13 @@ def encode(self, obj: Any) -> EncodedValue: def decode(self, tag: TypeTag, value: Any) -> Any: """Decode container using dispatcher for recursive elements.""" + match tag: + case 
TypeTag.BATCH_RESULT: + # Decode BatchResult from dict - value is already the dict structure + # First decode it as a dict to unwrap all nested EncodedValues + decoded_dict = self.decode(TypeTag.DICT, value) + return BatchResult.from_dict(decoded_dict) case TypeTag.LIST: if not isinstance(value, list): msg = f"Expected list, got {type(value)}" @@ -292,7 +307,7 @@ def encode(self, obj: Any) -> EncodedValue: return self.decimal_codec.encode(obj) case datetime() | date(): return self.datetime_codec.encode(obj) - case list() | tuple() | dict(): + case list() | tuple() | dict() | BatchResult(): return self.container_codec.encode(obj) case _: msg = f"Unsupported type: {type(obj)}" @@ -316,7 +331,7 @@ def decode(self, tag: TypeTag, value: Any) -> Any: return self.decimal_codec.decode(tag, value) case TypeTag.DATETIME | TypeTag.DATE: return self.datetime_codec.decode(tag, value) - case TypeTag.LIST | TypeTag.TUPLE | TypeTag.DICT: + case TypeTag.LIST | TypeTag.TUPLE | TypeTag.DICT | TypeTag.BATCH_RESULT: return self.container_codec.decode(tag, value) case _: msg = f"Unknown type tag: {tag}" @@ -357,6 +372,14 @@ def is_primitive(obj: Any) -> bool: return False +class PassThroughSerDes(SerDes[T]): + def serialize(self, value: T, _: SerDesContext) -> str: # noqa: PLR6301 + return value # type: ignore + + def deserialize(self, data: str, _: SerDesContext) -> T: # noqa: PLR6301 + return data # type: ignore + + class JsonSerDes(SerDes[T]): def serialize(self, value: T, _: SerDesContext) -> str: # noqa: PLR6301 return json.dumps(value) @@ -392,10 +415,14 @@ def deserialize(self, data: str, context: SerDesContext | None = None) -> Any: if not (isinstance(obj, dict) and TYPE_TOKEN in obj and VALUE_TOKEN in obj): msg = 'Malformed envelope: missing "t" or "v" at root.' 
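The `BATCH_RESULT` tag slots into the same `{"t": ..., "v": ...}` envelope the codec already uses. A miniature, self-contained version of that tagged-envelope idea (tuples and lists only) shows why a tag is needed at all: plain JSON loses the distinction on round-trip.

```python
import json


# Every value is wrapped as {"t": <type tag>, "v": <encoded value>} so
# containers can round-trip types JSON alone cannot; the diff adds "br"
# (TypeTag.BATCH_RESULT) to the same scheme.
def encode(obj):
    if isinstance(obj, tuple):
        return {"t": "t", "v": [encode(v) for v in obj]}
    if isinstance(obj, list):
        return {"t": "l", "v": [encode(v) for v in obj]}
    return {"t": "s" if isinstance(obj, str) else "n", "v": obj}


def decode(env):
    tag, value = env["t"], env["v"]
    if tag == "t":
        return tuple(decode(v) for v in value)
    if tag == "l":
        return [decode(v) for v in value]
    return value


payload = json.dumps(encode((1, ["a", 2])))
assert decode(json.loads(payload)) == (1, ["a", 2])  # tuple survives the trip
```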
raise SerDesError(msg) - if obj[TYPE_TOKEN] not in TypeTag: + # Python 3.11 compatibility: Using try-except instead of 'in' operator + # because checking 'str in EnumType' raises TypeError in Python 3.11 + try: + tag = TypeTag(obj[TYPE_TOKEN]) + except ValueError: msg = f'Unknown type tag: "{obj[TYPE_TOKEN]}"' - raise SerDesError(msg) - tag = TypeTag(obj[TYPE_TOKEN]) + raise SerDesError(msg) from None + return self._codec.decode(tag, obj[VALUE_TOKEN]) def _to_json_serializable(self, obj: Any) -> Any: @@ -414,8 +441,8 @@ def _to_json_serializable(self, obj: Any) -> Any: return obj -_DEFAULT_JSON_SERDES: SerDes[Any] = JsonSerDes() -_EXTENDED_TYPES_SERDES: SerDes[Any] = ExtendedTypeSerDes() +DEFAULT_JSON_SERDES: SerDes[Any] = JsonSerDes() +EXTENDED_TYPES_SERDES: SerDes[Any] = ExtendedTypeSerDes() def serialize( @@ -436,7 +463,7 @@ def serialize( FatalError: If serialization fails """ serdes_context: SerDesContext = SerDesContext(operation_id, durable_execution_arn) - active_serdes: SerDes[T] = serdes or _EXTENDED_TYPES_SERDES + active_serdes: SerDes[T] = serdes or EXTENDED_TYPES_SERDES try: return active_serdes.serialize(value, serdes_context) except Exception as e: @@ -466,7 +493,7 @@ def deserialize( FatalError: If deserialization fails """ serdes_context: SerDesContext = SerDesContext(operation_id, durable_execution_arn) - active_serdes: SerDes[T] = serdes or _EXTENDED_TYPES_SERDES + active_serdes: SerDes[T] = serdes or EXTENDED_TYPES_SERDES try: return active_serdes.deserialize(data, serdes_context) except Exception as e: diff --git a/src/aws_durable_execution_sdk_python/state.py b/src/aws_durable_execution_sdk_python/state.py index d97d19d..a6fc0c7 100644 --- a/src/aws_durable_execution_sdk_python/state.py +++ b/src/aws_durable_execution_sdk_python/state.py @@ -8,6 +8,7 @@ import threading import time from dataclasses import dataclass +from enum import Enum from threading import Lock from typing import TYPE_CHECKING @@ -15,6 +16,7 @@ BackgroundThreadError, CallableRuntimeError, DurableExecutionsError, + OrphanedChildException, ) from aws_durable_execution_sdk_python.lambda_service import ( CheckpointOutput, @@ -210,6 +212,13 @@ def get_next_attempt_timestamp(self) -> datetime.datetime | None: CHECKPOINT_NOT_FOUND = CheckpointedResult.create_not_found() +class ReplayStatus(Enum): + """Status indicating whether execution is replaying or executing new operations.""" + + REPLAY = "replay" + NEW = "new" + + class ExecutionState: """Get, set and maintain execution state. This is mutable. Create and check checkpoints.""" @@ -220,6 +229,7 @@ def __init__( operations: MutableMapping[str, Operation], service_client: DurableServiceClient, batcher_config: CheckpointBatcherConfig | None = None, + replay_status: ReplayStatus = ReplayStatus.NEW, ): self.durable_execution_arn: str = durable_execution_arn self._current_checkpoint_token: str = initial_checkpoint_token @@ -247,6 +257,9 @@ def __init__( # Protects parent_to_children and parent_done self._parent_done_lock: Lock = Lock() + self._replay_status: ReplayStatus = replay_status + self._replay_status_lock: Lock = Lock() + self._visited_operations: set[str] = set() def fetch_paginated_operations( self, @@ -277,6 +290,48 @@ def fetch_paginated_operations( with self._operations_lock: self.operations.update({op.operation_id: op for op in all_operations}) + def track_replay(self, operation_id: str) -> None: + """Check if operation exists with completed status; if not, transition to NEW status. 
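The Python 3.11 note above is worth a concrete illustration: on 3.11, a containment check against an enum with a plain string raises TypeError (3.12 restored value-based membership), so constructing the enum and catching ValueError is the portable spelling:

```python
from enum import StrEnum  # StrEnum itself requires Python >= 3.11


class Tag(StrEnum):
    LIST = "l"
    DICT = "m"


def parse_tag(raw: str) -> Tag:
    # On 3.11, `raw in Tag` raises TypeError for a plain str; on 3.12 it
    # returns a bool. Constructing and catching ValueError behaves the same
    # on both versions.
    try:
        return Tag(raw)
    except ValueError:
        raise ValueError(f'Unknown type tag: "{raw}"') from None


assert parse_tag("l") is Tag.LIST
```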
+ + This method is called before each operation (step, wait, invoke, etc.) to determine + if we've reached the replay boundary. Once we encounter an operation that doesn't + exist or isn't completed, we transition from REPLAY to NEW status, which enables + logging for all subsequent code. + + Args: + operation_id: The operation ID to check + """ + with self._replay_status_lock: + if self._replay_status == ReplayStatus.REPLAY: + self._visited_operations.add(operation_id) + completed_ops = { + op_id + for op_id, op in self.operations.items() + if op.operation_type != OperationType.EXECUTION + and op.status + in { + OperationStatus.SUCCEEDED, + OperationStatus.FAILED, + OperationStatus.CANCELLED, + OperationStatus.STOPPED, + } + } + if completed_ops.issubset(self._visited_operations): + logger.debug( + "Transitioning from REPLAY to NEW status at operation %s", + operation_id, + ) + self._replay_status = ReplayStatus.NEW + + def is_replaying(self) -> bool: + """Check if execution is currently in replay mode. + + Returns: + True if in REPLAY status, False if in NEW status + """ + with self._replay_status_lock: + return self._replay_status is ReplayStatus.REPLAY + def get_checkpoint_result(self, checkpoint_id: str) -> CheckpointedResult: """Get checkpoint result. @@ -395,7 +450,13 @@ def create_checkpoint( "Rejecting checkpoint for operation %s - parent is done", operation_update.operation_id, ) - return + error_msg = ( + "Parent context completed, child operation cannot checkpoint" + ) + raise OrphanedChildException( + error_msg, + operation_id=operation_update.operation_id, + ) # Check if background checkpointing has failed if self._checkpointing_failed.is_set(): @@ -731,3 +792,6 @@ def _calculate_operation_size(queued_op: QueuedOperation) -> int: # Use JSON serialization to estimate size serialized = json.dumps(queued_op.operation_update.to_dict()).encode("utf-8") return len(serialized) + + def close(self): + self.stop_checkpointing() diff --git a/src/aws_durable_execution_sdk_python/types.py b/src/aws_durable_execution_sdk_python/types.py index 65c4be5..9181be9 100644 --- a/src/aws_durable_execution_sdk_python/types.py +++ b/src/aws_durable_execution_sdk_python/types.py @@ -13,6 +13,7 @@ BatchedInput, CallbackConfig, ChildConfig, + Duration, MapConfig, ParallelConfig, StepConfig, @@ -56,6 +57,11 @@ class StepContext(OperationContext): pass +@dataclass(frozen=True) +class WaitForCallbackContext(OperationContext): + """Context provided to waitForCallback submitter functions.""" + + @dataclass(frozen=True) class WaitForConditionCheckContext(OperationContext): pass @@ -126,7 +132,7 @@ def parallel( ... # pragma: no cover @abstractmethod - def wait(self, seconds: int, name: str | None = None) -> None: + def wait(self, duration: Duration, name: str | None = None) -> None: """Wait for a specified amount of time.""" ... 
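`is_replaying()` exists so user-facing output can be muted during deterministic replay. How the SDK wires this into its logger is not shown in this diff; one plausible shape, purely as a sketch, is a logging filter keyed off the state:

```python
import logging


class ReplayAwareFilter(logging.Filter):
    """Suppress records while the execution state reports replay mode."""

    def __init__(self, state):
        super().__init__()
        self._state = state

    def filter(self, record: logging.LogRecord) -> bool:
        return not self._state.is_replaying()


class _FakeState:
    """Minimal stand-in exposing the is_replaying() query from state.py."""

    def __init__(self, replaying: bool):
        self._replaying = replaying

    def is_replaying(self) -> bool:
        return self._replaying


log = logging.getLogger("user")
log.addFilter(ReplayAwareFilter(_FakeState(replaying=True)))
log.warning("suppressed during replay")  # emits nothing
```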
# pragma: no cover diff --git a/src/aws_durable_execution_sdk_python/waits.py b/src/aws_durable_execution_sdk_python/waits.py index 351fb69..b4d740a 100644 --- a/src/aws_durable_execution_sdk_python/waits.py +++ b/src/aws_durable_execution_sdk_python/waits.py @@ -2,10 +2,11 @@ from __future__ import annotations +import math from dataclasses import dataclass, field from typing import TYPE_CHECKING, Generic -from aws_durable_execution_sdk_python.config import JitterStrategy, T +from aws_durable_execution_sdk_python.config import Duration, JitterStrategy, T if TYPE_CHECKING: from collections.abc import Callable @@ -20,28 +21,52 @@ class WaitDecision: """Decision about whether to wait a step and with what delay.""" should_wait: bool - delay_seconds: int + delay: Duration + + @property + def delay_seconds(self) -> int: + """Get delay in seconds.""" + return self.delay.to_seconds() @classmethod - def wait(cls, delay_seconds: int) -> WaitDecision: + def wait(cls, delay: Duration) -> WaitDecision: """Create a wait decision.""" - return cls(should_wait=True, delay_seconds=delay_seconds) + return cls(should_wait=True, delay=delay) @classmethod def no_wait(cls) -> WaitDecision: """Create a no-wait decision.""" - return cls(should_wait=False, delay_seconds=0) + return cls(should_wait=False, delay=Duration()) @dataclass class WaitStrategyConfig(Generic[T]): should_continue_polling: Callable[[T], bool] max_attempts: int = 60 - initial_delay_seconds: int = 5 - max_delay_seconds: int = 300 # 5 minutes + initial_delay: Duration = field(default_factory=lambda: Duration.from_seconds(5)) + max_delay: Duration = field( + default_factory=lambda: Duration.from_minutes(5) + ) # 5 minutes backoff_rate: Numeric = 1.5 jitter_strategy: JitterStrategy = field(default=JitterStrategy.FULL) - timeout_seconds: int | None = None # Not implemented yet + timeout: Duration | None = None # Not implemented yet + + @property + def initial_delay_seconds(self) -> int: + """Get initial delay in seconds.""" + return self.initial_delay.to_seconds() + + @property + def max_delay_seconds(self) -> int: + """Get max delay in seconds.""" + return self.max_delay.to_seconds() + + @property + def timeout_seconds(self) -> int | None: + """Get timeout in seconds.""" + if self.timeout is None: + return None + return self.timeout.to_seconds() def create_wait_strategy( @@ -57,19 +82,18 @@ def wait_strategy(result: T, attempts_made: int) -> WaitDecision: return WaitDecision.no_wait() # Calculate delay with exponential backoff - base_delay = min( + base_delay: float = min( config.initial_delay_seconds * (config.backoff_rate ** (attempts_made - 1)), config.max_delay_seconds, ) - # Apply jitter (add jitter to base delay) - jitter = config.jitter_strategy.compute_jitter(base_delay) - delay_with_jitter = base_delay + jitter + # Apply jitter to get final delay + delay_with_jitter: float = config.jitter_strategy.apply_jitter(base_delay) - # Ensure delay is an integer >= 1 - final_delay = max(1, round(delay_with_jitter)) + # Round up and ensure minimum of 1 second + final_delay: int = max(1, math.ceil(delay_with_jitter)) - return WaitDecision.wait(final_delay) + return WaitDecision.wait(Duration(seconds=final_delay)) return wait_strategy @@ -79,17 +103,22 @@ class WaitForConditionDecision: """Decision about whether to continue waiting.""" should_continue: bool - delay_seconds: int + delay: Duration + + @property + def delay_seconds(self) -> int: + """Get delay in seconds.""" + return self.delay.to_seconds() @classmethod - def continue_waiting(cls, 
delay_seconds: int) -> WaitForConditionDecision: + def continue_waiting(cls, delay: Duration) -> WaitForConditionDecision: """Create a decision to continue waiting for delay_seconds.""" - return cls(should_continue=True, delay_seconds=delay_seconds) + return cls(should_continue=True, delay=delay) @classmethod def stop_polling(cls) -> WaitForConditionDecision: """Create a decision to stop polling.""" - return cls(should_continue=False, delay_seconds=-1) + return cls(should_continue=False, delay=Duration()) @dataclass(frozen=True) diff --git a/tests/concurrency_test.py b/tests/concurrency_test.py index ea9c26f..cb2f0ba 100644 --- a/tests/concurrency_test.py +++ b/tests/concurrency_test.py @@ -1,5 +1,6 @@ """Tests for the concurrency module.""" +import json import random import threading import time @@ -10,17 +11,19 @@ import pytest -from aws_durable_execution_sdk_python.concurrency import ( +from aws_durable_execution_sdk_python.concurrency.executor import ( + ConcurrentExecutor, + TimerScheduler, +) +from aws_durable_execution_sdk_python.concurrency.models import ( BatchItem, BatchItemStatus, BatchResult, BranchStatus, CompletionReason, - ConcurrentExecutor, Executable, ExecutableWithState, ExecutionCounters, - TimerScheduler, ) from aws_durable_execution_sdk_python.config import CompletionConfig, MapConfig from aws_durable_execution_sdk_python.exceptions import ( @@ -29,7 +32,9 @@ SuspendExecution, TimedSuspendExecution, ) -from aws_durable_execution_sdk_python.lambda_service import ErrorObject +from aws_durable_execution_sdk_python.lambda_service import ( + ErrorObject, +) from aws_durable_execution_sdk_python.operation.map import MapExecutor @@ -102,28 +107,6 @@ def test_batch_item_from_dict(): assert item.error is None -def test_batch_item_from_dict_with_error(): - """Test BatchItem from_dict with error object.""" - error_data = { - "message": "Test error", - "type": "TestError", - "data": None, - "stackTrace": None, - } - data = { - "index": 1, - "status": "FAILED", - "result": None, - "error": error_data, - } - - item = BatchItem.from_dict(data) - assert item.index == 1 - assert item.status == BatchItemStatus.FAILED - assert item.result is None - assert item.error is not None - - def test_batch_result_creation(): """Test BatchResult creation.""" items = [ @@ -323,7 +306,9 @@ def test_batch_result_from_dict_default_completion_reason(): # No completionReason provided } - with patch("aws_durable_execution_sdk_python.concurrency.logger") as mock_logger: + with patch( + "aws_durable_execution_sdk_python.concurrency.models.logger" + ) as mock_logger: result = BatchResult.from_dict(data) assert result.completion_reason == CompletionReason.ALL_COMPLETED # Verify warning was logged @@ -341,7 +326,9 @@ def test_batch_result_from_dict_infer_all_completed_all_succeeded(): # No completionReason provided } - with patch("aws_durable_execution_sdk_python.concurrency.logger") as mock_logger: + with patch( + "aws_durable_execution_sdk_python.concurrency.models.logger" + ) as mock_logger: result = BatchResult.from_dict(data) assert result.completion_reason == CompletionReason.ALL_COMPLETED mock_logger.warning.assert_called_once() @@ -365,7 +352,9 @@ def test_batch_result_from_dict_infer_failure_tolerance_exceeded_all_failed(): # even if everything has failed, if we've completed all items, then we've finished as ALL_COMPLETED # 
https://github.com/aws/aws-durable-execution-sdk-js/blob/f20396f24afa9d6539d8e5056ee851ac7ef62301/packages/aws-durable-execution-sdk-js/src/handlers/concurrent-execution-handler/concurrent-execution-handler.ts#L324-L335 - with patch("aws_durable_execution_sdk_python.concurrency.logger") as mock_logger: + with patch( + "aws_durable_execution_sdk_python.concurrency.models.logger" + ) as mock_logger: result = BatchResult.from_dict(data) assert result.completion_reason == CompletionReason.ALL_COMPLETED mock_logger.warning.assert_called_once() @@ -389,7 +378,9 @@ def test_batch_result_from_dict_infer_all_completed_mixed_success_failure(): } # the logic is that when \every item i: hasCompleted(i) then terminate due to all_completed - with patch("aws_durable_execution_sdk_python.concurrency.logger") as mock_logger: + with patch( + "aws_durable_execution_sdk_python.concurrency.models.logger" + ) as mock_logger: result = BatchResult.from_dict(data) assert result.completion_reason == CompletionReason.ALL_COMPLETED mock_logger.warning.assert_called_once() @@ -406,7 +397,9 @@ def test_batch_result_from_dict_infer_min_successful_reached_has_started(): # No completionReason provided } - with patch("aws_durable_execution_sdk_python.concurrency.logger") as mock_logger: + with patch( + "aws_durable_execution_sdk_python.concurrency.models.logger" + ) as mock_logger: result = BatchResult.from_dict(data, CompletionConfig(1)) assert result.completion_reason == CompletionReason.MIN_SUCCESSFUL_REACHED mock_logger.warning.assert_called_once() @@ -419,7 +412,9 @@ def test_batch_result_from_dict_infer_empty_items(): # No completionReason provided } - with patch("aws_durable_execution_sdk_python.concurrency.logger") as mock_logger: + with patch( + "aws_durable_execution_sdk_python.concurrency.models.logger" + ) as mock_logger: result = BatchResult.from_dict(data) assert result.completion_reason == CompletionReason.ALL_COMPLETED mock_logger.warning.assert_called_once() @@ -434,7 +429,9 @@ def test_batch_result_from_dict_with_explicit_completion_reason(): "completionReason": "MIN_SUCCESSFUL_REACHED", } - with patch("aws_durable_execution_sdk_python.concurrency.logger") as mock_logger: + with patch( + "aws_durable_execution_sdk_python.concurrency.models.logger" + ) as mock_logger: result = BatchResult.from_dict(data) assert result.completion_reason == CompletionReason.MIN_SUCCESSFUL_REACHED # No warning should be logged when completionReason is provided @@ -973,6 +970,7 @@ def execute_item(self, child_context, executable): exe_state = ExecutableWithState(executables[0]) future = Mock() future.result.side_effect = TimedSuspendExecution("test message", time.time() + 1) + future.cancelled.return_value = False scheduler = Mock() scheduler.schedule_resume = Mock() @@ -1045,6 +1043,7 @@ def execute_item(self, child_context, executable): exe_state = ExecutableWithState(executables[0]) future = Mock() future.result.side_effect = ValueError("Test error") + future.cancelled.return_value = False scheduler = Mock() @@ -1054,7 +1053,7 @@ def execute_item(self, child_context, executable): assert isinstance(exe_state.error, ValueError) -def test_concurrent_executor_create_result_with_failed_branches(): +def test_concurrent_executor_create_result_with_early_exit(): """Test ConcurrentExecutor with failed branches using public execute method.""" class TestExecutor(ConcurrentExecutor): @@ -1062,6 +1061,8 @@ def execute_item(self, child_context, executable): if executable.index == 0: return f"result_{executable.index}" msg = "Test error" 
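Several of the tests below lean on the same early-termination arithmetic (e.g. min_successful(99) + failure_count(2) = 101 > total_tasks(100)). As a stand-alone sketch of that reasoning, with a hypothetical `should_stop` helper rather than the SDK's `ExecutionCounters`:

```python
def should_stop(total: int, succeeded: int, failed: int,
                min_successful: int | None) -> bool:
    if min_successful is None:
        return succeeded + failed == total       # run every branch to the end
    if succeeded >= min_successful:
        return True                              # MIN_SUCCESSFUL_REACHED
    remaining = total - succeeded - failed
    if succeeded + remaining < min_successful:
        return True                              # success is now impossible
    return False


assert should_stop(100, 0, 2, 99) is True    # 98 possible successes < 99
assert should_stop(2, 1, 0, 1) is True       # fast branch already satisfied it
assert should_stop(2, 1, 0, None) is False   # no early exit requested
```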
+ # giving space to terminate early with + time.sleep(0.5) raise ValueError(msg) def success_callable(): @@ -1072,7 +1073,8 @@ def failure_callable(): executables = [Executable(0, success_callable), Executable(1, failure_callable)] completion_config = CompletionConfig( - min_successful=1, + # setting min successful to None to execute all children and avoid early stopping + min_successful=None, tolerated_failure_count=None, tolerated_failure_percentage=None, ) @@ -2373,7 +2375,9 @@ def test_batch_result_from_dict_with_completion_config(): # With started items, should infer MIN_SUCCESSFUL_REACHED completion_config = CompletionConfig(min_successful=1) - with patch("aws_durable_execution_sdk_python.concurrency.logger") as mock_logger: + with patch( + "aws_durable_execution_sdk_python.concurrency.models.logger" + ) as mock_logger: result = BatchResult.from_dict(data, completion_config) assert result.completion_reason == CompletionReason.MIN_SUCCESSFUL_REACHED mock_logger.warning.assert_called_once() @@ -2399,7 +2403,9 @@ def test_batch_result_from_dict_all_completed(): # No completionReason provided } - with patch("aws_durable_execution_sdk_python.concurrency.logger") as mock_logger: + with patch( + "aws_durable_execution_sdk_python.concurrency.models.logger" + ) as mock_logger: result = BatchResult.from_dict(data) assert result.completion_reason == CompletionReason.ALL_COMPLETED mock_logger.warning.assert_called_once() @@ -2520,7 +2526,7 @@ def create_child_context(operation_id): executor_context.create_child_context = create_child_context with patch( - "aws_durable_execution_sdk_python.concurrency.child_handler", + "aws_durable_execution_sdk_python.concurrency.executor.child_handler", patched_child_handler, ): executor.execute(execution_state, executor_context) @@ -2676,3 +2682,352 @@ def mock_get_checkpoint_result(operation_id): assert len(result.all) == 1 assert result.all[0].status == BatchItemStatus.SUCCEEDED assert result.all[0].result == "re_executed_result" + + +def test_batch_item_from_dict_with_error(): + """Test BatchItem.from_dict() with error.""" + data = { + "index": 3, + "status": "FAILED", + "result": None, + "error": { + "ErrorType": "ValueError", + "ErrorMessage": "bad value", + "StackTrace": [], + }, + } + + item = BatchItem.from_dict(data) + + assert item.index == 3 + assert item.status == BatchItemStatus.FAILED + assert item.error.type == "ValueError" + assert item.error.message == "bad value" + + +def test_batch_result_with_mixed_statuses(): + """Test BatchResult serialization with mixed item statuses.""" + result = BatchResult( + all=[ + BatchItem(0, BatchItemStatus.SUCCEEDED, result="success"), + BatchItem( + 1, + BatchItemStatus.FAILED, + error=ErrorObject(message="msg", type="E", data=None, stack_trace=[]), + ), + BatchItem(2, BatchItemStatus.STARTED), + ], + completion_reason=CompletionReason.FAILURE_TOLERANCE_EXCEEDED, + ) + + serialized = json.dumps(result.to_dict()) + deserialized = BatchResult.from_dict(json.loads(serialized)) + + assert len(deserialized.all) == 3 + assert deserialized.all[0].status == BatchItemStatus.SUCCEEDED + assert deserialized.all[1].status == BatchItemStatus.FAILED + assert deserialized.all[2].status == BatchItemStatus.STARTED + assert deserialized.completion_reason == CompletionReason.FAILURE_TOLERANCE_EXCEEDED + + +def test_batch_result_empty_list(): + """Test BatchResult serialization with empty items list.""" + result = BatchResult(all=[], completion_reason=CompletionReason.ALL_COMPLETED) + + serialized = json.dumps(result.to_dict()) + 
deserialized = BatchResult.from_dict(json.loads(serialized)) + + assert len(deserialized.all) == 0 + assert deserialized.completion_reason == CompletionReason.ALL_COMPLETED + + +def test_batch_result_complex_nested_data(): + """Test BatchResult with complex nested data structures.""" + complex_result = { + "users": [{"id": 1, "name": "Alice"}, {"id": 2, "name": "Bob"}], + "metadata": {"count": 2, "timestamp": "2025-10-31"}, + } + + result = BatchResult( + all=[BatchItem(0, BatchItemStatus.SUCCEEDED, result=complex_result)], + completion_reason=CompletionReason.ALL_COMPLETED, + ) + + serialized = json.dumps(result.to_dict()) + deserialized = BatchResult.from_dict(json.loads(serialized)) + + assert deserialized.all[0].result == complex_result + assert deserialized.all[0].result["users"][0]["name"] == "Alice" + + +def test_executor_does_not_deadlock_when_all_tasks_terminal_but_completion_config_allows_failures(): + """Ensure executor returns when all tasks are terminal even if completion rules are confusing.""" + + class TestExecutor(ConcurrentExecutor): + def execute_item(self, child_context, executable): + if executable.index == 0: + # fail one task + raise Exception("boom") # noqa EM101 TRY002 + return f"ok_{executable.index}" + + # Two tasks, min_successful=2 but tolerated failure_count set to 1. + # After one fail + one success, counters.is_complete() should return true, + # should_continue() should return false. counters.is_complete was failing to + # stop early, which caused map to hang. + executables = [Executable(0, lambda: "a"), Executable(1, lambda: "b")] + completion_config = CompletionConfig( + min_successful=2, + tolerated_failure_count=1, + tolerated_failure_percentage=None, + ) + + executor = TestExecutor( + executables=executables, + max_concurrency=2, + completion_config=completion_config, + sub_type_top="TOP", + sub_type_iteration="ITER", + name_prefix="test_", + serdes=None, + ) + + execution_state = Mock() + execution_state.create_checkpoint = Mock() + executor_context = Mock() + executor_context._create_step_id_for_logical_step = lambda *args: "1" # noqa SLF001 + executor_context.create_child_context = lambda *args: Mock() + + # Should return (not hang) and batch should reflect one FAILED and one SUCCEEDED + result = executor.execute(execution_state, executor_context) + statuses = {item.index: item.status for item in result.all} + assert statuses[0] == BatchItemStatus.FAILED + assert statuses[1] == BatchItemStatus.SUCCEEDED + + +def test_executor_terminates_quickly_when_impossible_to_succeed(): + """Test that executor terminates when min_successful becomes impossible.""" + executed_count = {"value": 0} + + def task_func(ctx, item, idx, items): + executed_count["value"] += 1 + if idx < 2: + raise Exception(f"fail_{idx}") # noqa EM102 TRY002 + time.sleep(0.05) + return f"ok_{idx}" + + items = list(range(100)) + config = MapConfig( + max_concurrency=10, completion_config=CompletionConfig(min_successful=99) + ) + + executor = MapExecutor.from_items(items=items, func=task_func, config=config) + + execution_state = Mock() + execution_state.create_checkpoint = Mock() + executor_context = Mock() + executor_context._create_step_id_for_logical_step = lambda *args: "1" # noqa SLF001 + executor_context.create_child_context = lambda *args: Mock() + + result = executor.execute(execution_state, executor_context) + + # With concurrency=1, only 2 tasks should execute before terminating + # min_successful(99) + failure_count(2) = 101 > total_tasks(100) + assert executed_count["value"] < 
100 + assert ( + result.completion_reason == CompletionReason.FAILURE_TOLERANCE_EXCEEDED + ), executed_count + assert sum(1 for item in result.all if item.status == BatchItemStatus.FAILED) == 2 + assert ( + sum(1 for item in result.all if item.status == BatchItemStatus.SUCCEEDED) < 98 + ) + + +def test_executor_exits_early_with_min_successful(): + """Test that parallel exits immediately when min_successful is reached without waiting for other branches.""" + + class TestExecutor(ConcurrentExecutor): + def execute_item(self, child_context, executable): + return executable.func() + + execution_times = [] + + def fast_branch(): + execution_times.append(("fast", time.time())) + return "fast_result" + + def slow_branch(): + execution_times.append(("slow_start", time.time())) + time.sleep(2) # Long sleep + execution_times.append(("slow_end", time.time())) + return "slow_result" + + executables = [ + Executable(0, fast_branch), + Executable(1, slow_branch), + ] + + completion_config = CompletionConfig(min_successful=1) + + executor = TestExecutor( + executables=executables, + max_concurrency=2, + completion_config=completion_config, + sub_type_top="TOP", + sub_type_iteration="ITER", + name_prefix="test_", + serdes=None, + ) + + execution_state = Mock() + execution_state.create_checkpoint = Mock() + executor_context = Mock() + executor_context._create_step_id_for_logical_step = lambda idx: f"step_{idx}" # noqa: SLF001 + executor_context._parent_id = "parent" # noqa: SLF001 + + def create_child_context(op_id): + child = Mock() + child.state = execution_state + return child + + executor_context.create_child_context = create_child_context + + start_time = time.time() + result = executor.execute(execution_state, executor_context) + elapsed_time = time.time() - start_time + + # Should complete in less than 1.5 second (not wait for 2-second sleep) + assert elapsed_time < 1.5, f"Took {elapsed_time}s, expected < 1.5s" + + # Result should show MIN_SUCCESSFUL_REACHED + assert result.completion_reason == CompletionReason.MIN_SUCCESSFUL_REACHED + + # Fast branch should succeed + assert result.all[0].status == BatchItemStatus.SUCCEEDED + assert result.all[0].result == "fast_result" + + # Slow branch should be marked as STARTED (incomplete) + assert result.all[1].status == BatchItemStatus.STARTED + + # Verify counts + assert result.success_count == 1 + assert result.failure_count == 0 + assert result.started_count == 1 + assert result.total_count == 2 + + +def test_executor_returns_with_incomplete_branches(): + """Test that executor returns when min_successful is reached, leaving other branches incomplete.""" + + class TestExecutor(ConcurrentExecutor): + def execute_item(self, child_context, executable): + return executable.func() + + operation_tracker = Mock() + + def fast_branch(): + operation_tracker.fast_executed() + return "fast_result" + + def slow_branch(): + operation_tracker.slow_started() + time.sleep(2) # Long sleep + operation_tracker.slow_completed() + return "slow_result" + + executables = [ + Executable(0, fast_branch), + Executable(1, slow_branch), + ] + + completion_config = CompletionConfig(min_successful=1) + + executor = TestExecutor( + executables=executables, + max_concurrency=2, + completion_config=completion_config, + sub_type_top="TOP", + sub_type_iteration="ITER", + name_prefix="test_", + serdes=None, + ) + + execution_state = Mock() + execution_state.create_checkpoint = Mock() + executor_context = Mock() + executor_context._create_step_id_for_logical_step = lambda idx: f"step_{idx}" # 
noqa: SLF001 + executor_context._parent_id = "parent" # noqa: SLF001 + executor_context.create_child_context = lambda op_id: Mock(state=execution_state) + + result = executor.execute(execution_state, executor_context) + + # Verify fast branch executed + assert operation_tracker.fast_executed.call_count == 1 + + # Slow branch may or may not have started (depends on thread scheduling) + # but it definitely should not have completed + assert ( + operation_tracker.slow_completed.call_count == 0 + ), "Executor should return before slow branch completes" + + # Result should show MIN_SUCCESSFUL_REACHED + assert result.completion_reason == CompletionReason.MIN_SUCCESSFUL_REACHED + + # Verify counts - one succeeded, one incomplete + assert result.success_count == 1 + assert result.failure_count == 0 + assert result.started_count == 1 + assert result.total_count == 2 + + +def test_executor_returns_before_slow_branch_completes(): + """Test that executor returns immediately when min_successful is reached, not waiting for slow branches.""" + + class TestExecutor(ConcurrentExecutor): + def execute_item(self, child_context, executable): + return executable.func() + + slow_branch_mock = Mock() + + def fast_func(): + return "fast" + + def slow_func(): + time.sleep(3) # Sleep + slow_branch_mock.completed() # Should not be called before executor returns + return "slow" + + executables = [Executable(0, fast_func), Executable(1, slow_func)] + completion_config = CompletionConfig(min_successful=1) + + executor = TestExecutor( + executables=executables, + max_concurrency=2, + completion_config=completion_config, + sub_type_top="TOP", + sub_type_iteration="ITER", + name_prefix="test_", + serdes=None, + ) + + execution_state = Mock() + execution_state.create_checkpoint = Mock() + executor_context = Mock() + executor_context._create_step_id_for_logical_step = lambda idx: f"step_{idx}" # noqa: SLF001 + executor_context._parent_id = "parent" # noqa: SLF001 + executor_context.create_child_context = lambda op_id: Mock(state=execution_state) + + result = executor.execute(execution_state, executor_context) + + # Executor should have returned before slow branch completed + assert ( + not slow_branch_mock.completed.called + ), "Executor should return before slow branch completes" + + # Result should show MIN_SUCCESSFUL_REACHED + assert result.completion_reason == CompletionReason.MIN_SUCCESSFUL_REACHED + + # Verify counts + assert result.success_count == 1 + assert result.failure_count == 0 + assert result.started_count == 1 + assert result.total_count == 2 diff --git a/tests/config_test.py b/tests/config_test.py index b2b7af9..24edf6d 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -9,6 +9,8 @@ CheckpointMode, ChildConfig, CompletionConfig, + Duration, + InvokeConfig, ItemBatcher, ItemsPerBatchUnit, MapConfig, @@ -85,7 +87,7 @@ def test_parallel_config_defaults(): def test_wait_for_condition_decision_continue(): """Test WaitForConditionDecision.continue_waiting factory method.""" - decision = WaitForConditionDecision.continue_waiting(30) + decision = WaitForConditionDecision.continue_waiting(Duration.from_seconds(30)) assert decision.should_continue is True assert decision.delay_seconds == 30 @@ -94,14 +96,14 @@ def test_wait_for_condition_decision_stop(): """Test WaitForConditionDecision.stop_polling factory method.""" decision = WaitForConditionDecision.stop_polling() assert decision.should_continue is False - assert decision.delay_seconds == -1 + assert decision.delay_seconds == 0 def 
test_wait_for_condition_config(): """Test WaitForConditionConfig with custom values.""" def wait_strategy(state, attempt): - return WaitForConditionDecision.continue_waiting(10) + return WaitForConditionDecision.continue_waiting(Duration.from_seconds(10)) serdes = Mock() config = WaitForConditionConfig( @@ -237,7 +239,9 @@ def test_callback_config_with_values(): """Test CallbackConfig with custom values.""" serdes = Mock() config = CallbackConfig( - timeout_seconds=30, heartbeat_timeout_seconds=10, serdes=serdes + timeout=Duration.from_seconds(30), + heartbeat_timeout=Duration.from_seconds(10), + serdes=serdes, ) assert config.timeout_seconds == 30 assert config.heartbeat_timeout_seconds == 10 @@ -272,3 +276,16 @@ def test_step_future_without_name(): result = step_future.result() assert result == 42 + + +def test_invoke_config_defaults(): + """Test InvokeConfig defaults.""" + config = InvokeConfig() + assert config.tenant_id is None + assert config.timeout_seconds == 0 + + +def test_invoke_config_with_tenant_id(): + """Test InvokeConfig with explicit tenant_id.""" + config = InvokeConfig(tenant_id="test-tenant") + assert config.tenant_id == "test-tenant" diff --git a/tests/context_test.py b/tests/context_test.py index 3804ee4..4e43347 100644 --- a/tests/context_test.py +++ b/tests/context_test.py @@ -3,13 +3,14 @@ import json import random from itertools import islice -from unittest.mock import ANY, Mock, patch +from unittest.mock import ANY, MagicMock, Mock, patch import pytest from aws_durable_execution_sdk_python.config import ( CallbackConfig, ChildConfig, + Duration, InvokeConfig, MapConfig, ParallelConfig, @@ -17,7 +18,6 @@ ) from aws_durable_execution_sdk_python.context import Callback, DurableContext from aws_durable_execution_sdk_python.exceptions import ( - CallableRuntimeError, CallbackError, SuspendExecution, ValidationError, @@ -74,6 +74,28 @@ def test_callback_result_succeeded(): callback = Callback("callback1", "op1", mock_state) result = callback.result() + assert result == '"success_result"' + mock_state.get_checkpoint_result.assert_called_once_with("op1") + + +def test_callback_result_succeeded_with_plain_str(): + """Test Callback.result() when the operation succeeded with a plain string result.""" + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + operation = Operation( + operation_id="op1", + operation_type=OperationType.CALLBACK, + status=OperationStatus.SUCCEEDED, + callback_details=CallbackDetails( + callback_id="callback1", result="success_result" + ), + ) + mock_result = CheckpointedResult.create_from_operation(operation) + mock_state.get_checkpoint_result.return_value = mock_result + + callback = Callback("callback1", "op1", mock_state) + result = callback.result() + assert result == "success_result" mock_state.get_checkpoint_result.assert_called_once_with("op1") @@ -149,7 +171,7 @@ def test_callback_result_failed(): callback = Callback("callback5", "op5", mock_state) - with pytest.raises(CallableRuntimeError): + with pytest.raises(CallbackError): callback.result() @@ -208,7 +230,7 @@ def test_callback_result_timed_out(): callback = Callback("callback_timeout", "op_timeout", mock_state) - with pytest.raises(CallableRuntimeError): + with pytest.raises(CallbackError): callback.result() @@ -216,10 +238,13 @@ def test_callback_result_timed_out(): # region create_callback -@patch("aws_durable_execution_sdk_python.context.create_callback_handler") -def test_create_callback_basic(mock_handler): 
+@patch("aws_durable_execution_sdk_python.context.CallbackOperationExecutor") +def test_create_callback_basic(mock_executor_class): """Test create_callback with basic parameters.""" - mock_handler.return_value = "callback123" + mock_executor = MagicMock() + mock_executor.process.return_value = "callback123" + mock_executor_class.return_value = mock_executor + mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -236,17 +261,21 @@ def test_create_callback_basic(mock_handler): assert callback.operation_id == expected_operation_id assert callback.state is mock_state - mock_handler.assert_called_once_with( + mock_executor_class.assert_called_once_with( state=mock_state, operation_identifier=OperationIdentifier(expected_operation_id, None, None), config=CallbackConfig(), ) + mock_executor.process.assert_called_once() -@patch("aws_durable_execution_sdk_python.context.create_callback_handler") -def test_create_callback_with_name_and_config(mock_handler): +@patch("aws_durable_execution_sdk_python.context.CallbackOperationExecutor") +def test_create_callback_with_name_and_config(mock_executor_class): """Test create_callback with name and config.""" - mock_handler.return_value = "callback456" + mock_executor = MagicMock() + mock_executor.process.return_value = "callback456" + mock_executor_class.return_value = mock_executor + mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -264,18 +293,23 @@ def test_create_callback_with_name_and_config(mock_handler): assert callback.callback_id == "callback456" assert callback.operation_id == expected_operation_id - mock_handler.assert_called_once_with( + mock_executor_class.assert_called_once_with( state=mock_state, operation_identifier=OperationIdentifier(expected_operation_id, None, None), config=config, ) + mock_executor.process.assert_called_once() -@patch("aws_durable_execution_sdk_python.context.create_callback_handler") -def test_create_callback_with_parent_id(mock_handler): +@patch("aws_durable_execution_sdk_python.context.CallbackOperationExecutor") +def test_create_callback_with_parent_id(mock_executor_class): """Test create_callback with parent_id.""" - mock_handler.return_value = "callback789" + mock_executor = MagicMock() + + mock_executor.process.return_value = "callback789" + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -291,17 +325,21 @@ def test_create_callback_with_parent_id(mock_handler): assert callback.operation_id == expected_operation_id - mock_handler.assert_called_once_with( + mock_executor_class.assert_called_once_with( state=mock_state, operation_identifier=OperationIdentifier(expected_operation_id, "parent123"), config=CallbackConfig(), ) -@patch("aws_durable_execution_sdk_python.context.create_callback_handler") -def test_create_callback_increments_counter(mock_handler): +@patch("aws_durable_execution_sdk_python.context.CallbackOperationExecutor") +def test_create_callback_increments_counter(mock_executor_class): """Test create_callback increments step counter.""" - mock_handler.return_value = "callback_test" + mock_executor = MagicMock() + + mock_executor.process.return_value = "callback_test" + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( 
"arn:aws:durable:us-east-1:123456789012:execution/test" @@ -328,10 +366,14 @@ def test_create_callback_increments_counter(mock_handler): # region step -@patch("aws_durable_execution_sdk_python.context.step_handler") -def test_step_basic(mock_handler): +@patch("aws_durable_execution_sdk_python.context.StepOperationExecutor") +def test_step_basic(mock_executor_class): """Test step with basic parameters.""" - mock_handler.return_value = "step_result" + mock_executor = MagicMock() + + mock_executor.process.return_value = "step_result" + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -348,19 +390,24 @@ def test_step_basic(mock_handler): result = context.step(mock_callable) assert result == "step_result" - mock_handler.assert_called_once_with( - func=mock_callable, - config=None, + mock_executor_class.assert_called_once_with( state=mock_state, operation_identifier=OperationIdentifier(expected_operation_id, None, None), + config=ANY, # StepConfig() is created in context.step() + func=mock_callable, context_logger=ANY, ) + mock_executor.process.assert_called_once() -@patch("aws_durable_execution_sdk_python.context.step_handler") -def test_step_with_name_and_config(mock_handler): +@patch("aws_durable_execution_sdk_python.context.StepOperationExecutor") +def test_step_with_name_and_config(mock_executor_class): """Test step with name and config.""" - mock_handler.return_value = "configured_result" + mock_executor = MagicMock() + + mock_executor.process.return_value = "configured_result" + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -382,19 +429,24 @@ def test_step_with_name_and_config(mock_handler): expected_id = next(seq) # 6th assert result == "configured_result" - mock_handler.assert_called_once_with( - func=mock_callable, - config=config, + mock_executor_class.assert_called_once_with( state=mock_state, operation_identifier=OperationIdentifier(expected_id, None, None), + config=config, + func=mock_callable, context_logger=ANY, ) + mock_executor.process.assert_called_once() -@patch("aws_durable_execution_sdk_python.context.step_handler") -def test_step_with_parent_id(mock_handler): +@patch("aws_durable_execution_sdk_python.context.StepOperationExecutor") +def test_step_with_parent_id(mock_executor_class): """Test step with parent_id.""" - mock_handler.return_value = "parent_result" + mock_executor = MagicMock() + + mock_executor.process.return_value = "parent_result" + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -414,19 +466,24 @@ def test_step_with_parent_id(mock_handler): [next(seq) for _ in range(2)] # Skip first 2 expected_id = next(seq) # 3rd - mock_handler.assert_called_once_with( - func=mock_callable, - config=None, + mock_executor_class.assert_called_once_with( state=mock_state, operation_identifier=OperationIdentifier(expected_id, "parent123"), + config=ANY, + func=mock_callable, context_logger=ANY, ) + mock_executor.process.assert_called_once() -@patch("aws_durable_execution_sdk_python.context.step_handler") -def test_step_increments_counter(mock_handler): +@patch("aws_durable_execution_sdk_python.context.StepOperationExecutor") +def 
test_step_increments_counter(mock_executor_class): """Test step increments step counter.""" - mock_handler.return_value = "result" + mock_executor = MagicMock() + + mock_executor.process.return_value = "result" + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -449,18 +506,22 @@ def test_step_increments_counter(mock_handler): expected_id2 = next(seq) # 12th assert context._step_counter.get_current() == 12 # noqa: SLF001 - assert mock_handler.call_args_list[0][1][ + assert mock_executor_class.call_args_list[0][1][ "operation_identifier" ] == OperationIdentifier(expected_id1, None, None) - assert mock_handler.call_args_list[1][1][ + assert mock_executor_class.call_args_list[1][1][ "operation_identifier" ] == OperationIdentifier(expected_id2, None, None) -@patch("aws_durable_execution_sdk_python.context.step_handler") -def test_step_with_original_name(mock_handler): +@patch("aws_durable_execution_sdk_python.context.StepOperationExecutor") +def test_step_with_original_name(mock_executor_class): """Test step with callable that has _original_name attribute.""" - mock_handler.return_value = "named_result" + mock_executor = MagicMock() + + mock_executor.process.return_value = "named_result" + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -476,23 +537,28 @@ def test_step_with_original_name(mock_handler): seq = operation_id_sequence() expected_id = next(seq) # 1st - mock_handler.assert_called_once_with( - func=mock_callable, - config=None, + mock_executor_class.assert_called_once_with( state=mock_state, operation_identifier=OperationIdentifier(expected_id, None, "override_name"), + config=ANY, + func=mock_callable, context_logger=ANY, ) + mock_executor.process.assert_called_once() # endregion step # region invoke -@patch("aws_durable_execution_sdk_python.context.invoke_handler") -def test_invoke_basic(mock_handler): +@patch("aws_durable_execution_sdk_python.context.InvokeOperationExecutor") +def test_invoke_basic(mock_executor_class): """Test invoke with basic parameters.""" - mock_handler.return_value = "invoke_result" + mock_executor = MagicMock() + + mock_executor.process.return_value = "invoke_result" + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -506,24 +572,29 @@ def test_invoke_basic(mock_handler): assert result == "invoke_result" - mock_handler.assert_called_once_with( - function_name="test_function", - payload="test_payload", + mock_executor_class.assert_called_once_with( state=mock_state, operation_identifier=OperationIdentifier(expected_operation_id, None, None), - config=None, + function_name="test_function", + payload="test_payload", + config=ANY, # InvokeConfig() is created in context.invoke() ) + mock_executor.process.assert_called_once() -@patch("aws_durable_execution_sdk_python.context.invoke_handler") -def test_invoke_with_name_and_config(mock_handler): +@patch("aws_durable_execution_sdk_python.context.InvokeOperationExecutor") +def test_invoke_with_name_and_config(mock_executor_class): """Test invoke with name and config.""" - mock_handler.return_value = "configured_result" + mock_executor = MagicMock() + + mock_executor.process.return_value = "configured_result" + + 
mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" ) - config = InvokeConfig[str, str](timeout_seconds=30) + config = InvokeConfig[str, str](timeout=Duration.from_seconds(30)) context = DurableContext(state=mock_state) [context._create_step_id() for _ in range(5)] # Set counter to 5 # noqa: SLF001 @@ -538,19 +609,24 @@ def test_invoke_with_name_and_config(mock_handler): expected_id = next(seq) # 6th assert result == "configured_result" - mock_handler.assert_called_once_with( - function_name="test_function", - payload={"key": "value"}, + mock_executor_class.assert_called_once_with( state=mock_state, operation_identifier=OperationIdentifier(expected_id, None, "named_invoke"), + function_name="test_function", + payload={"key": "value"}, config=config, ) + mock_executor.process.assert_called_once() -@patch("aws_durable_execution_sdk_python.context.invoke_handler") -def test_invoke_with_parent_id(mock_handler): +@patch("aws_durable_execution_sdk_python.context.InvokeOperationExecutor") +def test_invoke_with_parent_id(mock_executor_class): """Test invoke with parent_id.""" - mock_handler.return_value = "parent_result" + mock_executor = MagicMock() + + mock_executor.process.return_value = "parent_result" + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -565,19 +641,24 @@ def test_invoke_with_parent_id(mock_handler): [next(seq) for _ in range(2)] expected_id = next(seq) - mock_handler.assert_called_once_with( - function_name="test_function", - payload=None, + mock_executor_class.assert_called_once_with( state=mock_state, operation_identifier=OperationIdentifier(expected_id, "parent123", None), - config=None, + function_name="test_function", + payload=None, + config=ANY, ) + mock_executor.process.assert_called_once() -@patch("aws_durable_execution_sdk_python.context.invoke_handler") -def test_invoke_increments_counter(mock_handler): +@patch("aws_durable_execution_sdk_python.context.InvokeOperationExecutor") +def test_invoke_increments_counter(mock_executor_class): """Test invoke increments step counter.""" - mock_handler.return_value = "result" + mock_executor = MagicMock() + + mock_executor.process.return_value = "result" + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -595,18 +676,22 @@ def test_invoke_increments_counter(mock_handler): expected_id2 = next(seq) assert context._step_counter.get_current() == 12 # noqa: SLF001 - assert mock_handler.call_args_list[0][1][ + assert mock_executor_class.call_args_list[0][1][ "operation_identifier" ] == OperationIdentifier(expected_id1, None, None) - assert mock_handler.call_args_list[1][1][ + assert mock_executor_class.call_args_list[1][1][ "operation_identifier" ] == OperationIdentifier(expected_id2, None, None) -@patch("aws_durable_execution_sdk_python.context.invoke_handler") -def test_invoke_with_none_payload(mock_handler): +@patch("aws_durable_execution_sdk_python.context.InvokeOperationExecutor") +def test_invoke_with_none_payload(mock_executor_class): """Test invoke with None payload.""" - mock_handler.return_value = None + mock_executor = MagicMock() + + mock_executor.process.return_value = None + + mock_executor_class.return_value = 
mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -621,28 +706,35 @@ def test_invoke_with_none_payload(mock_handler): assert result is None - mock_handler.assert_called_once_with( - function_name="test_function", - payload=None, + mock_executor_class.assert_called_once_with( state=mock_state, operation_identifier=OperationIdentifier(expected_id, None, None), - config=None, + function_name="test_function", + payload=None, + config=ANY, ) + mock_executor.process.assert_called_once() -@patch("aws_durable_execution_sdk_python.context.invoke_handler") -def test_invoke_with_custom_serdes(mock_handler): +@patch("aws_durable_execution_sdk_python.context.InvokeOperationExecutor") +def test_invoke_with_custom_serdes(mock_executor_class): """Test invoke with custom serialization config.""" - mock_handler.return_value = {"transformed": "data"} + mock_executor = MagicMock() + + mock_executor.process.return_value = {"transformed": "data"} + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" ) + payload_serdes = CustomDictSerDes() + result_serdes = CustomDictSerDes() config = InvokeConfig[dict, dict]( - serdes_payload=CustomDictSerDes(), - serdes_result=CustomDictSerDes(), - timeout_seconds=60, + serdes_payload=payload_serdes, + serdes_result=result_serdes, + timeout=Duration.from_minutes(1), ) context = DurableContext(state=mock_state) @@ -658,24 +750,29 @@ def test_invoke_with_custom_serdes(mock_handler): expected_id = next(seq) assert result == {"transformed": "data"} - mock_handler.assert_called_once_with( - function_name="test_function", - payload={"original": "data"}, + mock_executor_class.assert_called_once_with( state=mock_state, operation_identifier=OperationIdentifier( expected_id, None, "custom_serdes_invoke" ), + function_name="test_function", + payload={"original": "data"}, config=config, ) + mock_executor.process.assert_called_once() # endregion invoke # region wait -@patch("aws_durable_execution_sdk_python.context.wait_handler") -def test_wait_basic(mock_handler): +@patch("aws_durable_execution_sdk_python.context.WaitOperationExecutor") +def test_wait_basic(mock_executor_class): """Test wait with basic parameters.""" + mock_executor = MagicMock() + mock_executor.process.return_value = None + mock_executor_class.return_value = mock_executor + mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -685,18 +782,23 @@ def test_wait_basic(mock_handler): operation_ids = operation_id_sequence() expected_operation_id = next(operation_ids) - context.wait(30) + context.wait(Duration.from_seconds(30)) - mock_handler.assert_called_once_with( - seconds=30, + mock_executor_class.assert_called_once_with( state=mock_state, operation_identifier=OperationIdentifier(expected_operation_id, None, None), + seconds=30, ) + mock_executor.process.assert_called_once() -@patch("aws_durable_execution_sdk_python.context.wait_handler") -def test_wait_with_name(mock_handler): +@patch("aws_durable_execution_sdk_python.context.WaitOperationExecutor") +def test_wait_with_name(mock_executor_class): """Test wait with name parameter.""" + mock_executor = MagicMock() + mock_executor.process.return_value = None + mock_executor_class.return_value = mock_executor + mock_state = Mock(spec=ExecutionState) 
mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -705,22 +807,27 @@ def test_wait_with_name(mock_handler): context = DurableContext(state=mock_state) [context._create_step_id() for _ in range(5)] # Set counter to 5 # noqa: SLF001 - context.wait(60, name="test_wait") + context.wait(Duration.from_minutes(1), name="test_wait") seq = operation_id_sequence() [next(seq) for _ in range(5)] expected_id = next(seq) - mock_handler.assert_called_once_with( - seconds=60, + mock_executor_class.assert_called_once_with( state=mock_state, operation_identifier=OperationIdentifier(expected_id, None, "test_wait"), + seconds=60, ) + mock_executor.process.assert_called_once() -@patch("aws_durable_execution_sdk_python.context.wait_handler") -def test_wait_with_parent_id(mock_handler): +@patch("aws_durable_execution_sdk_python.context.WaitOperationExecutor") +def test_wait_with_parent_id(mock_executor_class): """Test wait with parent_id.""" + mock_executor = MagicMock() + mock_executor.process.return_value = None + mock_executor_class.return_value = mock_executor + mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -729,22 +836,27 @@ def test_wait_with_parent_id(mock_handler): context = DurableContext(state=mock_state, parent_id="parent123") [context._create_step_id() for _ in range(2)] # Set counter to 2 # noqa: SLF001 - context.wait(45) + context.wait(Duration.from_seconds(45)) seq = operation_id_sequence("parent123") [next(seq) for _ in range(2)] expected_id = next(seq) - mock_handler.assert_called_once_with( - seconds=45, + mock_executor_class.assert_called_once_with( state=mock_state, operation_identifier=OperationIdentifier(expected_id, "parent123"), + seconds=45, ) + mock_executor.process.assert_called_once() -@patch("aws_durable_execution_sdk_python.context.wait_handler") -def test_wait_increments_counter(mock_handler): +@patch("aws_durable_execution_sdk_python.context.WaitOperationExecutor") +def test_wait_increments_counter(mock_executor_class): """Test wait increments step counter.""" + mock_executor = MagicMock() + mock_executor.process.return_value = None + mock_executor_class.return_value = mock_executor + mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -753,8 +865,8 @@ def test_wait_increments_counter(mock_handler): context = DurableContext(state=mock_state) [context._create_step_id() for _ in range(10)] # Set counter to 10 # noqa: SLF001 - context.wait(15) - context.wait(25) + context.wait(Duration.from_seconds(15)) + context.wait(Duration.from_seconds(25)) seq = operation_id_sequence() [next(seq) for _ in range(10)] @@ -762,17 +874,21 @@ def test_wait_increments_counter(mock_handler): expected_id2 = next(seq) assert context._step_counter.get_current() == 12 # noqa: SLF001 - assert mock_handler.call_args_list[0][1][ + assert mock_executor_class.call_args_list[0][1][ "operation_identifier" ] == OperationIdentifier(expected_id1, None, None) - assert mock_handler.call_args_list[1][1][ + assert mock_executor_class.call_args_list[1][1][ "operation_identifier" ] == OperationIdentifier(expected_id2, None, None) -@patch("aws_durable_execution_sdk_python.context.wait_handler") -def test_wait_returns_none(mock_handler): +@patch("aws_durable_execution_sdk_python.context.WaitOperationExecutor") +def test_wait_returns_none(mock_executor_class): """Test wait returns None.""" + mock_executor = 
MagicMock() + mock_executor.process.return_value = None + mock_executor_class.return_value = mock_executor + mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -780,14 +896,18 @@ def test_wait_returns_none(mock_handler): context = DurableContext(state=mock_state) - result = context.wait(10) + result = context.wait(Duration.from_seconds(10)) assert result is None -@patch("aws_durable_execution_sdk_python.context.wait_handler") -def test_wait_with_time_less_than_one(mock_handler): +@patch("aws_durable_execution_sdk_python.context.WaitOperationExecutor") +def test_wait_with_time_less_than_one(mock_executor_class): """Test wait with time less than one.""" + mock_executor = MagicMock() + mock_executor.process.return_value = None + mock_executor_class.return_value = mock_executor + mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -796,7 +916,7 @@ def test_wait_with_time_less_than_one(mock_handler): context = DurableContext(state=mock_state) with pytest.raises(ValidationError): - context.wait(0) + context.wait(Duration.from_seconds(0)) # endregion wait @@ -865,9 +985,13 @@ def test_run_in_child_context_with_name_and_config(mock_handler): @patch("aws_durable_execution_sdk_python.context.child_handler") -def test_run_in_child_context_with_parent_id(mock_handler): +def test_run_in_child_context_with_parent_id(mock_executor_class): """Test run_in_child_context with parent_id.""" - mock_handler.return_value = "parent_child_result" + mock_executor = MagicMock() + + mock_executor.process.return_value = "parent_child_result" + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -886,14 +1010,14 @@ def test_run_in_child_context_with_parent_id(mock_handler): [next(seq) for _ in range(1)] expected_id = next(seq) - call_args = mock_handler.call_args + call_args = mock_executor_class.call_args assert call_args[1]["operation_identifier"] == OperationIdentifier( expected_id, "parent456", None ) @patch("aws_durable_execution_sdk_python.context.child_handler") -def test_run_in_child_context_creates_child_context(mock_handler): +def test_run_in_child_context_creates_child_context(mock_executor_class): """Test run_in_child_context creates proper child context.""" mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( @@ -911,7 +1035,7 @@ def capture_child_context(child_context): return "child_executed" mock_callable = Mock(side_effect=capture_child_context) - mock_handler.side_effect = lambda func, **kwargs: func() + mock_executor_class.side_effect = lambda func, **kwargs: func() context = DurableContext(state=mock_state) @@ -922,9 +1046,13 @@ def capture_child_context(child_context): @patch("aws_durable_execution_sdk_python.context.child_handler") -def test_run_in_child_context_increments_counter(mock_handler): +def test_run_in_child_context_increments_counter(mock_executor_class): """Test run_in_child_context increments step counter.""" - mock_handler.return_value = "result" + mock_executor = MagicMock() + + mock_executor.process.return_value = "result" + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -946,18 +1074,22 @@ def 
test_run_in_child_context_increments_counter(mock_handler): expected_id2 = next(seq) assert context._step_counter.get_current() == 7 # noqa: SLF001 - assert mock_handler.call_args_list[0][1][ + assert mock_executor_class.call_args_list[0][1][ "operation_identifier" ] == OperationIdentifier(expected_id1, None, None) - assert mock_handler.call_args_list[1][1][ + assert mock_executor_class.call_args_list[1][1][ "operation_identifier" ] == OperationIdentifier(expected_id2, None, None) @patch("aws_durable_execution_sdk_python.context.child_handler") -def test_run_in_child_context_resolves_name_from_callable(mock_handler): +def test_run_in_child_context_resolves_name_from_callable(mock_executor_class): """Test run_in_child_context resolves name from callable._original_name.""" - mock_handler.return_value = "named_result" + mock_executor = MagicMock() + + mock_executor.process.return_value = "named_result" + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -969,7 +1101,7 @@ def test_run_in_child_context_resolves_name_from_callable(mock_handler): context.run_in_child_context(mock_callable) - call_args = mock_handler.call_args + call_args = mock_executor_class.call_args assert call_args[1]["operation_identifier"].name == "original_function_name" @@ -978,9 +1110,13 @@ def test_run_in_child_context_resolves_name_from_callable(mock_handler): # region wait_for_callback @patch("aws_durable_execution_sdk_python.context.wait_for_callback_handler") -def test_wait_for_callback_basic(mock_handler): +def test_wait_for_callback_basic(mock_executor_class): """Test wait_for_callback with basic parameters.""" - mock_handler.return_value = "callback_result" + mock_executor = MagicMock() + + mock_executor.process.return_value = "callback_result" + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -1005,9 +1141,13 @@ def test_wait_for_callback_basic(mock_handler): @patch("aws_durable_execution_sdk_python.context.wait_for_callback_handler") -def test_wait_for_callback_with_name_and_config(mock_handler): +def test_wait_for_callback_with_name_and_config(mock_executor_class): """Test wait_for_callback with name and config.""" - mock_handler.return_value = "configured_callback_result" + mock_executor = MagicMock() + + mock_executor.process.return_value = "configured_callback_result" + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -1030,9 +1170,13 @@ def test_wait_for_callback_with_name_and_config(mock_handler): @patch("aws_durable_execution_sdk_python.context.wait_for_callback_handler") -def test_wait_for_callback_resolves_name_from_submitter(mock_handler): +def test_wait_for_callback_resolves_name_from_submitter(mock_executor_class): """Test wait_for_callback resolves name from submitter._original_name.""" - mock_handler.return_value = "named_callback_result" + mock_executor = MagicMock() + + mock_executor.process.return_value = "named_callback_result" + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -1051,7 +1195,7 @@ def 
test_wait_for_callback_resolves_name_from_submitter(mock_handler): @patch("aws_durable_execution_sdk_python.context.wait_for_callback_handler") -def test_wait_for_callback_passes_child_context(mock_handler): +def test_wait_for_callback_passes_child_context(mock_executor_class): """Test wait_for_callback passes child context to handler.""" mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( @@ -1064,7 +1208,7 @@ def capture_handler_call(context, submitter, name, config): assert submitter is mock_submitter return "handler_result" - mock_handler.side_effect = capture_handler_call + mock_executor_class.side_effect = capture_handler_call with patch.object(DurableContext, "run_in_child_context") as mock_run_in_child: @@ -1079,7 +1223,7 @@ def run_child_context(callable_func, name): result = context.wait_for_callback(mock_submitter) assert result == "handler_result" - mock_handler.assert_called_once() + mock_executor_class.assert_called_once() # endregion wait_for_callback @@ -1582,17 +1726,20 @@ def test_wait_strategy(state, attempt): wait_strategy=test_wait_strategy, initial_state="test" ) - # Mock the handler to track calls + # Mock the executor to track calls with patch( - "aws_durable_execution_sdk_python.context.wait_for_condition_handler" - ) as mock_handler: - mock_handler.return_value = "final_state" + "aws_durable_execution_sdk_python.context.WaitForConditionOperationExecutor" + ) as mock_executor_class: + mock_executor = MagicMock() + mock_executor.process.return_value = "final_state" + mock_executor_class.return_value = mock_executor # Call wait_for_condition method result = context.wait_for_condition(test_check, config) - # Verify wait_for_condition_handler was called (line 425) - mock_handler.assert_called_once() + # Verify executor was called + mock_executor_class.assert_called_once() + mock_executor.process.assert_called_once() assert result == "final_state" @@ -1657,3 +1804,50 @@ def test_operation_id_generation_unique(): for i in range(len(ids) - 1): assert ids[i] != ids[i + 1] + + +@patch("aws_durable_execution_sdk_python.context.InvokeOperationExecutor") +def test_invoke_with_explicit_tenant_id(mock_executor_class): + """Test invoke with explicit tenant_id in config.""" + mock_executor = MagicMock() + + mock_executor.process.return_value = "result" + + mock_executor_class.return_value = mock_executor + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = ( + "arn:aws:durable:us-east-1:123456789012:execution/test" + ) + + config = InvokeConfig(tenant_id="explicit-tenant") + context = DurableContext(state=mock_state) + + result = context.invoke("test_function", "payload", config=config) + + assert result == "result" + call_args = mock_executor_class.call_args[1] + assert call_args["config"].tenant_id == "explicit-tenant" + + +@patch("aws_durable_execution_sdk_python.context.InvokeOperationExecutor") +def test_invoke_without_tenant_id_defaults_to_none(mock_executor_class): + """Test invoke without tenant_id defaults to None.""" + mock_executor = MagicMock() + + mock_executor.process.return_value = "result" + + mock_executor_class.return_value = mock_executor + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = ( + "arn:aws:durable:us-east-1:123456789012:execution/test" + ) + + context = DurableContext(state=mock_state) + + result = context.invoke("test_function", "payload") + + assert result == "result" + # Config is created as InvokeConfig() when not provided + call_args = mock_executor_class.call_args[1] + 
assert isinstance(call_args["config"], InvokeConfig) + assert call_args["config"].tenant_id is None diff --git a/tests/e2e/checkpoint_response_int_test.py b/tests/e2e/checkpoint_response_int_test.py new file mode 100644 index 0000000..c0fd0f5 --- /dev/null +++ b/tests/e2e/checkpoint_response_int_test.py @@ -0,0 +1,768 @@ +"""Integration tests for immediate checkpoint response handling. + +Tests end-to-end operation execution with the immediate response handling +that's implemented via the OperationExecutor base class pattern. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING +from unittest.mock import Mock, patch + +import pytest + +from aws_durable_execution_sdk_python.config import ChildConfig, Duration +from aws_durable_execution_sdk_python.context import DurableContext, durable_step +from aws_durable_execution_sdk_python.exceptions import InvocationError +from aws_durable_execution_sdk_python.execution import ( + InvocationStatus, + durable_execution, +) +from aws_durable_execution_sdk_python.lambda_service import ( + CallbackDetails, + CheckpointOutput, + CheckpointUpdatedExecutionState, + Operation, + OperationStatus, + OperationType, +) + +if TYPE_CHECKING: + from aws_durable_execution_sdk_python.types import StepContext + + +def create_mock_checkpoint_with_operations(): + """Create a mock checkpoint function that properly tracks operations. + + Returns a tuple of (mock_checkpoint_function, checkpoint_calls_list). + The mock properly maintains an operations list that gets updated with each checkpoint. + """ + checkpoint_calls = [] + operations = [ + Operation( + operation_id="execution-1", + operation_type=OperationType.EXECUTION, + status=OperationStatus.STARTED, + ) + ] + + def mock_checkpoint( + durable_execution_arn, + checkpoint_token, + updates, + client_token="token", # noqa: S107 + ): + checkpoint_calls.append(updates) + + # Convert updates to Operation objects and add to operations list + for update in updates: + op = Operation( + operation_id=update.operation_id, + operation_type=update.operation_type, + status=OperationStatus.STARTED, + parent_id=update.parent_id, + ) + operations.append(op) + + return CheckpointOutput( + checkpoint_token="new_token", # noqa: S106 + new_execution_state=CheckpointUpdatedExecutionState( + operations=operations.copy() + ), + ) + + return mock_checkpoint, checkpoint_calls + + +def test_end_to_end_step_operation_with_double_check(): + """Test end-to-end step operation execution with double-check pattern. + + Verifies that the OperationExecutor.process() method properly calls + check_result_status() twice when a checkpoint is created, enabling + immediate response handling. 
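+
+    In outline (illustrative pseudocode, not the SDK's exact internals;
+    check_result_status is the method named above):
+
+        result = check_result_status()      # first check: replay fast path
+        if result is None:
+            create_checkpoint(...)          # may return an immediate response
+            result = check_result_status()  # second check: pick that response up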
+ """ + + @durable_step + def my_step(step_context: StepContext) -> str: + return "step_result" + + @durable_execution + def my_handler(event, context: DurableContext) -> str: + result: str = context.step(my_step()) + return result + + with patch( + "aws_durable_execution_sdk_python.execution.LambdaClient" + ) as mock_client_class: + mock_client = Mock() + mock_client_class.initialize_client.return_value = mock_client + + mock_checkpoint, checkpoint_calls = create_mock_checkpoint_with_operations() + mock_client.checkpoint = mock_checkpoint + + event = { + "DurableExecutionArn": "test-arn", + "CheckpointToken": "test-token", + "InitialExecutionState": { + "Operations": [ + { + "Id": "execution-1", + "Type": "EXECUTION", + "Status": "STARTED", + "ExecutionDetails": {"InputPayload": "{}"}, + } + ], + "NextMarker": "", + }, + "LocalRunner": True, + } + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request-id" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 0 # noqa: SLF001 + lambda_context.invoked_function_arn = "test-arn" + lambda_context.tenant_id = None + + result = my_handler(event, lambda_context) + + assert result["Status"] == InvocationStatus.SUCCEEDED.value + assert result["Result"] == '"step_result"' + + # Verify checkpoints were created (START + SUCCEED) + all_operations = [op for batch in checkpoint_calls for op in batch] + assert len(all_operations) == 2 + + +def test_end_to_end_multiple_operations_execute_sequentially(): + """Test end-to-end execution with multiple operations. + + Verifies that multiple operations in a workflow execute correctly + with the immediate response handling pattern. + """ + + @durable_step + def step1(step_context: StepContext) -> str: + return "result1" + + @durable_step + def step2(step_context: StepContext) -> str: + return "result2" + + @durable_execution + def my_handler(event, context: DurableContext) -> list[str]: + return [context.step(step1()), context.step(step2())] + + with patch( + "aws_durable_execution_sdk_python.execution.LambdaClient" + ) as mock_client_class: + mock_client = Mock() + mock_client_class.initialize_client.return_value = mock_client + + mock_checkpoint, checkpoint_calls = create_mock_checkpoint_with_operations() + mock_client.checkpoint = mock_checkpoint + + event = { + "DurableExecutionArn": "test-arn", + "CheckpointToken": "test-token", + "InitialExecutionState": { + "Operations": [ + { + "Id": "execution-1", + "Type": "EXECUTION", + "Status": "STARTED", + "ExecutionDetails": {"InputPayload": "{}"}, + } + ], + "NextMarker": "", + }, + "LocalRunner": True, + } + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request-id" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 0 # noqa: SLF001 + lambda_context.invoked_function_arn = "test-arn" + lambda_context.tenant_id = None + + result = my_handler(event, lambda_context) + + assert result["Status"] == InvocationStatus.SUCCEEDED.value + assert result["Result"] == '["result1", "result2"]' + + # Verify all checkpoints were created (2 START + 2 SUCCEED) + all_operations = [op for batch in checkpoint_calls for op in batch] + assert len(all_operations) == 4 + + +def test_end_to_end_wait_operation_with_double_check(): + """Test end-to-end wait operation execution with double-check pattern. + + Verifies that wait operations properly use the double-check pattern + for immediate response handling. 
+ """ + + @durable_execution + def my_handler(event, context: DurableContext) -> str: + context.wait(Duration.from_seconds(5)) + return "completed" + + with patch( + "aws_durable_execution_sdk_python.execution.LambdaClient" + ) as mock_client_class: + mock_client = Mock() + mock_client_class.initialize_client.return_value = mock_client + + mock_checkpoint, checkpoint_calls = create_mock_checkpoint_with_operations() + mock_client.checkpoint = mock_checkpoint + + event = { + "DurableExecutionArn": "test-arn", + "CheckpointToken": "test-token", + "InitialExecutionState": { + "Operations": [ + { + "Id": "execution-1", + "Type": "EXECUTION", + "Status": "STARTED", + "ExecutionDetails": {"InputPayload": "{}"}, + } + ], + "NextMarker": "", + }, + "LocalRunner": True, + } + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request-id" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 0 # noqa: SLF001 + lambda_context.invoked_function_arn = "test-arn" + lambda_context.tenant_id = None + + # Wait will suspend, so we expect PENDING status + result = my_handler(event, lambda_context) + + assert result["Status"] == InvocationStatus.PENDING.value + + # Verify wait checkpoint was created + all_operations = [op for batch in checkpoint_calls for op in batch] + assert len(all_operations) >= 1 + + +def test_end_to_end_checkpoint_synchronization_with_operations_list(): + """Test that synchronous checkpoints properly update operations list. + + Verifies that when is_sync=True, the operations list is updated + before the second status check occurs. + """ + + @durable_step + def my_step(step_context: StepContext) -> str: + return "result" + + @durable_execution + def my_handler(event, context: DurableContext) -> str: + return context.step(my_step()) + + with patch( + "aws_durable_execution_sdk_python.execution.LambdaClient" + ) as mock_client_class: + mock_client = Mock() + mock_client_class.initialize_client.return_value = mock_client + + mock_checkpoint, checkpoint_calls = create_mock_checkpoint_with_operations() + mock_client.checkpoint = mock_checkpoint + + event = { + "DurableExecutionArn": "test-arn", + "CheckpointToken": "test-token", + "InitialExecutionState": { + "Operations": [ + { + "Id": "execution-1", + "Type": "EXECUTION", + "Status": "STARTED", + "ExecutionDetails": {"InputPayload": "{}"}, + } + ], + "NextMarker": "", + }, + "LocalRunner": True, + } + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request-id" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 0 # noqa: SLF001 + lambda_context.invoked_function_arn = "test-arn" + lambda_context.tenant_id = None + + result = my_handler(event, lambda_context) + + assert result["Status"] == InvocationStatus.SUCCEEDED.value + + # Verify operations list was properly maintained + all_operations = [op for batch in checkpoint_calls for op in batch] + assert len(all_operations) >= 2 # At least START and SUCCEED + + +def test_callback_deferred_error_handling_to_result(): + """Test callback deferred error handling pattern. + + Verifies that callback operations properly return callback_id through + the immediate response handling pattern, enabling deferred error handling. 
+ """ + + @durable_step + def step_after_callback(step_context: StepContext) -> str: + return "code_executed_after_callback" + + @durable_execution + def my_handler(event, context: DurableContext) -> str: + # Create callback + callback_id = context.create_callback("test_callback") + + # This code executes even if callback will eventually fail + # This is the deferred error handling pattern + result = context.step(step_after_callback()) + + return f"{callback_id}:{result}" + + with patch( + "aws_durable_execution_sdk_python.execution.LambdaClient" + ) as mock_client_class: + mock_client = Mock() + mock_client_class.initialize_client.return_value = mock_client + + checkpoint_calls = [] + operations = [ + Operation( + operation_id="execution-1", + operation_type=OperationType.EXECUTION, + status=OperationStatus.STARTED, + ) + ] + + def mock_checkpoint( + durable_execution_arn, + checkpoint_token, + updates, + client_token="token", # noqa: S107 + ): + checkpoint_calls.append(updates) + + # Add operations with proper details + for update in updates: + if update.operation_type == OperationType.CALLBACK: + op = Operation( + operation_id=update.operation_id, + operation_type=update.operation_type, + status=OperationStatus.STARTED, + parent_id=update.parent_id, + callback_details=CallbackDetails( + callback_id=f"cb-{update.operation_id[:8]}" + ), + ) + else: + op = Operation( + operation_id=update.operation_id, + operation_type=update.operation_type, + status=OperationStatus.STARTED, + parent_id=update.parent_id, + ) + operations.append(op) + + return CheckpointOutput( + checkpoint_token="new_token", # noqa: S106 + new_execution_state=CheckpointUpdatedExecutionState( + operations=operations.copy() + ), + ) + + mock_client.checkpoint = mock_checkpoint + + event = { + "DurableExecutionArn": "test-arn", + "CheckpointToken": "test-token", + "InitialExecutionState": { + "Operations": [ + { + "Id": "execution-1", + "Type": "EXECUTION", + "Status": "STARTED", + "ExecutionDetails": {"InputPayload": "{}"}, + } + ], + "NextMarker": "", + }, + "LocalRunner": True, + } + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request-id" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 0 # noqa: SLF001 + lambda_context.invoked_function_arn = "test-arn" + lambda_context.tenant_id = None + + result = my_handler(event, lambda_context) + + # Verify execution succeeded and code after callback executed + assert result["Status"] == InvocationStatus.SUCCEEDED.value + assert "code_executed_after_callback" in result["Result"] + + +def test_end_to_end_invoke_operation_with_double_check(): + """Test end-to-end invoke operation execution with double-check pattern. + + Verifies that invoke operations properly use the double-check pattern + for immediate response handling. 
+ """ + + @durable_execution + def my_handler(event, context: DurableContext): + context.invoke("my-function", {"data": "test"}) + + with patch( + "aws_durable_execution_sdk_python.execution.LambdaClient" + ) as mock_client_class: + mock_client = Mock() + mock_client_class.initialize_client.return_value = mock_client + + mock_checkpoint, checkpoint_calls = create_mock_checkpoint_with_operations() + mock_client.checkpoint = mock_checkpoint + + event = { + "DurableExecutionArn": "test-arn", + "CheckpointToken": "test-token", + "InitialExecutionState": { + "Operations": [ + { + "Id": "execution-1", + "Type": "EXECUTION", + "Status": "STARTED", + "ExecutionDetails": {"InputPayload": "{}"}, + } + ], + "NextMarker": "", + }, + "LocalRunner": True, + } + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request-id" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 0 # noqa: SLF001 + lambda_context.invoked_function_arn = "test-arn" + lambda_context.tenant_id = None + + # Invoke will suspend, so we expect PENDING status + result = my_handler(event, lambda_context) + + assert result["Status"] == InvocationStatus.PENDING.value + + # Verify invoke checkpoint was created + all_operations = [op for batch in checkpoint_calls for op in batch] + assert len(all_operations) >= 1 + + +def test_end_to_end_child_context_with_async_checkpoint(): + """Test end-to-end child context execution with async checkpoint. + + Verifies that child context operations use async checkpoint (is_sync=False) + and execute correctly without waiting for immediate response. + """ + + def child_function(ctx: DurableContext) -> str: + return "child_result" + + @durable_execution + def my_handler(event, context: DurableContext) -> str: + result: str = context.run_in_child_context(child_function) + return result + + with patch( + "aws_durable_execution_sdk_python.execution.LambdaClient" + ) as mock_client_class: + mock_client = Mock() + mock_client_class.initialize_client.return_value = mock_client + + mock_checkpoint, checkpoint_calls = create_mock_checkpoint_with_operations() + mock_client.checkpoint = mock_checkpoint + + event = { + "DurableExecutionArn": "test-arn", + "CheckpointToken": "test-token", + "InitialExecutionState": { + "Operations": [ + { + "Id": "execution-1", + "Type": "EXECUTION", + "Status": "STARTED", + "ExecutionDetails": {"InputPayload": "{}"}, + } + ], + "NextMarker": "", + }, + "LocalRunner": True, + } + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request-id" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 0 # noqa: SLF001 + lambda_context.invoked_function_arn = "test-arn" + lambda_context.tenant_id = None + + result = my_handler(event, lambda_context) + + assert result["Status"] == InvocationStatus.SUCCEEDED.value + assert result["Result"] == '"child_result"' + + # Verify checkpoints were created (START + SUCCEED) + all_operations = [op for batch in checkpoint_calls for op in batch] + assert len(all_operations) == 2 + + +def test_end_to_end_child_context_replay_children_mode(): + """Test end-to-end child context with large payload and ReplayChildren mode. + + Verifies that child context with large result (>256KB) triggers replay_children mode, + uses summary generator if provided, and re-executes function on replay. 
+ """ + execution_count = {"count": 0} + + def child_function_with_large_result(ctx: DurableContext) -> str: + execution_count["count"] += 1 + return "large" * 256 * 1024 + + def summary_generator(result: str) -> str: + return f"summary_of_{len(result)}_bytes" + + @durable_execution + def my_handler(event, context: DurableContext) -> str: + context.run_in_child_context( + child_function_with_large_result, + config=ChildConfig(summary_generator=summary_generator), + ) + return f"executed_{execution_count['count']}_times" + + with patch( + "aws_durable_execution_sdk_python.execution.LambdaClient" + ) as mock_client_class: + mock_client = Mock() + mock_client_class.initialize_client.return_value = mock_client + + checkpoint_calls = [] + operations = [ + Operation( + operation_id="execution-1", + operation_type=OperationType.EXECUTION, + status=OperationStatus.STARTED, + ) + ] + + def mock_checkpoint( + durable_execution_arn, + checkpoint_token, + updates, + client_token="token", # noqa: S107 + ): + checkpoint_calls.append(updates) + + for update in updates: + op = Operation( + operation_id=update.operation_id, + operation_type=update.operation_type, + status=OperationStatus.STARTED, + parent_id=update.parent_id, + ) + operations.append(op) + + return CheckpointOutput( + checkpoint_token="new_token", # noqa: S106 + new_execution_state=CheckpointUpdatedExecutionState( + operations=operations.copy() + ), + ) + + mock_client.checkpoint = mock_checkpoint + + event = { + "DurableExecutionArn": "test-arn", + "CheckpointToken": "test-token", + "InitialExecutionState": { + "Operations": [ + { + "Id": "execution-1", + "Type": "EXECUTION", + "Status": "STARTED", + "ExecutionDetails": {"InputPayload": "{}"}, + } + ], + "NextMarker": "", + }, + "LocalRunner": True, + } + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request-id" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 0 # noqa: SLF001 + lambda_context.invoked_function_arn = "test-arn" + lambda_context.tenant_id = None + + result = my_handler(event, lambda_context) + + assert result["Status"] == InvocationStatus.SUCCEEDED.value + # Function executed once during initial execution + assert execution_count["count"] == 1 + + # Verify replay_children was set in SUCCEED checkpoint + all_operations = [op for batch in checkpoint_calls for op in batch] + succeed_updates = [ + op + for op in all_operations + if hasattr(op, "action") and op.action.value == "SUCCEED" + ] + assert len(succeed_updates) == 1 + assert succeed_updates[0].context_options.replay_children is True + + +def test_end_to_end_child_context_error_handling(): + """Test end-to-end child context error handling. + + Verifies that child context that raises exception creates FAIL checkpoint + and error is wrapped as CallableRuntimeError. 
+ """ + + def child_function_that_fails(ctx: DurableContext) -> str: + msg = "Child function error" + raise ValueError(msg) + + @durable_execution + def my_handler(event, context: DurableContext) -> str: + result: str = context.run_in_child_context(child_function_that_fails) + return result + + with patch( + "aws_durable_execution_sdk_python.execution.LambdaClient" + ) as mock_client_class: + mock_client = Mock() + mock_client_class.initialize_client.return_value = mock_client + + mock_checkpoint, checkpoint_calls = create_mock_checkpoint_with_operations() + mock_client.checkpoint = mock_checkpoint + + event = { + "DurableExecutionArn": "test-arn", + "CheckpointToken": "test-token", + "InitialExecutionState": { + "Operations": [ + { + "Id": "execution-1", + "Type": "EXECUTION", + "Status": "STARTED", + "ExecutionDetails": {"InputPayload": "{}"}, + } + ], + "NextMarker": "", + }, + "LocalRunner": True, + } + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request-id" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 0 # noqa: SLF001 + lambda_context.invoked_function_arn = "test-arn" + lambda_context.tenant_id = None + + result = my_handler(event, lambda_context) + + # Verify execution failed + assert result["Status"] == InvocationStatus.FAILED.value + + # Verify FAIL checkpoint was created + all_operations = [op for batch in checkpoint_calls for op in batch] + fail_updates = [ + op + for op in all_operations + if hasattr(op, "action") and op.action.value == "FAIL" + ] + assert len(fail_updates) == 1 + + +def test_end_to_end_child_context_invocation_error_reraised(): + """Test end-to-end child context InvocationError re-raising. + + Verifies that child context that raises InvocationError creates FAIL checkpoint + and re-raises InvocationError (not wrapped) to enable retry at execution handler level. 
+ """ + + def child_function_with_invocation_error(ctx: DurableContext) -> str: + msg = "Invocation failed in child" + raise InvocationError(msg) + + @durable_execution + def my_handler(event, context: DurableContext) -> str: + result: str = context.run_in_child_context(child_function_with_invocation_error) + return result + + with patch( + "aws_durable_execution_sdk_python.execution.LambdaClient" + ) as mock_client_class: + mock_client = Mock() + mock_client_class.initialize_client.return_value = mock_client + + mock_checkpoint, checkpoint_calls = create_mock_checkpoint_with_operations() + mock_client.checkpoint = mock_checkpoint + + event = { + "DurableExecutionArn": "test-arn", + "CheckpointToken": "test-token", + "InitialExecutionState": { + "Operations": [ + { + "Id": "execution-1", + "Type": "EXECUTION", + "Status": "STARTED", + "ExecutionDetails": {"InputPayload": "{}"}, + } + ], + "NextMarker": "", + }, + "LocalRunner": True, + } + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request-id" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 0 # noqa: SLF001 + lambda_context.invoked_function_arn = "test-arn" + lambda_context.tenant_id = None + + # InvocationError should be re-raised (not wrapped) to trigger Lambda retry + with pytest.raises(InvocationError, match="Invocation failed in child"): + my_handler(event, lambda_context) + + # Verify FAIL checkpoint was created before re-raising + all_operations = [op for batch in checkpoint_calls for op in batch] + fail_updates = [ + op + for op in all_operations + if hasattr(op, "action") and op.action.value == "FAIL" + ] + assert len(fail_updates) == 1 diff --git a/tests/e2e/execution_int_test.py b/tests/e2e/execution_int_test.py index b5ec116..5a884bf 100644 --- a/tests/e2e/execution_int_test.py +++ b/tests/e2e/execution_int_test.py @@ -7,21 +7,24 @@ import pytest +from aws_durable_execution_sdk_python.config import Duration from aws_durable_execution_sdk_python.context import ( DurableContext, durable_step, + durable_wait_for_callback, durable_with_child_context, ) from aws_durable_execution_sdk_python.execution import ( InvocationStatus, durable_execution, ) - -# LambdaContext no longer needed - using duck typing from aws_durable_execution_sdk_python.lambda_service import ( + CallbackDetails, CheckpointOutput, CheckpointUpdatedExecutionState, + Operation, OperationAction, + OperationStatus, OperationType, ) from aws_durable_execution_sdk_python.logger import LoggerInterface @@ -31,6 +34,49 @@ from aws_durable_execution_sdk_python.types import StepContext +def create_mock_checkpoint_with_operations(): + """Create a mock checkpoint function that properly tracks operations. + + Returns a tuple of (mock_checkpoint_function, checkpoint_calls_list). + The mock properly maintains an operations list that gets updated with each checkpoint. 
+ """ + checkpoint_calls = [] + operations = [ + Operation( + operation_id="execution-1", + operation_type=OperationType.EXECUTION, + status=OperationStatus.STARTED, + ) + ] + + def mock_checkpoint( + durable_execution_arn, + checkpoint_token, + updates, + client_token="token", # noqa: S107 + ): + checkpoint_calls.append(updates) + + # Convert updates to Operation objects and add to operations list + for update in updates: + op = Operation( + operation_id=update.operation_id, + operation_type=update.operation_type, + status=OperationStatus.STARTED, # New operations start as STARTED + parent_id=update.parent_id, + ) + operations.append(op) + + return CheckpointOutput( + checkpoint_token="new_token", # noqa: S106 + new_execution_state=CheckpointUpdatedExecutionState( + operations=operations.copy() + ), + ) + + return mock_checkpoint, checkpoint_calls + + def test_step_different_ways_to_pass_args(): def step_plain(step_context: StepContext) -> str: return "from step plain" @@ -67,7 +113,7 @@ def my_handler(event, context: DurableContext) -> list[str]: "aws_durable_execution_sdk_python.execution.LambdaClient" ) as mock_client_class: mock_client = Mock() - mock_client_class.initialize_local_runner_client.return_value = mock_client + mock_client_class.initialize_client.return_value = mock_client # Mock the checkpoint method to track calls checkpoint_calls = [] @@ -153,7 +199,7 @@ def my_handler(event, context: DurableContext): "aws_durable_execution_sdk_python.execution.LambdaClient" ) as mock_client_class: mock_client = Mock() - mock_client_class.initialize_local_runner_client.return_value = mock_client + mock_client_class.initialize_client.return_value = mock_client # Mock the checkpoint method to track calls checkpoint_calls = [] @@ -203,13 +249,6 @@ def mock_checkpoint( # Execute the handler result = my_handler(event, lambda_context) - my_logger.info.assert_called_once_with( - "from step %s %s", - 123, - "str", - extra={"execution_arn": "test-arn", "name": "mystep"}, - ) - assert result["Status"] == InvocationStatus.SUCCEEDED.value # 1 START checkpoint, 1 SUCCEED checkpoint (batched together) @@ -218,6 +257,18 @@ def mock_checkpoint( assert len(all_operations) == 2 operation_id = next(operation_id_sequence()) + my_logger.info.assert_called_once_with( + "from step %s %s", + 123, + "str", + extra={ + "executionArn": "test-arn", + "operationName": "mystep", + "attempt": 1, + "operationId": operation_id, + }, + ) + # Check the START operation start_op = all_operations[0] assert start_op.operation_type == OperationType.STEP @@ -238,7 +289,7 @@ def test_wait_inside_run_in_childcontext(): @durable_with_child_context def func(child_context: DurableContext, a: int, b: int): mock_inside_child(a, b) - child_context.wait(1) + child_context.wait(Duration.from_seconds(1)) @durable_execution def my_handler(event, context): @@ -249,24 +300,10 @@ def my_handler(event, context): "aws_durable_execution_sdk_python.execution.LambdaClient" ) as mock_client_class: mock_client = Mock() - mock_client_class.initialize_local_runner_client.return_value = mock_client - - # Mock the checkpoint method to track calls - checkpoint_calls = [] - - def mock_checkpoint( - durable_execution_arn, - checkpoint_token, - updates, - client_token="token", # noqa: S107 - ): - checkpoint_calls.append(updates) - - return CheckpointOutput( - checkpoint_token="new_token", # noqa: S106 - new_execution_state=CheckpointUpdatedExecutionState(), - ) + mock_client_class.initialize_client.return_value = mock_client + # Use helper to create mock 
that properly tracks operations + mock_checkpoint, checkpoint_calls = create_mock_checkpoint_with_operations() mock_client.checkpoint = mock_checkpoint # Create test event @@ -355,7 +392,7 @@ def my_handler(event, context: DurableContext): "aws_durable_execution_sdk_python.execution.LambdaClient" ) as mock_client_class: mock_client = Mock() - mock_client_class.initialize_local_runner_client.return_value = mock_client + mock_client_class.initialize_client.return_value = mock_client # Mock the checkpoint method to raise an error (using RuntimeError as a generic exception) def mock_checkpoint_failure( @@ -409,7 +446,7 @@ def test_wait_not_caught_by_exception(): @durable_execution def my_handler(event: Any, context: DurableContext): try: - context.wait(1) + context.wait(Duration.from_seconds(1)) except Exception as err: msg = "This should not be caught" raise CustomError(msg) from err @@ -418,9 +455,77 @@ def my_handler(event: Any, context: DurableContext): "aws_durable_execution_sdk_python.execution.LambdaClient" ) as mock_client_class: mock_client = Mock() - mock_client_class.initialize_local_runner_client.return_value = mock_client + mock_client_class.initialize_client.return_value = mock_client + + # Use helper to create mock that properly tracks operations + mock_checkpoint, checkpoint_calls = create_mock_checkpoint_with_operations() + mock_client.checkpoint = mock_checkpoint + + # Create test event + event = { + "DurableExecutionArn": "test-arn", + "CheckpointToken": "test-token", + "InitialExecutionState": { + "Operations": [ + { + "Id": "execution-1", + "Type": "EXECUTION", + "Status": "STARTED", + "ExecutionDetails": {"InputPayload": "{}"}, + } + ], + "NextMarker": "", + }, + "LocalRunner": True, + } + + # Create mock lambda context + lambda_context = Mock() + lambda_context.aws_request_id = "test-request-id" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 0 # noqa: SLF001 + lambda_context.invoked_function_arn = "test-arn" + lambda_context.tenant_id = None + + # Execute the handler + result = my_handler(event, lambda_context) + operation_ids = operation_id_sequence() + + # Assert the execution returns PENDING status + assert result["Status"] == InvocationStatus.PENDING.value + + # Assert that only 1 checkpoint was created for the wait operation + assert len(checkpoint_calls) == 1 + + # Check the wait checkpoint + checkpoint = checkpoint_calls[0][0] + assert checkpoint.operation_type is OperationType.WAIT + assert checkpoint.action is OperationAction.START + assert checkpoint.operation_id == next(operation_ids) + assert checkpoint.wait_options.wait_seconds == 1 + + +def test_durable_wait_for_callback_decorator(): + """Test the durable_wait_for_callback decorator with additional parameters.""" + + mock_submitter = Mock() + + @durable_wait_for_callback + def submit_to_external_system(callback_id, context, task_name, priority): + mock_submitter(callback_id, task_name, priority) + context.logger.info("Submitting %s with callback %s", task_name, callback_id) + + @durable_execution + def my_handler(event, context): + context.wait_for_callback(submit_to_external_system("my_task", priority=5)) + + with patch( + "aws_durable_execution_sdk_python.execution.LambdaClient" + ) as mock_client_class: + mock_client = Mock() + mock_client_class.initialize_client.return_value = mock_client - # Mock the checkpoint method to track calls checkpoint_calls = [] def mock_checkpoint( @@ -431,14 +536,29 @@ def mock_checkpoint( ): 
checkpoint_calls.append(updates) + # For CALLBACK operations, return the operation with callback details + operations = [ + Operation( + operation_id=update.operation_id, + operation_type=OperationType.CALLBACK, + status=OperationStatus.STARTED, + callback_details=CallbackDetails( + callback_id=f"callback-{update.operation_id[:8]}" + ), + ) + for update in updates + if update.operation_type == OperationType.CALLBACK + ] + return CheckpointOutput( checkpoint_token="new_token", # noqa: S106 - new_execution_state=CheckpointUpdatedExecutionState(), + new_execution_state=CheckpointUpdatedExecutionState( + operations=operations, next_marker=None + ), ) mock_client.checkpoint = mock_checkpoint - # Create test event event = { "DurableExecutionArn": "test-arn", "CheckpointToken": "test-token", @@ -456,7 +576,6 @@ def mock_checkpoint( "LocalRunner": True, } - # Create mock lambda context lambda_context = Mock() lambda_context.aws_request_id = "test-request-id" lambda_context.client_context = None @@ -465,19 +584,40 @@ def mock_checkpoint( lambda_context.invoked_function_arn = "test-arn" lambda_context.tenant_id = None - # Execute the handler result = my_handler(event, lambda_context) - operation_ids = operation_id_sequence() - # Assert the execution returns PENDING status assert result["Status"] == InvocationStatus.PENDING.value - # Assert that only 1 checkpoint was created for the wait operation - assert len(checkpoint_calls) == 1 + all_operations = [op for batch in checkpoint_calls for op in batch] + assert len(all_operations) == 4 - # Check the wait checkpoint - checkpoint = checkpoint_calls[0][0] - assert checkpoint.operation_type is OperationType.WAIT - assert checkpoint.action is OperationAction.START - assert checkpoint.operation_id == next(operation_ids) - assert checkpoint.wait_options.wait_seconds == 1 + # First: CONTEXT START + first_checkpoint = all_operations[0] + assert first_checkpoint.operation_type is OperationType.CONTEXT + assert first_checkpoint.action is OperationAction.START + assert first_checkpoint.name == "submit_to_external_system" + + # Second: CALLBACK START + second_checkpoint = all_operations[1] + assert second_checkpoint.operation_type is OperationType.CALLBACK + assert second_checkpoint.action is OperationAction.START + assert second_checkpoint.parent_id == first_checkpoint.operation_id + assert second_checkpoint.name == "submit_to_external_system create callback id" + + # Third: STEP START + third_checkpoint = all_operations[2] + assert third_checkpoint.operation_type is OperationType.STEP + assert third_checkpoint.action is OperationAction.START + assert third_checkpoint.parent_id == first_checkpoint.operation_id + assert third_checkpoint.name == "submit_to_external_system submitter" + + # Fourth: STEP SUCCEED + fourth_checkpoint = all_operations[3] + assert fourth_checkpoint.operation_type is OperationType.STEP + assert fourth_checkpoint.action is OperationAction.SUCCEED + assert fourth_checkpoint.operation_id == third_checkpoint.operation_id + + mock_submitter.assert_called_once() + call_args = mock_submitter.call_args[0] + assert call_args[1] == "my_task" + assert call_args[2] == 5 diff --git a/tests/exceptions_test.py b/tests/exceptions_test.py index ac425ac..f3ed213 100644 --- a/tests/exceptions_test.py +++ b/tests/exceptions_test.py @@ -4,15 +4,18 @@ from unittest.mock import patch import pytest +from botocore.exceptions import ClientError # type: ignore[import-untyped] from aws_durable_execution_sdk_python.exceptions import ( CallableRuntimeError, 
CallableRuntimeErrorSerializableDetails, CheckpointError, + CheckpointErrorCategory, DurableExecutionsError, ExecutionError, InvocationError, OrderedLockError, + OrphanedChildException, StepInterruptedError, SuspendExecution, TerminationReason, @@ -41,13 +44,101 @@ def test_invocation_error(): def test_checkpoint_error(): """Test CheckpointError exception.""" - error = CheckpointError("checkpoint failed") + error = CheckpointError( + "checkpoint failed", error_category=CheckpointErrorCategory.EXECUTION + ) assert str(error) == "checkpoint failed" assert isinstance(error, InvocationError) assert isinstance(error, UnrecoverableError) assert error.termination_reason == TerminationReason.CHECKPOINT_FAILED +def test_checkpoint_error_classification_invalid_token_invocation(): + """Test 4xx InvalidParameterValueException with Invalid Checkpoint Token is invocation error.""" + error_response = { + "Error": { + "Code": "InvalidParameterValueException", + "Message": "Invalid Checkpoint Token: token expired", + }, + "ResponseMetadata": {"HTTPStatusCode": 400}, + } + client_error = ClientError(error_response, "Checkpoint") + + result = CheckpointError.from_exception(client_error) + + assert result.error_category == CheckpointErrorCategory.INVOCATION + assert not result.is_retriable() + + +def test_checkpoint_error_classification_other_4xx_execution(): + """Test other 4xx errors are execution errors.""" + error_response = { + "Error": {"Code": "ValidationException", "Message": "Invalid parameter value"}, + "ResponseMetadata": {"HTTPStatusCode": 400}, + } + client_error = ClientError(error_response, "Checkpoint") + + result = CheckpointError.from_exception(client_error) + + assert result.error_category == CheckpointErrorCategory.EXECUTION + assert result.is_retriable() + + +def test_checkpoint_error_classification_429_invocation(): + """Test 429 errors are invocation errors (retryable).""" + error_response = { + "Error": {"Code": "TooManyRequestsException", "Message": "Rate limit exceeded"}, + "ResponseMetadata": {"HTTPStatusCode": 429}, + } + client_error = ClientError(error_response, "Checkpoint") + + result = CheckpointError.from_exception(client_error) + + assert result.error_category == CheckpointErrorCategory.INVOCATION + assert not result.is_retriable() + + +def test_checkpoint_error_classification_invalid_param_without_token_execution(): + """Test 4xx InvalidParameterValueException without Invalid Checkpoint Token is execution error.""" + error_response = { + "Error": { + "Code": "InvalidParameterValueException", + "Message": "Some other invalid parameter", + }, + "ResponseMetadata": {"HTTPStatusCode": 400}, + } + client_error = ClientError(error_response, "Checkpoint") + + result = CheckpointError.from_exception(client_error) + + assert result.error_category == CheckpointErrorCategory.EXECUTION + assert result.is_retriable() + + +def test_checkpoint_error_classification_5xx_invocation(): + """Test 5xx errors are invocation errors.""" + error_response = { + "Error": {"Code": "InternalServerError", "Message": "Service unavailable"}, + "ResponseMetadata": {"HTTPStatusCode": 500}, + } + client_error = ClientError(error_response, "Checkpoint") + + result = CheckpointError.from_exception(client_error) + + assert result.error_category == CheckpointErrorCategory.INVOCATION + assert not result.is_retriable() + + +def test_checkpoint_error_classification_unknown_invocation(): + """Test unknown errors are invocation errors.""" + unknown_error = Exception("Network timeout") + + result = 
CheckpointError.from_exception(unknown_error) + + assert result.error_category == CheckpointErrorCategory.INVOCATION + assert not result.is_retriable() + + def test_validation_error(): """Test ValidationError exception.""" error = ValidationError("validation failed") @@ -242,3 +333,44 @@ def test_execution_error_with_custom_termination_reason(): error = ExecutionError("custom error", TerminationReason.SERIALIZATION_ERROR) assert str(error) == "custom error" assert error.termination_reason == TerminationReason.SERIALIZATION_ERROR + + +def test_orphaned_child_exception_is_base_exception(): + """Test that OrphanedChildException is a BaseException, not Exception.""" + assert issubclass(OrphanedChildException, BaseException) + assert not issubclass(OrphanedChildException, Exception) + + +def test_orphaned_child_exception_bypasses_user_exception_handler(): + """Test that OrphanedChildException cannot be caught by user's except Exception handler.""" + caught_by_exception = False + caught_by_base_exception = False + exception_instance = None + + try: + msg = "test message" + raise OrphanedChildException(msg, operation_id="test_op_123") + except Exception: # noqa: BLE001 + caught_by_exception = True + except BaseException as e: # noqa: BLE001 + caught_by_base_exception = True + exception_instance = e + + expected_msg = "OrphanedChildException should not be caught by except Exception" + assert not caught_by_exception, expected_msg + expected_base_msg = ( + "OrphanedChildException should be caught by except BaseException" + ) + assert caught_by_base_exception, expected_base_msg + + # Verify operation_id is preserved + assert isinstance(exception_instance, OrphanedChildException) + assert exception_instance.operation_id == "test_op_123" + assert str(exception_instance) == "test message" + + +def test_orphaned_child_exception_with_operation_id(): + """Test OrphanedChildException stores operation_id correctly.""" + exception = OrphanedChildException("parent completed", operation_id="child_op_456") + assert exception.operation_id == "child_op_456" + assert str(exception) == "parent completed" diff --git a/tests/execution_test.py b/tests/execution_test.py index 6678d73..4383ceb 100644 --- a/tests/execution_test.py +++ b/tests/execution_test.py @@ -11,7 +11,9 @@ from aws_durable_execution_sdk_python.config import StepConfig, StepSemantics from aws_durable_execution_sdk_python.context import DurableContext from aws_durable_execution_sdk_python.exceptions import ( + BotoClientError, CheckpointError, + CheckpointErrorCategory, ExecutionError, InvocationError, SuspendExecution, @@ -160,7 +162,6 @@ def test_durable_execution_invocation_input_to_dict(): durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=True, ) result = invocation_input.to_dict() @@ -168,21 +169,18 @@ def test_durable_execution_invocation_input_to_dict(): "DurableExecutionArn": "arn:test:execution", "CheckpointToken": "token123", "InitialExecutionState": initial_state.to_dict(), - "LocalRunner": True, } assert result == expected def test_durable_execution_invocation_input_to_dict_not_local(): - """Test DurableExecutionInvocationInput.to_dict with is_local_runner=False.""" initial_state = InitialExecutionState(operations=[], next_marker="") invocation_input = DurableExecutionInvocationInput( durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, ) result = 
invocation_input.to_dict() @@ -190,7 +188,6 @@ def test_durable_execution_invocation_input_to_dict_not_local(): "DurableExecutionArn": "arn:test:execution", "CheckpointToken": "token123", "InitialExecutionState": initial_state.to_dict(), - "LocalRunner": False, } assert result == expected @@ -205,7 +202,6 @@ def test_durable_execution_invocation_input_with_client_inheritance(): durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=True, service_client=mock_client, ) @@ -215,7 +211,6 @@ def test_durable_execution_invocation_input_with_client_inheritance(): "DurableExecutionArn": "arn:test:execution", "CheckpointToken": "token123", "InitialExecutionState": initial_state.to_dict(), - "LocalRunner": True, } assert result == expected @@ -231,7 +226,6 @@ def test_durable_execution_invocation_input_with_client_from_parent(): durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, ) with_client = DurableExecutionInvocationInputWithClient.from_durable_execution_invocation_input( @@ -241,7 +235,6 @@ def test_durable_execution_invocation_input_with_client_from_parent(): assert with_client.durable_execution_arn == parent_input.durable_execution_arn assert with_client.checkpoint_token == parent_input.checkpoint_token assert with_client.initial_execution_state == parent_input.initial_execution_state - assert with_client.is_local_runner == parent_input.is_local_runner assert with_client.service_client == mock_client @@ -344,7 +337,7 @@ def test_durable_execution_client_selection_env_normal_result(): "aws_durable_execution_sdk_python.execution.LambdaClient" ) as mock_lambda_client: mock_client = Mock(spec=DurableServiceClient) - mock_lambda_client.initialize_from_env.return_value = mock_client + mock_lambda_client.initialize_client.return_value = mock_client # Mock successful checkpoint mock_output = CheckpointOutput( @@ -372,7 +365,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: ], "NextMarker": "", }, - "LocalRunner": False, } lambda_context = Mock() @@ -387,7 +379,7 @@ def test_handler(event: Any, context: DurableContext) -> dict: assert result["Status"] == InvocationStatus.SUCCEEDED.value assert result["Result"] == '{"result": "success"}' - mock_lambda_client.initialize_from_env.assert_called_once() + mock_lambda_client.initialize_client.assert_called_once() mock_client.checkpoint.assert_not_called() @@ -397,7 +389,7 @@ def test_durable_execution_client_selection_env_large_result(): "aws_durable_execution_sdk_python.execution.LambdaClient" ) as mock_lambda_client: mock_client = Mock(spec=DurableServiceClient) - mock_lambda_client.initialize_from_env.return_value = mock_client + mock_lambda_client.initialize_client.return_value = mock_client # Mock successful checkpoint mock_output = CheckpointOutput( @@ -425,7 +417,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: ], "NextMarker": "", }, - "LocalRunner": False, } lambda_context = Mock() @@ -440,7 +431,7 @@ def test_handler(event: Any, context: DurableContext) -> dict: assert result["Status"] == InvocationStatus.SUCCEEDED.value assert not result["Result"] - mock_lambda_client.initialize_from_env.assert_called_once() + mock_lambda_client.initialize_client.assert_called_once() mock_client.checkpoint.assert_called_once() @@ -473,7 +464,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: 
durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, service_client=mock_client, ) @@ -521,7 +511,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, service_client=mock_client, ) @@ -577,7 +566,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, service_client=mock_client, ) @@ -591,17 +579,62 @@ def test_handler(event: Any, context: DurableContext) -> dict: result = test_handler(invocation_input, lambda_context) + # small error, should not call checkpoint assert result["Status"] == InvocationStatus.FAILED.value + assert result["Error"] == {"ErrorMessage": "Test error", "ErrorType": "ValueError"} + + assert not mock_client.checkpoint.called + + +def test_durable_execution_with_large_error_payload(): + """Test that large error payloads trigger checkpoint.""" + mock_client = Mock(spec=DurableServiceClient) + mock_output = CheckpointOutput( + checkpoint_token="new_token", # noqa: S106 + new_execution_state=CheckpointUpdatedExecutionState(), + ) + mock_client.checkpoint.return_value = mock_output + + @durable_execution + def test_handler(event: Any, context: DurableContext) -> dict: + raise ValueError(LARGE_RESULT) + + operation = Operation( + operation_id="exec1", + operation_type=OperationType.EXECUTION, + status=OperationStatus.STARTED, + execution_details=ExecutionDetails(input_payload="{}"), + ) + + initial_state = InitialExecutionState(operations=[operation], next_marker="") + + invocation_input = DurableExecutionInvocationInputWithClient( + durable_execution_arn="arn:test:execution", + checkpoint_token="token123", # noqa: S106 + initial_execution_state=initial_state, + service_client=mock_client, + ) + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 1000000 # noqa: SLF001 + lambda_context.invoked_function_arn = None + lambda_context.tenant_id = None + + result = test_handler(invocation_input, lambda_context) + + assert result["Status"] == InvocationStatus.FAILED.value + assert "Error" not in result mock_client.checkpoint.assert_called_once() - # Verify the checkpoint call was for execution failure call_args = mock_client.checkpoint.call_args updates = call_args[1]["updates"] assert len(updates) == 1 assert updates[0].operation_type == OperationType.EXECUTION assert updates[0].action.value == "FAIL" - assert updates[0].error.message == "Test error" - assert updates[0].error.type == "ValueError" + assert updates[0].error.message == LARGE_RESULT def test_durable_execution_fatal_error_handling(): @@ -626,7 +659,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, service_client=mock_client, ) @@ -665,7 +697,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, service_client=mock_client, ) @@ -688,13 
+719,13 @@ def test_handler(event: Any, context: DurableContext) -> dict: assert error_data["ErrorType"] == "ExecutionError" -def test_durable_execution_client_selection_local_runner(): - """Test durable_execution selects correct client for local runner.""" +def test_durable_execution_client_selection_default(): + """Test durable_execution selects correct client using default initialization.""" with patch( "aws_durable_execution_sdk_python.execution.LambdaClient" ) as mock_lambda_client: mock_client = Mock(spec=DurableServiceClient) - mock_lambda_client.initialize_local_runner_client.return_value = mock_client + mock_lambda_client.initialize_client.return_value = mock_client # Mock successful checkpoint mock_output = CheckpointOutput( @@ -722,7 +753,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: ], "NextMarker": "", }, - "LocalRunner": True, } lambda_context = Mock() @@ -736,17 +766,20 @@ def test_handler(event: Any, context: DurableContext) -> dict: result = test_handler(event, lambda_context) assert result["Status"] == InvocationStatus.SUCCEEDED.value - mock_lambda_client.initialize_local_runner_client.assert_called_once() + mock_lambda_client.initialize_client.assert_called_once() def test_initial_execution_state_get_execution_operation_no_operations(): - """Test get_execution_operation raises error when no operations exist.""" + """Test get_execution_operation logs debug and returns None when no operations exist.""" state = InitialExecutionState(operations=[], next_marker="") - with pytest.raises( - Exception, match="No durable operations found in initial execution state" - ): - state.get_execution_operation() + with patch("aws_durable_execution_sdk_python.execution.logger") as mock_logger: + result = state.get_execution_operation() + + assert result is None + mock_logger.debug.assert_called_once_with( + "No durable operations found in initial execution state." 
+ ) def test_initial_execution_state_get_execution_operation_wrong_type(): @@ -803,7 +836,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, service_client=mock_client, ) @@ -843,7 +875,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, service_client=mock_client, ) @@ -883,7 +914,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, service_client=mock_client, ) @@ -927,7 +957,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, service_client=mock_client, ) @@ -968,7 +997,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, service_client=mock_client, ) @@ -998,7 +1026,7 @@ def test_durable_execution_checkpoint_error_in_background_thread(): # Make the background checkpoint thread fail immediately def failing_checkpoint(*args, **kwargs): msg = "Background checkpoint failed" - raise CheckpointError(msg) + raise CheckpointError(msg, error_category=CheckpointErrorCategory.EXECUTION) @durable_execution def test_handler(event: Any, context: DurableContext) -> dict: @@ -1019,7 +1047,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, service_client=mock_client, ) @@ -1041,7 +1068,7 @@ def test_handler(event: Any, context: DurableContext) -> dict: # endregion durable_execution -def test_durable_execution_checkpoint_error_stops_background(): +def test_durable_execution_checkpoint_execution_error_stops_background(): """Test that CheckpointError handler stops background checkpointing. When user code raises CheckpointError, the handler should stop the background @@ -1053,7 +1080,7 @@ def test_durable_execution_checkpoint_error_stops_background(): def test_handler(event: Any, context: DurableContext) -> dict: # Directly raise CheckpointError to simulate checkpoint failure msg = "Checkpoint system failed" - raise CheckpointError(msg) + raise CheckpointError(msg, CheckpointErrorCategory.EXECUTION) operation = Operation( operation_id="exec1", @@ -1068,7 +1095,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, service_client=mock_client, ) @@ -1093,6 +1119,330 @@ def slow_background(): test_handler(invocation_input, lambda_context) +def test_durable_execution_checkpoint_invocation_error_stops_background(): + """Test that CheckpointError handler stops background checkpointing. + + When user code raises CheckpointError, the handler should stop the background + thread before re-raising to terminate the Lambda. 
+ """ + mock_client = Mock(spec=DurableServiceClient) + + @durable_execution + def test_handler(event: Any, context: DurableContext) -> dict: + # Directly raise CheckpointError to simulate checkpoint failure + msg = "Checkpoint system failed" + raise CheckpointError(msg, CheckpointErrorCategory.INVOCATION) + + operation = Operation( + operation_id="exec1", + operation_type=OperationType.EXECUTION, + status=OperationStatus.STARTED, + execution_details=ExecutionDetails(input_payload="{}"), + ) + + initial_state = InitialExecutionState(operations=[operation], next_marker="") + + invocation_input = DurableExecutionInvocationInputWithClient( + durable_execution_arn="arn:test:execution", + checkpoint_token="token123", # noqa: S106 + initial_execution_state=initial_state, + service_client=mock_client, + ) + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 1000000 # noqa: SLF001 + lambda_context.invoked_function_arn = None + lambda_context.tenant_id = None + + # Make background thread sleep so user code completes first + def slow_background(): + time.sleep(1) + + # Mock checkpoint_batches_forever to sleep (simulates background thread running) + with patch( + "aws_durable_execution_sdk_python.state.ExecutionState.checkpoint_batches_forever", + side_effect=slow_background, + ): + response = test_handler(invocation_input, lambda_context) + assert response["Status"] == InvocationStatus.FAILED.value + assert response["Error"]["ErrorType"] == "CheckpointError" + + +def test_durable_execution_background_thread_execution_error_retries(): + """Test that background thread Execution errors are retried (re-raised).""" + mock_client = Mock(spec=DurableServiceClient) + + def failing_checkpoint(*args, **kwargs): + msg = "Background checkpoint failed" + raise CheckpointError(msg, error_category=CheckpointErrorCategory.EXECUTION) + + @durable_execution + def test_handler(event: Any, context: DurableContext) -> dict: + context.step(lambda ctx: "step_result") + return {"result": "success"} + + operation = Operation( + operation_id="exec1", + operation_type=OperationType.EXECUTION, + status=OperationStatus.STARTED, + execution_details=ExecutionDetails(input_payload="{}"), + ) + + initial_state = InitialExecutionState(operations=[operation], next_marker="") + + invocation_input = DurableExecutionInvocationInputWithClient( + durable_execution_arn="arn:test:execution", + checkpoint_token="token123", # noqa: S106 + initial_execution_state=initial_state, + service_client=mock_client, + ) + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 1000000 # noqa: SLF001 + lambda_context.invoked_function_arn = None + lambda_context.tenant_id = None + + mock_client.checkpoint.side_effect = failing_checkpoint + + with pytest.raises(CheckpointError, match="Background checkpoint failed"): + test_handler(invocation_input, lambda_context) + + +def test_durable_execution_background_thread_invocation_error_returns_failed(): + """Test that background thread Invocation errors return FAILED status.""" + mock_client = Mock(spec=DurableServiceClient) + + def failing_checkpoint(*args, **kwargs): + msg = "Background checkpoint failed" + raise CheckpointError(msg, error_category=CheckpointErrorCategory.INVOCATION) + + @durable_execution + def test_handler(event: 
Any, context: DurableContext) -> dict: + context.step(lambda ctx: "step_result") + return {"result": "success"} + + operation = Operation( + operation_id="exec1", + operation_type=OperationType.EXECUTION, + status=OperationStatus.STARTED, + execution_details=ExecutionDetails(input_payload="{}"), + ) + + initial_state = InitialExecutionState(operations=[operation], next_marker="") + + invocation_input = DurableExecutionInvocationInputWithClient( + durable_execution_arn="arn:test:execution", + checkpoint_token="token123", # noqa: S106 + initial_execution_state=initial_state, + service_client=mock_client, + ) + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 1000000 # noqa: SLF001 + lambda_context.invoked_function_arn = None + lambda_context.tenant_id = None + + mock_client.checkpoint.side_effect = failing_checkpoint + + response = test_handler(invocation_input, lambda_context) + assert response["Status"] == InvocationStatus.FAILED.value + assert response["Error"]["ErrorType"] == "CheckpointError" + + +def test_durable_execution_final_success_checkpoint_execution_error_retries(): + """Test that execution errors on final success checkpoint trigger retry.""" + mock_client = Mock(spec=DurableServiceClient) + + def failing_final_checkpoint(*args, **kwargs): + raise CheckpointError( # noqa TRY003 + "Final checkpoint failed", # noqa EM101 + error_category=CheckpointErrorCategory.EXECUTION, + ) + + @durable_execution + def test_handler(event: Any, context: DurableContext) -> dict: + # Return large result to trigger final checkpoint (>6MB) + return {"result": "x" * (7 * 1024 * 1024)} + + operation = Operation( + operation_id="exec1", + operation_type=OperationType.EXECUTION, + status=OperationStatus.STARTED, + execution_details=ExecutionDetails(input_payload="{}"), + ) + initial_state = InitialExecutionState(operations=[operation], next_marker="") + + invocation_input = DurableExecutionInvocationInputWithClient( + durable_execution_arn="arn:test:execution", + checkpoint_token="token123", # noqa: S106 + initial_execution_state=initial_state, + service_client=mock_client, + ) + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 1000000 # noqa: SLF001 + lambda_context.invoked_function_arn = None + lambda_context.tenant_id = None + + mock_client.checkpoint.side_effect = failing_final_checkpoint + + with pytest.raises(CheckpointError, match="Final checkpoint failed"): + test_handler(invocation_input, lambda_context) + + +def test_durable_execution_final_success_checkpoint_invocation_error_returns_failed(): + """Test that invocation errors on final success checkpoint return FAILED.""" + mock_client = Mock(spec=DurableServiceClient) + + def failing_final_checkpoint(*args, **kwargs): + raise CheckpointError( # noqa TRY003 + "Final checkpoint failed", # noqa EM101 + error_category=CheckpointErrorCategory.INVOCATION, + ) + + @durable_execution + def test_handler(event: Any, context: DurableContext) -> dict: + # Return large result to trigger final checkpoint (>6MB) + return {"result": "x" * (7 * 1024 * 1024)} + + operation = Operation( + operation_id="exec1", + operation_type=OperationType.EXECUTION, + status=OperationStatus.STARTED, + execution_details=ExecutionDetails(input_payload="{}"), + ) + + initial_state = 
InitialExecutionState(operations=[operation], next_marker="") + + invocation_input = DurableExecutionInvocationInputWithClient( + durable_execution_arn="arn:test:execution", + checkpoint_token="token123", # noqa: S106 + initial_execution_state=initial_state, + service_client=mock_client, + ) + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 1000000 # noqa: SLF001 + lambda_context.invoked_function_arn = None + lambda_context.tenant_id = None + + mock_client.checkpoint.side_effect = failing_final_checkpoint + + response = test_handler(invocation_input, lambda_context) + assert response["Status"] == InvocationStatus.FAILED.value + assert response["Error"]["ErrorType"] == "CheckpointError" + assert response["Error"]["ErrorMessage"] == "Final checkpoint failed" + + +def test_durable_execution_final_failure_checkpoint_execution_error_retries(): + """Test that execution errors on final failure checkpoint trigger retry.""" + mock_client = Mock(spec=DurableServiceClient) + + def failing_final_checkpoint(*args, **kwargs): + raise CheckpointError( # noqa TRY003 + "Final checkpoint failed", # noqa EM101 + error_category=CheckpointErrorCategory.EXECUTION, + ) + + @durable_execution + def test_handler(event: Any, context: DurableContext) -> dict: + # Raise error with large message to trigger final checkpoint (>6MB) + msg = "x" * (7 * 1024 * 1024) + raise ValueError(msg) + + operation = Operation( + operation_id="exec1", + operation_type=OperationType.EXECUTION, + status=OperationStatus.STARTED, + execution_details=ExecutionDetails(input_payload="{}"), + ) + + initial_state = InitialExecutionState(operations=[operation], next_marker="") + + invocation_input = DurableExecutionInvocationInputWithClient( + durable_execution_arn="arn:test:execution", + checkpoint_token="token123", # noqa: S106 + initial_execution_state=initial_state, + service_client=mock_client, + ) + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 1000000 # noqa: SLF001 + lambda_context.invoked_function_arn = None + lambda_context.tenant_id = None + + mock_client.checkpoint.side_effect = failing_final_checkpoint + + with pytest.raises(CheckpointError, match="Final checkpoint failed"): + test_handler(invocation_input, lambda_context) + + +def test_durable_execution_final_failure_checkpoint_invocation_error_returns_failed(): + """Test that invocation errors on final failure checkpoint return FAILED.""" + mock_client = Mock(spec=DurableServiceClient) + + def failing_final_checkpoint(*args, **kwargs): + raise CheckpointError( # noqa TRY003 + "Final checkpoint failed", # noqa EM101 + error_category=CheckpointErrorCategory.INVOCATION, + ) + + @durable_execution + def test_handler(event: Any, context: DurableContext) -> dict: + # Raise error with large message to trigger final checkpoint (>6MB) + msg = "x" * (7 * 1024 * 1024) + raise ValueError(msg) + + operation = Operation( + operation_id="exec1", + operation_type=OperationType.EXECUTION, + status=OperationStatus.STARTED, + execution_details=ExecutionDetails(input_payload="{}"), + ) + + initial_state = InitialExecutionState(operations=[operation], next_marker="") + + invocation_input = DurableExecutionInvocationInputWithClient( + durable_execution_arn="arn:test:execution", + checkpoint_token="token123", # 
noqa: S106 + initial_execution_state=initial_state, + service_client=mock_client, + ) + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 1000000 # noqa: SLF001 + lambda_context.invoked_function_arn = None + lambda_context.tenant_id = None + + mock_client.checkpoint.side_effect = failing_final_checkpoint + + response = test_handler(invocation_input, lambda_context) + assert response["Status"] == InvocationStatus.FAILED.value + assert response["Error"]["ErrorType"] == "CheckpointError" + assert response["Error"]["ErrorMessage"] == "Final checkpoint failed" + + def test_durable_handler_background_thread_failure_on_succeed_checkpoint(): """Test durable_handler handles background thread failure on SUCCEED checkpoint. @@ -1142,7 +1492,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, service_client=mock_client, ) @@ -1233,7 +1582,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, service_client=mock_client, ) @@ -1317,7 +1665,6 @@ def test_handler(event: Any, context: DurableContext) -> str: durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, service_client=mock_client, ) @@ -1388,7 +1735,6 @@ def test_handler(event: Any, context: DurableContext) -> str: durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, service_client=mock_client, ) @@ -1403,8 +1749,291 @@ def test_handler(event: Any, context: DurableContext) -> str: # Make the service client checkpoint call fail on error handling mock_client.checkpoint.side_effect = failing_checkpoint - # Verify that the checkpoint error is raised (not the original ValueError) + # Verify that errors are not raised, but returned because response is small + resp = test_handler(invocation_input, lambda_context) + assert resp["Error"]["ErrorMessage"] == "User function error" + assert resp["Error"]["ErrorType"] == "ValueError" + assert resp["Status"] == InvocationStatus.FAILED.value + + +def test_durable_execution_logs_checkpoint_error_extras_from_background_thread(): + """Test that CheckpointError extras are logged when raised from background thread.""" + mock_client = Mock(spec=DurableServiceClient) + mock_logger = Mock() + + error_obj = {"Code": "TestError", "Message": "Test checkpoint error"} + metadata_obj = {"RequestId": "test-request-id"} + + def failing_checkpoint(*args, **kwargs): + raise CheckpointError( # noqa TRY003 + "Checkpoint failed", # noqa EM101 + error_category=CheckpointErrorCategory.EXECUTION, + error=error_obj, + response_metadata=metadata_obj, # EM101 + ) + + @durable_execution + def test_handler(event: Any, context: DurableContext) -> dict: + context.step(lambda ctx: "step_result") + return {"result": "success"} + + operation = Operation( + operation_id="exec1", + operation_type=OperationType.EXECUTION, + status=OperationStatus.STARTED, + execution_details=ExecutionDetails(input_payload="{}"), + ) + + initial_state = InitialExecutionState(operations=[operation], next_marker="") 
+ + invocation_input = DurableExecutionInvocationInputWithClient( + durable_execution_arn="arn:test:execution", + checkpoint_token="token123", # noqa: S106 + initial_execution_state=initial_state, + service_client=mock_client, + ) + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 1000000 # noqa: SLF001 + lambda_context.invoked_function_arn = None + lambda_context.tenant_id = None + + mock_client.checkpoint.side_effect = failing_checkpoint + + with patch("aws_durable_execution_sdk_python.execution.logger", mock_logger): + with pytest.raises(CheckpointError): + test_handler(invocation_input, lambda_context) + + mock_logger.exception.assert_called_once() + call_args = mock_logger.exception.call_args + assert "Checkpoint processing failed" in call_args[0][0] + assert call_args[1]["extra"]["Error"] == error_obj + assert call_args[1]["extra"]["ResponseMetadata"] == metadata_obj + + +def test_durable_execution_logs_boto_client_error_extras_from_background_thread(): + """Test that BotoClientError extras are logged when raised from background thread.""" + + mock_client = Mock(spec=DurableServiceClient) + mock_logger = Mock() + + error_obj = {"Code": "ServiceError", "Message": "Boto3 service error"} + metadata_obj = {"RequestId": "boto-request-id"} + + def failing_checkpoint(*args, **kwargs): + raise BotoClientError( # noqa TRY003 + "Boto3 error", # noqa EM101 + error=error_obj, + response_metadata=metadata_obj, # EM101 + ) + + @durable_execution + def test_handler(event: Any, context: DurableContext) -> dict: + context.step(lambda ctx: "step_result") + return {"result": "success"} + + operation = Operation( + operation_id="exec1", + operation_type=OperationType.EXECUTION, + status=OperationStatus.STARTED, + execution_details=ExecutionDetails(input_payload="{}"), + ) + + initial_state = InitialExecutionState(operations=[operation], next_marker="") + + invocation_input = DurableExecutionInvocationInputWithClient( + durable_execution_arn="arn:test:execution", + checkpoint_token="token123", # noqa: S106 + initial_execution_state=initial_state, + service_client=mock_client, + ) + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 1000000 # noqa: SLF001 + lambda_context.invoked_function_arn = None + lambda_context.tenant_id = None + + mock_client.checkpoint.side_effect = failing_checkpoint + + with patch("aws_durable_execution_sdk_python.execution.logger", mock_logger): + with pytest.raises(BotoClientError): + test_handler(invocation_input, lambda_context) + + mock_logger.exception.assert_called_once() + call_args = mock_logger.exception.call_args + assert "Checkpoint processing failed" in call_args[0][0] + assert call_args[1]["extra"]["Error"] == error_obj + assert call_args[1]["extra"]["ResponseMetadata"] == metadata_obj + + +def test_durable_execution_logs_checkpoint_error_extras_from_user_code(): + """Test that CheckpointError extras are logged when raised directly from user code.""" + mock_client = Mock(spec=DurableServiceClient) + mock_logger = Mock() + + error_obj = { + "Code": "UserCheckpointError", + "Message": "User raised checkpoint error", + } + metadata_obj = {"RequestId": "user-request-id"} + + @durable_execution + def test_handler(event: Any, context: DurableContext) -> dict: + raise CheckpointError( # noqa 
TRY003 + "User checkpoint error", # noqa EM101 + error_category=CheckpointErrorCategory.EXECUTION, + error=error_obj, + response_metadata=metadata_obj, # EM101 + ) + + operation = Operation( + operation_id="exec1", + operation_type=OperationType.EXECUTION, + status=OperationStatus.STARTED, + execution_details=ExecutionDetails(input_payload="{}"), + ) + + initial_state = InitialExecutionState(operations=[operation], next_marker="") + + invocation_input = DurableExecutionInvocationInputWithClient( + durable_execution_arn="arn:test:execution", + checkpoint_token="token123", # noqa: S106 + initial_execution_state=initial_state, + service_client=mock_client, + ) + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 1000000 # noqa: SLF001 + lambda_context.invoked_function_arn = None + lambda_context.tenant_id = None + + with patch("aws_durable_execution_sdk_python.execution.logger", mock_logger): + with pytest.raises(CheckpointError): + test_handler(invocation_input, lambda_context) + + mock_logger.exception.assert_called_once() + call_args = mock_logger.exception.call_args + assert call_args[0][0] == "Checkpoint system failed" + assert call_args[1]["extra"]["Error"] == error_obj + assert call_args[1]["extra"]["ResponseMetadata"] == metadata_obj + + +def test_durable_execution_with_boto3_client_parameter(): + """Test durable_execution decorator accepts boto3_client parameter.""" + # GIVEN a custom boto3 Lambda client + mock_boto3_client = Mock() + mock_boto3_client.checkpoint_durable_execution.return_value = { + "CheckpointToken": "new_token", + "NewExecutionState": {"Operations": [], "NextMarker": ""}, + } + mock_boto3_client.get_durable_execution_state.return_value = { + "Operations": [], + "NextMarker": "", + } + + # GIVEN a durable function decorated with the custom client + @durable_execution(boto3_client=mock_boto3_client) + def test_handler(event: Any, context: DurableContext) -> dict: + return {"result": "success"} + + event = { + "DurableExecutionArn": "arn:test:execution", + "CheckpointToken": "token123", + "InitialExecutionState": { + "Operations": [ + { + "Id": "exec1", + "Type": "EXECUTION", + "Status": "STARTED", + "ExecutionDetails": {"InputPayload": '{"input": "test"}'}, + } + ], + "NextMarker": "", + }, + } + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 1000000 # noqa: SLF001 + lambda_context.invoked_function_arn = None + lambda_context.tenant_id = None + + # WHEN the handler is invoked + result = test_handler(event, lambda_context) + + # THEN the execution succeeds using the custom client + assert result["Status"] == InvocationStatus.SUCCEEDED.value + assert result["Result"] == '{"result": "success"}' + + +def test_durable_execution_with_non_durable_payload_raises_error(): + """Test that invoking a durable function with a regular event raises a helpful error.""" + + # GIVEN a durable function + @durable_execution + def test_handler(event: Any, context: DurableContext) -> dict: + return {"result": "success"} + + # GIVEN a regular Lambda event (not a durable execution payload) + regular_event = {"key": "value", "data": "test"} + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request" + lambda_context.client_context = None + lambda_context.identity = None + 
lambda_context._epoch_deadline_time_in_ms = 1000000 # noqa: SLF001 + lambda_context.invoked_function_arn = None + lambda_context.tenant_id = None + + # WHEN the handler is invoked with a non-durable payload + # THEN it raises a ValueError with a helpful message with pytest.raises( - RuntimeError, match="Background checkpoint failed on error handling" + ExecutionError, + match=( + "Unexpected payload provided to start the durable execution. " + "Check your resource configurations to confirm the durability is set." + ), ): - test_handler(invocation_input, lambda_context) + test_handler(regular_event, lambda_context) + + +def test_durable_execution_with_non_dict_event_raises_error(): + """Test that invoking a durable function with a non-dict event raises a helpful error.""" + + # GIVEN a durable function + @durable_execution + def test_handler(event: Any, context: DurableContext) -> dict: + return {"result": "success"} + + # GIVEN a non-dict event + non_dict_event = "not a dict" + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 1000000 # noqa: SLF001 + lambda_context.invoked_function_arn = None + lambda_context.tenant_id = None + + # WHEN the handler is invoked with a non-dict event + # THEN it raises a ValueError with a helpful message + with pytest.raises( + ExecutionError, + match=( + "Unexpected payload provided to start the durable execution. " + "Check your resource configurations to confirm the durability is set." + ), + ): + test_handler(non_dict_event, lambda_context) diff --git a/tests/lambda_service_test.py b/tests/lambda_service_test.py index 35214b9..cc4dce4 100644 --- a/tests/lambda_service_test.py +++ b/tests/lambda_service_test.py @@ -8,6 +8,7 @@ from aws_durable_execution_sdk_python.exceptions import ( CallableRuntimeError, CheckpointError, + GetExecutionStateError, ) from aws_durable_execution_sdk_python.identifier import OperationIdentifier from aws_durable_execution_sdk_python.lambda_service import ( @@ -399,9 +400,10 @@ def test_callback_options_from_dict_partial(): def test_invoke_options_from_dict(): """Test ChainedInvokeOptions.from_dict method.""" - data = {"FunctionName": "test-function", "TimeoutSeconds": 120} + data = {"FunctionName": "test-function", "TenantId": "test-tenant"} options = ChainedInvokeOptions.from_dict(data) assert options.function_name == "test-function" + assert options.tenant_id == "test-tenant" def test_invoke_options_from_dict_required_only(): @@ -409,6 +411,15 @@ def test_invoke_options_from_dict_required_only(): data = {"FunctionName": "test-function"} options = ChainedInvokeOptions.from_dict(data) assert options.function_name == "test-function" + assert options.tenant_id is None + + +def test_invoke_options_from_dict_with_none_tenant(): + """Test ChainedInvokeOptions.from_dict with explicit None tenant_id.""" + data = {"FunctionName": "test-function", "TenantId": None} + options = ChainedInvokeOptions.from_dict(data) + assert options.function_name == "test-function" + assert options.tenant_id is None def test_context_options_from_dict(): @@ -685,9 +696,12 @@ def test_operation_update_create_wait_start(): @patch("aws_durable_execution_sdk_python.lambda_service.datetime") def test_operation_update_create_execution_succeed(mock_datetime): """Test OperationUpdate.create_execution_succeed factory method.""" - mock_datetime.datetime.now.return_value = "2023-01-01" + + 
mock_datetime.datetime.now.return_value = datetime.datetime.fromtimestamp( + 1672531200.0, tz=datetime.UTC + ) update = OperationUpdate.create_execution_succeed("success_payload") - assert update.operation_id == "execution-result-2023-01-01" + assert update.operation_id == "execution-result-1672531200000" assert update.operation_type == OperationType.EXECUTION assert update.action == OperationAction.SUCCEED assert update.payload == "success_payload" @@ -1467,6 +1481,8 @@ def test_operation_from_dict_complete(): assert operation.step_details.result == "step_result" assert operation.wait_details.scheduled_end_timestamp == start_time assert operation.callback_details.callback_id == "cb1" + assert operation.chained_invoke_details is not None + assert operation.chained_invoke_details.result == "invoke_result" def test_operation_to_dict_with_subtype(): @@ -1629,13 +1645,6 @@ def test_checkpoint_updated_execution_state_from_dict_with_operations(): assert state.next_marker == "marker123" -@patch.dict( - "os.environ", - { - "DURABLE_LOCAL_RUNNER_ENDPOINT": "http://test:5000/", - "DURABLE_LOCAL_RUNNER_REGION": "us-west-1", - }, -) @patch("aws_durable_execution_sdk_python.lambda_service.boto3") def test_lambda_client_checkpoint(mock_boto3): """Test LambdaClient.checkpoint method.""" @@ -1788,6 +1797,80 @@ def test_lambda_client_checkpoint_with_exception(): lambda_client.checkpoint("arn123", "token123", [update], None) +@patch("aws_durable_execution_sdk_python.lambda_service.logger") +def test_lambda_client_checkpoint_logs_response_metadata(mock_logger): + """Test LambdaClient.checkpoint logs ResponseMetadata from boto3 exception.""" + mock_client = Mock() + boto_error = Exception("API Error") + boto_error.response = { + "ResponseMetadata": { + "RequestId": "test-request-id-123", + "HTTPStatusCode": 500, + "RetryAttempts": 2, + } + } + mock_client.checkpoint_durable_execution.side_effect = boto_error + + lambda_client = LambdaClient(mock_client) + update = OperationUpdate( + operation_id="op1", + operation_type=OperationType.STEP, + action=OperationAction.START, + ) + + with pytest.raises(CheckpointError): + lambda_client.checkpoint("arn123", "token123", [update], None) + + mock_logger.exception.assert_called_once_with( + "Failed to checkpoint.", + extra={ + "ResponseMetadata": { + "RequestId": "test-request-id-123", + "HTTPStatusCode": 500, + "RetryAttempts": 2, + }, + }, + ) + + +@patch("aws_durable_execution_sdk_python.lambda_service.logger") +def test_lambda_client_get_execution_state_logs_response_metadata(mock_logger): + """Test LambdaClient.get_execution_state logs ResponseMetadata from boto3 exception.""" + mock_client = Mock() + boto_error = Exception("API Error") + boto_error.response = { + "ResponseMetadata": { + "RequestId": "test-request-id-456", + "HTTPStatusCode": 503, + "RetryAttempts": 1, + } + } + mock_client.get_durable_execution_state.side_effect = boto_error + + lambda_client = LambdaClient(mock_client) + + with pytest.raises(GetExecutionStateError) as exc_info: + lambda_client.get_execution_state("arn123", "token123", "", 1000) + + assert exc_info.value.error is None + assert exc_info.value.response_metadata == { + "RequestId": "test-request-id-456", + "HTTPStatusCode": 503, + "RetryAttempts": 1, + } + + mock_logger.exception.assert_called_once_with( + "Failed to get execution state.", + extra={ + "ResponseMetadata": { + "RequestId": "test-request-id-456", + "HTTPStatusCode": 503, + "RetryAttempts": 1, + }, + }, + ) + + def 
test_durable_service_client_protocol_checkpoint(): """Test DurableServiceClient protocol checkpoint method signature.""" mock_client = Mock(spec=DurableServiceClient) @@ -1825,50 +1908,45 @@ def test_lambda_client_constructor(): @patch.dict("os.environ", {}, clear=True) @patch("boto3.client") -def test_lambda_client_initialize_from_env_default(mock_boto_client): - """Test LambdaClient.initialize_from_env with default endpoint.""" +def test_lambda_client_initialize_client_default(mock_boto_client): + """Test LambdaClient.initialize_client with default endpoint.""" mock_client = Mock() mock_boto_client.return_value = mock_client - with patch.object(LambdaClient, "load_preview_botocore_models"): - client = LambdaClient.initialize_from_env() + client = LambdaClient.initialize_client() - mock_boto_client.assert_called_with("lambdainternal") + # Check that boto3.client was called with the right service name and config + mock_boto_client.assert_called_once() + call_args = mock_boto_client.call_args + assert call_args[0][0] == "lambda" + assert "config" in call_args[1] + config = call_args[1]["config"] + assert config.connect_timeout == 5 + assert config.read_timeout == 50 assert isinstance(client, LambdaClient) @patch.dict("os.environ", {"AWS_ENDPOINT_URL_LAMBDA": "http://localhost:3000/"}) @patch("boto3.client") -def test_lambda_client_initialize_from_env_with_endpoint(mock_boto_client): - """Test LambdaClient.initialize_from_env with custom endpoint.""" +def test_lambda_client_initialize_client_with_endpoint(mock_boto_client): + """Test LambdaClient.initialize_client with custom endpoint (boto3 handles it automatically).""" mock_client = Mock() mock_boto_client.return_value = mock_client - with patch.object(LambdaClient, "load_preview_botocore_models"): - client = LambdaClient.initialize_from_env() - - mock_boto_client.assert_called_with( - "lambdainternal", endpoint_url="http://localhost:3000/" - ) + client = LambdaClient.initialize_client() + + # Check that boto3.client was called with the right parameters and config + # Note: boto3 automatically picks up AWS_ENDPOINT_URL_LAMBDA from environment + mock_boto_client.assert_called_once() + call_args = mock_boto_client.call_args + assert call_args[0][0] == "lambda" + assert "config" in call_args[1] + config = call_args[1]["config"] + assert config.connect_timeout == 5 + assert config.read_timeout == 50 assert isinstance(client, LambdaClient) -@patch("aws_durable_execution_sdk_python.lambda_service.boto3") -def test_lambda_client_initialize_local_runner_client(mock_boto3): - """Test LambdaClient.initialize_local_runner_client method.""" - mock_client = Mock() - mock_boto3.client.return_value = mock_client - - lambda_client = LambdaClient.initialize_local_runner_client() - - mock_boto3.client.assert_called_once_with( - "lambdainternal-local", - endpoint_url="http://host.docker.internal:5000/", - region_name="us-west-2", - ) - assert lambda_client.client == mock_client - - def test_lambda_client_get_execution_state(): """Test LambdaClient.get_execution_state method.""" mock_client = Mock() @@ -1902,40 +1980,14 @@ def test_durable_service_client_protocol_get_execution_state(): assert result == mock_output -@patch("aws_durable_execution_sdk_python.lambda_service.boto3") -def test_lambda_client_initialize_local_runner_client_defaults(mock_boto3): - """Test LambdaClient.initialize_local_runner_client with default environment values.""" - mock_client = Mock() - mock_boto3.client.return_value = mock_client - - lambda_client = 
LambdaClient.initialize_local_runner_client() - - mock_boto3.client.assert_called_once_with( - "lambdainternal-local", - endpoint_url="http://host.docker.internal:5000/", - region_name="us-west-2", - ) - assert lambda_client.client == mock_client - - @patch.dict("os.environ", {}, clear=True) -@patch( - "aws_durable_execution_sdk_python.lambda_service.LambdaClient.initialize_from_env" -) -def test_lambda_client_initialize_from_env_defaults(mock_init): - """Test LambdaClient.initialize_from_env with default environment values.""" - LambdaClient.initialize_from_env() +@patch("aws_durable_execution_sdk_python.lambda_service.LambdaClient.initialize_client") +def test_lambda_client_initialize_client_defaults(mock_init): + """Test LambdaClient.initialize_client with default environment values.""" + LambdaClient.initialize_client() mock_init.assert_called_once_with() -@patch("os.environ") -def test_lambda_client_load_preview_botocore_models(mock_environ): - """Test LambdaClient.load_preview_botocore_models method.""" - LambdaClient.load_preview_botocore_models() - # Verify that AWS_DATA_PATH is set - assert "AWS_DATA_PATH" in mock_environ.__setitem__.call_args[0] - - def test_checkpoint_error_handling(): """Test CheckpointError exception handling in LambdaClient.checkpoint.""" mock_client = Mock() @@ -1954,15 +2006,17 @@ def test_checkpoint_error_handling(): @patch.dict("os.environ", {}, clear=True) @patch("boto3.client") -def test_lambda_client_initialize_from_env_no_endpoint(mock_boto_client): - """Test LambdaClient.initialize_from_env without AWS_ENDPOINT_URL_LAMBDA.""" +def test_lambda_client_initialize_client_no_endpoint(mock_boto_client): + """Test LambdaClient.initialize_client without AWS_ENDPOINT_URL_LAMBDA.""" mock_client = Mock() mock_boto_client.return_value = mock_client - with patch.object(LambdaClient, "load_preview_botocore_models"): - client = LambdaClient.initialize_from_env() + client = LambdaClient.initialize_client() - mock_boto_client.assert_called_with("lambdainternal") + # Verify the call was made with the expected arguments including config + call_args = mock_boto_client.call_args + assert call_args[0] == ("lambda",) + assert "config" in call_args[1] assert isinstance(client, LambdaClient) diff --git a/tests/logger_test.py b/tests/logger_test.py index d3b76aa..f503538 100644 --- a/tests/logger_test.py +++ b/tests/logger_test.py @@ -1,10 +1,17 @@ """Unit tests for logger module.""" +import logging from collections.abc import Mapping from unittest.mock import Mock from aws_durable_execution_sdk_python.identifier import OperationIdentifier +from aws_durable_execution_sdk_python.lambda_service import ( + Operation, + OperationStatus, + OperationType, +) from aws_durable_execution_sdk_python.logger import Logger, LoggerInterface, LogInfo +from aws_durable_execution_sdk_python.state import ExecutionState, ReplayStatus class PowertoolsLoggerStub: @@ -71,6 +78,14 @@ def exception( pass +EXECUTION_STATE = ExecutionState( + durable_execution_arn="arn:aws:test", + initial_checkpoint_token="test_token", # noqa: S106 + operations={}, + service_client=Mock(), +) + + def test_powertools_logger_compatibility(): """Test that PowertoolsLoggerStub is compatible with LoggerInterface protocol.""" powertools_logger = PowertoolsLoggerStub() @@ -87,7 +102,7 @@ def accepts_logger_interface(logger: LoggerInterface) -> None: accepts_logger_interface(powertools_logger) # Test that our Logger can wrap the PowertoolsLoggerStub - log_info = LogInfo("arn:aws:test") + log_info = 
LogInfo(EXECUTION_STATE) wrapped_logger = Logger.from_log_info(powertools_logger, log_info) # Test all methods work @@ -100,18 +115,20 @@ def accepts_logger_interface(logger: LoggerInterface) -> None: def test_log_info_creation(): """Test LogInfo creation with all parameters.""" - log_info = LogInfo("arn:aws:test", "parent123", "test_name", 5) - assert log_info.execution_arn == "arn:aws:test" + log_info = LogInfo(EXECUTION_STATE, "parent123", "operation123", "test_name", 5) + assert log_info.execution_state.durable_execution_arn == "arn:aws:test" assert log_info.parent_id == "parent123" + assert log_info.operation_id == "operation123" assert log_info.name == "test_name" assert log_info.attempt == 5 def test_log_info_creation_minimal(): """Test LogInfo creation with minimal parameters.""" - log_info = LogInfo("arn:aws:test") - assert log_info.execution_arn == "arn:aws:test" + log_info = LogInfo(EXECUTION_STATE) + assert log_info.execution_state.durable_execution_arn == "arn:aws:test" assert log_info.parent_id is None + assert log_info.operation_id is None assert log_info.name is None assert log_info.attempt is None @@ -119,9 +136,10 @@ def test_log_info_creation_minimal(): def test_log_info_from_operation_identifier(): """Test LogInfo.from_operation_identifier.""" op_id = OperationIdentifier("op123", "parent456", "op_name") - log_info = LogInfo.from_operation_identifier("arn:aws:test", op_id, 3) - assert log_info.execution_arn == "arn:aws:test" + log_info = LogInfo.from_operation_identifier(EXECUTION_STATE, op_id, 3) + assert log_info.execution_state.durable_execution_arn == "arn:aws:test" assert log_info.parent_id == "parent456" + assert log_info.operation_id == "op123" assert log_info.name == "op_name" assert log_info.attempt == 3 @@ -129,19 +147,21 @@ def test_log_info_from_operation_identifier(): def test_log_info_from_operation_identifier_no_attempt(): """Test LogInfo.from_operation_identifier without attempt.""" op_id = OperationIdentifier("op123", "parent456", "op_name") - log_info = LogInfo.from_operation_identifier("arn:aws:test", op_id) - assert log_info.execution_arn == "arn:aws:test" + log_info = LogInfo.from_operation_identifier(EXECUTION_STATE, op_id) + assert log_info.execution_state.durable_execution_arn == "arn:aws:test" assert log_info.parent_id == "parent456" + assert log_info.operation_id == "op123" assert log_info.name == "op_name" assert log_info.attempt is None def test_log_info_with_parent_id(): """Test LogInfo.with_parent_id.""" - original = LogInfo("arn:aws:test", "old_parent", "test_name", 2) + original = LogInfo(EXECUTION_STATE, "old_parent", "op123", "test_name", 2) new_log_info = original.with_parent_id("new_parent") - assert new_log_info.execution_arn == "arn:aws:test" + assert new_log_info.execution_state.durable_execution_arn == "arn:aws:test" assert new_log_info.parent_id == "new_parent" + assert new_log_info.operation_id == "op123" assert new_log_info.name == "test_name" assert new_log_info.attempt == 2 @@ -149,14 +169,15 @@ def test_log_info_with_parent_id(): def test_logger_from_log_info_full(): """Test Logger.from_log_info with all LogInfo fields.""" mock_logger = Mock() - log_info = LogInfo("arn:aws:test", "parent123", "test_name", 5) + log_info = LogInfo(EXECUTION_STATE, "parent123", "op123", "test_name", 5) logger = Logger.from_log_info(mock_logger, log_info) expected_extra = { - "execution_arn": "arn:aws:test", - "parent_id": "parent123", - "name": "test_name", - "attempt": 5, + "executionArn": "arn:aws:test", + "parentId": "parent123", + 
"operationId": "op123", + "operationName": "test_name", + "attempt": 6, } assert logger._default_extra == expected_extra # noqa: SLF001 assert logger._logger is mock_logger # noqa: SLF001 @@ -167,47 +188,54 @@ def test_logger_from_log_info_partial_fields(): mock_logger = Mock() # Test with parent_id but no name or attempt - log_info = LogInfo("arn:aws:test", "parent123") + log_info = LogInfo(EXECUTION_STATE, "parent123") logger = Logger.from_log_info(mock_logger, log_info) - expected_extra = {"execution_arn": "arn:aws:test", "parent_id": "parent123"} + expected_extra = {"executionArn": "arn:aws:test", "parentId": "parent123"} assert logger._default_extra == expected_extra # noqa: SLF001 # Test with name but no parent_id or attempt - log_info = LogInfo("arn:aws:test", None, "test_name") + log_info = LogInfo(EXECUTION_STATE, None, None, "test_name") logger = Logger.from_log_info(mock_logger, log_info) - expected_extra = {"execution_arn": "arn:aws:test", "name": "test_name"} + expected_extra = {"executionArn": "arn:aws:test", "operationName": "test_name"} assert logger._default_extra == expected_extra # noqa: SLF001 # Test with attempt but no parent_id or name - log_info = LogInfo("arn:aws:test", None, None, 5) + log_info = LogInfo(EXECUTION_STATE, None, None, None, 5) logger = Logger.from_log_info(mock_logger, log_info) - expected_extra = {"execution_arn": "arn:aws:test", "attempt": 5} + expected_extra = {"executionArn": "arn:aws:test", "attempt": 6} assert logger._default_extra == expected_extra # noqa: SLF001 def test_logger_from_log_info_minimal(): """Test Logger.from_log_info with minimal LogInfo.""" mock_logger = Mock() - log_info = LogInfo("arn:aws:test") + log_info = LogInfo(EXECUTION_STATE) logger = Logger.from_log_info(mock_logger, log_info) - expected_extra = {"execution_arn": "arn:aws:test"} + expected_extra = {"executionArn": "arn:aws:test"} assert logger._default_extra == expected_extra # noqa: SLF001 def test_logger_with_log_info(): """Test Logger.with_log_info.""" mock_logger = Mock() - original_info = LogInfo("arn:aws:test", "parent1") + original_info = LogInfo(EXECUTION_STATE, "parent1") logger = Logger.from_log_info(mock_logger, original_info) - new_info = LogInfo("arn:aws:new", "parent2", "new_name") + execution_state_new = ExecutionState( + durable_execution_arn="arn:aws:new", + initial_checkpoint_token="test_token", # noqa: S106 + operations={}, + service_client=Mock(), + ) + new_info = LogInfo(execution_state_new, "parent2", "op123", "new_name") new_logger = logger.with_log_info(new_info) expected_extra = { - "execution_arn": "arn:aws:new", - "parent_id": "parent2", - "name": "new_name", + "executionArn": "arn:aws:new", + "parentId": "parent2", + "operationId": "op123", + "operationName": "new_name", } assert new_logger._default_extra == expected_extra # noqa: SLF001 assert new_logger._logger is mock_logger # noqa: SLF001 @@ -216,7 +244,7 @@ def test_logger_with_log_info(): def test_logger_get_logger(): """Test Logger.get_logger.""" mock_logger = Mock() - log_info = LogInfo("arn:aws:test") + log_info = LogInfo(EXECUTION_STATE) logger = Logger.from_log_info(mock_logger, log_info) assert logger.get_logger() is mock_logger @@ -224,14 +252,14 @@ def test_logger_get_logger(): def test_logger_debug(): """Test Logger.debug method.""" mock_logger = Mock() - log_info = LogInfo("arn:aws:test", "parent123") + log_info = LogInfo(EXECUTION_STATE, "parent123") logger = Logger.from_log_info(mock_logger, log_info) logger.debug("test %s message", "arg1", extra={"custom": "value"}) 
expected_extra = { - "execution_arn": "arn:aws:test", - "parent_id": "parent123", + "executionArn": "arn:aws:test", + "parentId": "parent123", "custom": "value", } mock_logger.debug.assert_called_once_with( @@ -242,24 +270,24 @@ def test_logger_debug(): def test_logger_info(): """Test Logger.info method.""" mock_logger = Mock() - log_info = LogInfo("arn:aws:test") + log_info = LogInfo(EXECUTION_STATE) logger = Logger.from_log_info(mock_logger, log_info) logger.info("info message") - expected_extra = {"execution_arn": "arn:aws:test"} + expected_extra = {"executionArn": "arn:aws:test"} mock_logger.info.assert_called_once_with("info message", extra=expected_extra) def test_logger_warning(): """Test Logger.warning method.""" mock_logger = Mock() - log_info = LogInfo("arn:aws:test") + log_info = LogInfo(EXECUTION_STATE) logger = Logger.from_log_info(mock_logger, log_info) logger.warning("warning %s %s message", "arg1", "arg2") - expected_extra = {"execution_arn": "arn:aws:test"} + expected_extra = {"executionArn": "arn:aws:test"} mock_logger.warning.assert_called_once_with( "warning %s %s message", "arg1", "arg2", extra=expected_extra ) @@ -268,24 +296,24 @@ def test_logger_warning(): def test_logger_error(): """Test Logger.error method.""" mock_logger = Mock() - log_info = LogInfo("arn:aws:test") + log_info = LogInfo(EXECUTION_STATE) logger = Logger.from_log_info(mock_logger, log_info) logger.error("error message", extra={"error_code": 500}) - expected_extra = {"execution_arn": "arn:aws:test", "error_code": 500} + expected_extra = {"executionArn": "arn:aws:test", "error_code": 500} mock_logger.error.assert_called_once_with("error message", extra=expected_extra) def test_logger_exception(): """Test Logger.exception method.""" mock_logger = Mock() - log_info = LogInfo("arn:aws:test") + log_info = LogInfo(EXECUTION_STATE) logger = Logger.from_log_info(mock_logger, log_info) logger.exception("exception message") - expected_extra = {"execution_arn": "arn:aws:test"} + expected_extra = {"executionArn": "arn:aws:test"} mock_logger.exception.assert_called_once_with( "exception message", extra=expected_extra ) @@ -294,7 +322,7 @@ def test_logger_exception(): def test_logger_methods_with_none_extra(): """Test logger methods handle None extra parameter.""" mock_logger = Mock() - log_info = LogInfo("arn:aws:test") + log_info = LogInfo(EXECUTION_STATE) logger = Logger.from_log_info(mock_logger, log_info) logger.debug("debug", extra=None) @@ -303,7 +331,7 @@ def test_logger_methods_with_none_extra(): logger.error("error", extra=None) logger.exception("exception", extra=None) - expected_extra = {"execution_arn": "arn:aws:test"} + expected_extra = {"executionArn": "arn:aws:test"} mock_logger.debug.assert_called_with("debug", extra=expected_extra) mock_logger.info.assert_called_with("info", extra=expected_extra) mock_logger.warning.assert_called_with("warning", extra=expected_extra) @@ -314,14 +342,77 @@ def test_logger_methods_with_none_extra(): def test_logger_extra_override(): """Test that custom extra overrides default extra.""" mock_logger = Mock() - log_info = LogInfo("arn:aws:test", "parent123") + log_info = LogInfo(EXECUTION_STATE, "parent123") logger = Logger.from_log_info(mock_logger, log_info) - logger.info("test", extra={"execution_arn": "overridden", "new_field": "value"}) + logger.info("test", extra={"executionArn": "overridden", "newField": "value"}) expected_extra = { - "execution_arn": "overridden", - "parent_id": "parent123", - "new_field": "value", + "executionArn": "overridden", + 
"parentId": "parent123", + "newField": "value", } mock_logger.info.assert_called_once_with("test", extra=expected_extra) + + +def test_logger_without_mocked_logger(): + """Test Logger methods without mocking the underlying logger.""" + log_info = LogInfo(EXECUTION_STATE, "parent123", "test_name", 5) + logger = Logger.from_log_info(logging.getLogger(), log_info) + + logger.info("test", extra={"execution_arn": "overridden", "new_field": "value"}) + logger.warning("test", extra={"execution_arn": "overridden", "new_field": "value"}) + logger.error("test", extra={"execution_arn": "overridden", "new_field": "value"}) + + +def test_logger_replay_no_logging(): + operation = Operation( + operation_id="op1", + operation_type=OperationType.STEP, + status=OperationStatus.SUCCEEDED, + ) + replay_execution_state = ExecutionState( + durable_execution_arn="arn:aws:test", + initial_checkpoint_token="test_token", # noqa: S106 + operations={"op1": operation}, + service_client=Mock(), + replay_status=ReplayStatus.REPLAY, + ) + log_info = LogInfo(replay_execution_state, "parent123", "test_name", 5) + mock_logger = Mock() + logger = Logger.from_log_info(mock_logger, log_info) + logger.info("logging info") + replay_execution_state.track_replay(operation_id="op1") + + mock_logger.info.assert_not_called() + + +def test_logger_replay_then_new_logging(): + operation1 = Operation( + operation_id="op1", + operation_type=OperationType.STEP, + status=OperationStatus.SUCCEEDED, + ) + operation2 = Operation( + operation_id="op2", + operation_type=OperationType.STEP, + status=OperationStatus.SUCCEEDED, + ) + execution_state = ExecutionState( + durable_execution_arn="arn:aws:test", + initial_checkpoint_token="test_token", # noqa: S106 + operations={"op1": operation1, "op2": operation2}, + service_client=Mock(), + replay_status=ReplayStatus.REPLAY, + ) + log_info = LogInfo(execution_state, "parent123", "test_name", 5) + mock_logger = Mock() + logger = Logger.from_log_info(mock_logger, log_info) + execution_state.track_replay(operation_id="op1") + logger.info("logging info") + + mock_logger.info.assert_not_called() + + execution_state.track_replay(operation_id="op2") + logger.info("logging info") + mock_logger.info.assert_called_once() diff --git a/tests/operation/base_test.py b/tests/operation/base_test.py new file mode 100644 index 0000000..4b20818 --- /dev/null +++ b/tests/operation/base_test.py @@ -0,0 +1,314 @@ +"""Unit tests for OperationExecutor base framework.""" + +from __future__ import annotations + +import pytest + +from aws_durable_execution_sdk_python.exceptions import InvalidStateError +from aws_durable_execution_sdk_python.lambda_service import ( + Operation, + OperationStatus, + OperationType, +) +from aws_durable_execution_sdk_python.operation.base import ( + CheckResult, + OperationExecutor, +) +from aws_durable_execution_sdk_python.state import CheckpointedResult + +# Test fixtures and helpers + + +class ConcreteOperationExecutor(OperationExecutor[str]): + """Concrete implementation for testing the abstract base class.""" + + def __init__(self): + self.check_result_status_called = 0 + self.execute_called = 0 + self.check_result_to_return = None + self.execute_result_to_return = "executed_result" + + def check_result_status(self) -> CheckResult[str]: + """Mock implementation that returns configured result.""" + self.check_result_status_called += 1 + if self.check_result_to_return is None: + msg = "check_result_to_return not configured" + raise ValueError(msg) + return self.check_result_to_return + + def 
execute(self, checkpointed_result: CheckpointedResult) -> str: + """Mock implementation that returns configured result.""" + self.execute_called += 1 + return self.execute_result_to_return + + +def create_mock_checkpoint(status: OperationStatus) -> CheckpointedResult: + """Create a mock CheckpointedResult with the given status.""" + operation = Operation( + operation_id="test_op", + operation_type=OperationType.STEP, + status=status, + ) + return CheckpointedResult.create_from_operation(operation) + + +# Tests for CheckResult factory methods + + +def test_check_result_create_is_ready_to_execute(): + """Test CheckResult.create_is_ready_to_execute factory method.""" + checkpoint = create_mock_checkpoint(OperationStatus.STARTED) + + result = CheckResult.create_is_ready_to_execute(checkpoint) + + assert result.is_ready_to_execute is True + assert result.has_checkpointed_result is False + assert result.checkpointed_result is checkpoint + assert result.deserialized_result is None + + +def test_check_result_create_started(): + """Test CheckResult.create_started factory method.""" + result = CheckResult.create_started() + + assert result.is_ready_to_execute is False + assert result.has_checkpointed_result is False + assert result.checkpointed_result is None + assert result.deserialized_result is None + + +def test_check_result_create_completed(): + """Test CheckResult.create_completed factory method.""" + test_result = "test_completed_result" + + result = CheckResult.create_completed(test_result) + + assert result.is_ready_to_execute is False + assert result.has_checkpointed_result is True + assert result.checkpointed_result is None + assert result.deserialized_result == test_result + + +def test_check_result_create_completed_with_none(): + """Test CheckResult.create_completed with None result (valid for operations that return None).""" + result = CheckResult.create_completed(None) + + assert result.is_ready_to_execute is False + assert result.has_checkpointed_result is True + assert result.checkpointed_result is None + assert result.deserialized_result is None + + +# Tests for OperationExecutor.process() method + + +def test_process_with_terminal_result_on_first_check(): + """Test process() when check_result_status returns terminal result on first call.""" + executor = ConcreteOperationExecutor() + executor.check_result_to_return = CheckResult.create_completed("terminal_result") + + result = executor.process() + + assert result == "terminal_result" + assert executor.check_result_status_called == 1 + assert executor.execute_called == 0 + + +def test_process_with_ready_to_execute_on_first_check(): + """Test process() when check_result_status returns ready_to_execute on first call.""" + executor = ConcreteOperationExecutor() + checkpoint = create_mock_checkpoint(OperationStatus.STARTED) + executor.check_result_to_return = CheckResult.create_is_ready_to_execute(checkpoint) + executor.execute_result_to_return = "execution_result" + + result = executor.process() + + assert result == "execution_result" + assert executor.check_result_status_called == 1 + assert executor.execute_called == 1 + + +def test_process_with_checkpoint_created_then_terminal(): + """Test process() when checkpoint is created, then terminal result on second check.""" + executor = ConcreteOperationExecutor() + + # First call returns create_started (checkpoint was created) + # Second call returns terminal result (immediate response) + call_count = 0 + + def check_result_side_effect(): + nonlocal call_count + call_count += 1 + if 
call_count == 1: + return CheckResult.create_started() + return CheckResult.create_completed("immediate_response") + + executor.check_result_status = check_result_side_effect + + result = executor.process() + + assert result == "immediate_response" + assert call_count == 2 + assert executor.execute_called == 0 + + +def test_process_with_checkpoint_created_then_ready_to_execute(): + """Test process() when checkpoint is created, then ready_to_execute on second check.""" + executor = ConcreteOperationExecutor() + checkpoint = create_mock_checkpoint(OperationStatus.STARTED) + + # First call returns create_started (checkpoint was created) + # Second call returns ready_to_execute (no immediate response, proceed to execute) + call_count = 0 + + def check_result_side_effect(): + nonlocal call_count + call_count += 1 + if call_count == 1: + return CheckResult.create_started() + return CheckResult.create_is_ready_to_execute(checkpoint) + + executor.check_result_status = check_result_side_effect + executor.execute_result_to_return = "execution_result" + + result = executor.process() + + assert result == "execution_result" + assert call_count == 2 + assert executor.execute_called == 1 + + +def test_process_with_none_result_terminal(): + """Test process() with terminal result that is None (valid for operations returning None).""" + executor = ConcreteOperationExecutor() + executor.check_result_to_return = CheckResult.create_completed(None) + + result = executor.process() + + assert result is None + assert executor.check_result_status_called == 1 + assert executor.execute_called == 0 + + +def test_process_raises_invalid_state_when_checkpointed_result_missing(): + """Test process() raises InvalidStateError when ready_to_execute but checkpoint is None.""" + executor = ConcreteOperationExecutor() + # Create invalid state: ready_to_execute but no checkpoint + executor.check_result_to_return = CheckResult( + is_ready_to_execute=True, + has_checkpointed_result=False, + checkpointed_result=None, + ) + + with pytest.raises(InvalidStateError) as exc_info: + executor.process() + + assert "checkpointed result is not set" in str(exc_info.value) + + +def test_process_raises_invalid_state_when_neither_terminal_nor_ready(): + """Test process() raises InvalidStateError when result is neither terminal nor ready.""" + executor = ConcreteOperationExecutor() + # Create invalid state: neither terminal nor ready (both False) + executor.check_result_to_return = CheckResult( + is_ready_to_execute=False, + has_checkpointed_result=False, + ) + + # Mock to return same invalid state on both calls + call_count = 0 + + def check_result_side_effect(): + nonlocal call_count + call_count += 1 + return CheckResult( + is_ready_to_execute=False, + has_checkpointed_result=False, + ) + + executor.check_result_status = check_result_side_effect + + with pytest.raises(InvalidStateError) as exc_info: + executor.process() + + assert "neither terminal nor ready to execute" in str(exc_info.value) + assert call_count == 2 # Should call twice before raising + + +def test_process_double_check_pattern(): + """Test that process() implements the double-check pattern correctly. + + This verifies the core immediate response handling logic: + 1. Check status once (may find existing checkpoint or create new one) + 2. If checkpoint was just created, check again (catches immediate response) + 3. 
Only call execute() if ready after both checks + """ + executor = ConcreteOperationExecutor() + checkpoint = create_mock_checkpoint(OperationStatus.STARTED) + + check_calls = [] + + def track_check_calls(): + call_num = len(check_calls) + 1 + check_calls.append(call_num) + + if call_num == 1: + # First check: checkpoint doesn't exist, create it + return CheckResult.create_started() + # Second check: checkpoint exists, ready to execute + return CheckResult.create_is_ready_to_execute(checkpoint) + + executor.check_result_status = track_check_calls + executor.execute_result_to_return = "final_result" + + result = executor.process() + + # Verify the double-check pattern + assert len(check_calls) == 2, "Should check status exactly twice" + assert check_calls == [1, 2], "Checks should be in order" + assert executor.execute_called == 1, "Should execute once after both checks" + assert result == "final_result" + + +def test_process_single_check_when_terminal_immediately(): + """Test that process() only checks once when terminal result is found immediately.""" + executor = ConcreteOperationExecutor() + + check_calls = [] + + def track_check_calls(): + call_num = len(check_calls) + 1 + check_calls.append(call_num) + return CheckResult.create_completed("immediate_terminal") + + executor.check_result_status = track_check_calls + + result = executor.process() + + # Should only check once since terminal result was found + assert len(check_calls) == 1, "Should check status only once for immediate terminal" + assert executor.execute_called == 0, "Should not execute when terminal result found" + assert result == "immediate_terminal" + + +def test_process_single_check_when_ready_immediately(): + """Test that process() only checks once when ready_to_execute is found immediately.""" + executor = ConcreteOperationExecutor() + checkpoint = create_mock_checkpoint(OperationStatus.STARTED) + + check_calls = [] + + def track_check_calls(): + call_num = len(check_calls) + 1 + check_calls.append(call_num) + return CheckResult.create_is_ready_to_execute(checkpoint) + + executor.check_result_status = track_check_calls + executor.execute_result_to_return = "execution_result" + + result = executor.process() + + # Should only check once since ready_to_execute was found + assert len(check_calls) == 1, "Should check status only once when ready immediately" + assert executor.execute_called == 1, "Should execute once" + assert result == "execution_result" diff --git a/tests/operation/callback_test.py b/tests/operation/callback_test.py index 3943f76..334e276 100644 --- a/tests/operation/callback_test.py +++ b/tests/operation/callback_test.py @@ -7,14 +7,17 @@ from aws_durable_execution_sdk_python.config import ( CallbackConfig, + Duration, StepConfig, WaitForCallbackConfig, ) -from aws_durable_execution_sdk_python.exceptions import CallbackError +from aws_durable_execution_sdk_python.context import Callback +from aws_durable_execution_sdk_python.exceptions import CallbackError, ValidationError from aws_durable_execution_sdk_python.identifier import OperationIdentifier from aws_durable_execution_sdk_python.lambda_service import ( CallbackDetails, CallbackOptions, + ErrorObject, Operation, OperationAction, OperationStatus, @@ -23,7 +26,7 @@ OperationUpdate, ) from aws_durable_execution_sdk_python.operation.callback import ( - create_callback_handler, + CallbackOperationExecutor, wait_for_callback_handler, ) from aws_durable_execution_sdk_python.retries import RetryDecision @@ -32,6 +35,17 @@ from 
aws_durable_execution_sdk_python.types import DurableContext, StepContext +# Test helper - maintains old handler signature for backward compatibility in tests +def create_callback_handler(state, operation_identifier, config=None): + """Test helper that wraps CallbackOperationExecutor with old handler signature.""" + executor = CallbackOperationExecutor( + state=state, + operation_identifier=operation_identifier, + config=config, + ) + return executor.process() + + # region create_callback_handler def test_create_callback_handler_new_operation_with_config(): """Test create_callback_handler creates new checkpoint when operation doesn't exist.""" @@ -50,7 +64,9 @@ def test_create_callback_handler_new_operation_with_config(): CheckpointedResult.create_from_operation(operation), ] - config = CallbackConfig(timeout_seconds=300, heartbeat_timeout_seconds=60) + config = CallbackConfig( + timeout=Duration.from_minutes(5), heartbeat_timeout=Duration.from_minutes(1) + ) result = create_callback_handler( state=mock_state, @@ -139,23 +155,27 @@ def test_create_callback_handler_existing_started_operation(): def test_create_callback_handler_existing_failed_operation(): - """Test create_callback_handler raises error for failed operation.""" + """Test create_callback_handler returns callback_id for failed operation (deferred error).""" + # CRITICAL: create_callback_handler should NOT raise on FAILED + # Errors are deferred to Callback.result() for deterministic replay mock_state = Mock(spec=ExecutionState) - mock_result = Mock(spec=CheckpointedResult) - mock_result.is_failed.return_value = True - mock_result.is_started.return_value = False - msg = "Checkpointed error" - mock_result.raise_callable_error.side_effect = Exception(msg) + failed_op = Operation( + operation_id="callback4", + operation_type=OperationType.CALLBACK, + status=OperationStatus.FAILED, + callback_details=CallbackDetails(callback_id="failed_cb4"), + ) + mock_result = CheckpointedResult.create_from_operation(failed_op) mock_state.get_checkpoint_result.return_value = mock_result - with pytest.raises(Exception, match="Checkpointed error"): - create_callback_handler( - state=mock_state, - operation_identifier=OperationIdentifier("callback4", None), - config=None, - ) + # Should return callback_id without raising + callback_id = create_callback_handler( + state=mock_state, + operation_identifier=OperationIdentifier("callback4", None), + config=None, + ) - mock_result.raise_callable_error.assert_called_once() + assert callback_id == "failed_cb4" mock_state.create_checkpoint.assert_not_called() @@ -300,13 +320,18 @@ def test_wait_for_callback_handler_submitter_called_with_callback_id(): def capture_step_call(func, name, config=None): # Execute the step callable to verify submitter is called correctly step_context = Mock(spec=StepContext) + step_context.logger = Mock() func(step_context) mock_context.step.side_effect = capture_step_call wait_for_callback_handler(mock_context, mock_submitter, "test") - mock_submitter.assert_called_once_with("callback_test_id") + # Verify submitter was called with callback_id and WaitForCallbackContext + assert mock_submitter.call_count == 1 + call_args = mock_submitter.call_args[0] + assert call_args[0] == "callback_test_id" + assert hasattr(call_args[1], "logger") def test_create_callback_handler_with_none_operation_in_result(): @@ -329,29 +354,11 @@ def test_create_callback_handler_with_none_operation_in_result(): def test_create_callback_handler_with_negative_timeouts(): """Test create_callback_handler with 
negative timeout values in config.""" - mock_state = Mock(spec=ExecutionState) - callback_details = CallbackDetails(callback_id="negative_timeout_cb") - operation = Operation( - operation_id="negative_timeout", - operation_type=OperationType.CALLBACK, - status=OperationStatus.STARTED, - callback_details=callback_details, - ) - mock_state.get_checkpoint_result.side_effect = [ - CheckpointedResult.create_not_found(), - CheckpointedResult.create_from_operation(operation), - ] - - config = CallbackConfig(timeout_seconds=-100, heartbeat_timeout_seconds=-50) - - result = create_callback_handler( - state=mock_state, - operation_identifier=OperationIdentifier("negative_timeout", None), - config=config, - ) - - assert result == "negative_timeout_cb" - mock_state.create_checkpoint.assert_called_once() + # Duration now validates that all values must be positive + with pytest.raises(ValidationError, match="Duration seconds must be positive"): + CallbackConfig( + timeout=Duration(seconds=-100), heartbeat_timeout=Duration(seconds=-50) + ) def test_wait_for_callback_handler_with_none_callback_id(): @@ -365,6 +372,7 @@ def test_wait_for_callback_handler_with_none_callback_id(): def execute_step(func, name, config=None): step_context = Mock(spec=StepContext) + step_context.logger = Mock() return func(step_context) mock_context.step.side_effect = execute_step @@ -372,7 +380,11 @@ def execute_step(func, name, config=None): result = wait_for_callback_handler(mock_context, mock_submitter, "test") assert result == "result_with_none_id" - mock_submitter.assert_called_once_with(None) + # Verify submitter was called with None callback_id and WaitForCallbackContext + assert mock_submitter.call_count == 1 + call_args = mock_submitter.call_args[0] + assert call_args[0] is None + assert hasattr(call_args[1], "logger") def test_wait_for_callback_handler_with_empty_string_callback_id(): @@ -386,6 +398,7 @@ def test_wait_for_callback_handler_with_empty_string_callback_id(): def execute_step(func, name, config=None): step_context = Mock(spec=StepContext) + step_context.logger = Mock() return func(step_context) mock_context.step.side_effect = execute_step @@ -393,7 +406,11 @@ def execute_step(func, name, config=None): result = wait_for_callback_handler(mock_context, mock_submitter, "test") assert result == "result_with_empty_id" - mock_submitter.assert_called_once_with("") + # Verify submitter was called with empty string callback_id and WaitForCallbackContext + assert mock_submitter.call_count == 1 + call_args = mock_submitter.call_args[0] + assert call_args[0] == "" # noqa: PLC1901 - explicitly testing empty string, not just falsey + assert hasattr(call_args[1], "logger") def test_wait_for_callback_handler_with_large_data(): @@ -498,7 +515,9 @@ def test_create_callback_handler_config_with_zero_timeouts(): CheckpointedResult.create_from_operation(operation), ] - config = CallbackConfig(timeout_seconds=0, heartbeat_timeout_seconds=0) + config = CallbackConfig( + timeout=Duration.from_seconds(0), heartbeat_timeout=Duration.from_seconds(0) + ) result = create_callback_handler( state=mock_state, @@ -538,7 +557,10 @@ def test_create_callback_handler_config_with_large_timeouts(): CheckpointedResult.create_from_operation(operation), ] - config = CallbackConfig(timeout_seconds=86400, heartbeat_timeout_seconds=3600) + config = CallbackConfig( + timeout=Duration.from_days(1), + heartbeat_timeout=Duration.from_hours(1), + ) result = create_callback_handler( state=mock_state, @@ -595,12 +617,13 @@ def 
test_wait_for_callback_handler_submitter_exception_handling(): mock_callback.result.return_value = "exception_result" mock_context.create_callback.return_value = mock_callback - def failing_submitter(callback_id): + def failing_submitter(callback_id, context): msg = "Submitter failed" raise ValueError(msg) def step_side_effect(func, name, config=None): step_context = Mock(spec=StepContext) + step_context.logger = Mock() func(step_context) mock_context.step.side_effect = step_side_effect @@ -683,7 +706,9 @@ def test_wait_for_callback_handler_config_propagation(): mock_context.create_callback.return_value = mock_callback mock_submitter = Mock() - config = WaitForCallbackConfig(timeout_seconds=120, heartbeat_timeout_seconds=30) + config = WaitForCallbackConfig( + timeout=Duration.from_minutes(2), heartbeat_timeout=Duration.from_seconds(30) + ) result = wait_for_callback_handler( mock_context, mock_submitter, "config_test", config @@ -772,7 +797,9 @@ def test_callback_lifecycle_complete_flow(): mock_callback.result.return_value = {"status": "completed", "data": "test_data"} mock_context.create_callback.return_value = mock_callback - config = WaitForCallbackConfig(timeout_seconds=300, heartbeat_timeout_seconds=60) + config = WaitForCallbackConfig( + timeout=Duration.from_minutes(5), heartbeat_timeout=Duration.from_minutes(1) + ) callback_id = create_callback_handler( state=mock_state, operation_identifier=OperationIdentifier("lifecycle_callback", None), @@ -781,12 +808,14 @@ def test_callback_lifecycle_complete_flow(): assert callback_id == "lifecycle_cb123" - def mock_submitter(cb_id): + def mock_submitter(cb_id, context): assert cb_id == "lifecycle_cb123" + assert hasattr(context, "logger") return "submitted" def execute_step(func, name, config=None): step_context = Mock(spec=StepContext) + step_context.logger = Mock() return func(step_context) mock_context.step.side_effect = execute_step @@ -847,8 +876,8 @@ def test_callback_timeout_configuration(): ] config = CallbackConfig( - timeout_seconds=timeout_seconds, - heartbeat_timeout_seconds=heartbeat_timeout_seconds, + timeout=Duration.from_seconds(timeout_seconds), + heartbeat_timeout=Duration.from_seconds(heartbeat_timeout_seconds), ) callback_id = create_callback_handler( @@ -864,19 +893,25 @@ def test_callback_timeout_configuration(): def test_callback_error_propagation(): """Test error propagation through callback operations.""" + # CRITICAL: create_callback_handler should NOT raise on FAILED + # Errors are deferred to Callback.result() for deterministic replay mock_state = Mock(spec=ExecutionState) - mock_result = Mock(spec=CheckpointedResult) - mock_result.is_failed.return_value = True - msg = "Callback creation failed" - mock_result.raise_callable_error.side_effect = RuntimeError(msg) + failed_op = Operation( + operation_id="error_callback", + operation_type=OperationType.CALLBACK, + status=OperationStatus.FAILED, + callback_details=CallbackDetails(callback_id="failed_cb"), + ) + mock_result = CheckpointedResult.create_from_operation(failed_op) mock_state.get_checkpoint_result.return_value = mock_result - with pytest.raises(RuntimeError, match="Callback creation failed"): - create_callback_handler( - state=mock_state, - operation_identifier=OperationIdentifier("error_callback", None), - config=None, - ) + # Should return callback_id without raising + callback_id = create_callback_handler( + state=mock_state, + operation_identifier=OperationIdentifier("error_callback", None), + config=None, + ) + assert callback_id == "failed_cb" 
mock_context = Mock(spec=DurableContext) mock_context.create_callback.side_effect = ValueError("Context creation failed") @@ -895,7 +930,7 @@ def test_callback_with_complex_submitter(): submission_log = [] - def complex_submitter(callback_id): + def complex_submitter(callback_id, context): submission_log.append(f"received_id: {callback_id}") if callback_id == "complex_cb789": submission_log.append("api_call_success") @@ -907,6 +942,7 @@ def complex_submitter(callback_id): def execute_step(func, name, config): step_context = Mock(spec=StepContext) + step_context.logger = Mock() return func(step_context) mock_context.step.side_effect = execute_step @@ -1008,7 +1044,9 @@ def test_callback_operation_update_creation(mock_operation_update): CheckpointedResult.create_from_operation(operation), ] - config = CallbackConfig(timeout_seconds=600, heartbeat_timeout_seconds=120) + config = CallbackConfig( + timeout=Duration.from_minutes(10), heartbeat_timeout=Duration.from_minutes(2) + ) create_callback_handler( state=mock_state, @@ -1025,3 +1063,471 @@ def test_callback_operation_update_creation(mock_operation_update): # endregion wait_for_callback_handler + + +# region immediate response handling tests +def test_callback_immediate_response_get_checkpoint_result_called_twice(): + """Test that get_checkpoint_result is called twice when checkpoint is created.""" + mock_state = Mock(spec=ExecutionState) + + # First call: not found, second call: started (no immediate response) + not_found = CheckpointedResult.create_not_found() + callback_details = CallbackDetails(callback_id="cb_immediate_1") + started_op = Operation( + operation_id="callback_immediate_1", + operation_type=OperationType.CALLBACK, + status=OperationStatus.STARTED, + callback_details=callback_details, + ) + started = CheckpointedResult.create_from_operation(started_op) + mock_state.get_checkpoint_result.side_effect = [not_found, started] + + result = create_callback_handler( + state=mock_state, + operation_identifier=OperationIdentifier("callback_immediate_1", None), + config=None, + ) + + # Verify callback_id was returned + assert result == "cb_immediate_1" + # Verify get_checkpoint_result was called twice + assert mock_state.get_checkpoint_result.call_count == 2 + + +def test_callback_immediate_response_create_checkpoint_with_is_sync_true(): + """Test that create_checkpoint is called with is_sync=True.""" + mock_state = Mock(spec=ExecutionState) + + # First call: not found, second call: started + not_found = CheckpointedResult.create_not_found() + callback_details = CallbackDetails(callback_id="cb_immediate_2") + started_op = Operation( + operation_id="callback_immediate_2", + operation_type=OperationType.CALLBACK, + status=OperationStatus.STARTED, + callback_details=callback_details, + ) + started = CheckpointedResult.create_from_operation(started_op) + mock_state.get_checkpoint_result.side_effect = [not_found, started] + + result = create_callback_handler( + state=mock_state, + operation_identifier=OperationIdentifier("callback_immediate_2", None), + config=None, + ) + + # Verify callback_id was returned + assert result == "cb_immediate_2" + # Verify create_checkpoint was called with is_sync=True (default) + mock_state.create_checkpoint.assert_called_once() + # is_sync=True is the default, so it won't be in kwargs if not explicitly passed + # We just verify the checkpoint was created + + +def test_callback_immediate_response_immediate_success(): + """Test immediate success: checkpoint returns SUCCEEDED on second check. 
+ + When checkpoint returns SUCCEEDED on second check, operation returns callback_id + without raising. + """ + mock_state = Mock(spec=ExecutionState) + + # First call: not found, second call: succeeded (immediate response) + not_found = CheckpointedResult.create_not_found() + callback_details = CallbackDetails(callback_id="cb_immediate_success") + succeeded_op = Operation( + operation_id="callback_immediate_3", + operation_type=OperationType.CALLBACK, + status=OperationStatus.SUCCEEDED, + callback_details=callback_details, + ) + succeeded = CheckpointedResult.create_from_operation(succeeded_op) + mock_state.get_checkpoint_result.side_effect = [not_found, succeeded] + + result = create_callback_handler( + state=mock_state, + operation_identifier=OperationIdentifier("callback_immediate_3", None), + config=None, + ) + + # Verify callback_id was returned without raising + assert result == "cb_immediate_success" + # Verify checkpoint was created + mock_state.create_checkpoint.assert_called_once() + # Verify get_checkpoint_result was called twice + assert mock_state.get_checkpoint_result.call_count == 2 + + +def test_callback_immediate_response_immediate_failure_deferred(): + """Test immediate failure deferred: checkpoint returns FAILED on second check. + + CRITICAL: When checkpoint returns FAILED on second check, create_callback() + returns callback_id (does NOT raise). Errors are deferred to Callback.result() + for deterministic replay. + """ + mock_state = Mock(spec=ExecutionState) + + # First call: not found, second call: failed (immediate response) + not_found = CheckpointedResult.create_not_found() + callback_details = CallbackDetails(callback_id="cb_immediate_failed") + failed_op = Operation( + operation_id="callback_immediate_4", + operation_type=OperationType.CALLBACK, + status=OperationStatus.FAILED, + callback_details=callback_details, + ) + failed = CheckpointedResult.create_from_operation(failed_op) + mock_state.get_checkpoint_result.side_effect = [not_found, failed] + + # CRITICAL: Should return callback_id without raising + result = create_callback_handler( + state=mock_state, + operation_identifier=OperationIdentifier("callback_immediate_4", None), + config=None, + ) + + # Verify callback_id was returned (error deferred) + assert result == "cb_immediate_failed" + # Verify checkpoint was created + mock_state.create_checkpoint.assert_called_once() + # Verify get_checkpoint_result was called twice + assert mock_state.get_checkpoint_result.call_count == 2 + + +def test_callback_result_raises_error_for_failed_callbacks(): + """Test that Callback.result() raises error for FAILED callbacks (deferred error handling). + + This test verifies that errors are properly deferred to Callback.result() rather + than being raised during create_callback(). This ensures deterministic replay: + code between create_callback() and callback.result() always executes. 
+ """ + + mock_state = Mock(spec=ExecutionState) + + # Create a FAILED callback operation + error = ErrorObject( + message="Callback failed", type="CallbackError", data=None, stack_trace=None + ) + callback_details = CallbackDetails( + callback_id="cb_failed_result", result=None, error=error + ) + failed_op = Operation( + operation_id="callback_failed_result", + operation_type=OperationType.CALLBACK, + status=OperationStatus.FAILED, + callback_details=callback_details, + ) + failed_result = CheckpointedResult.create_from_operation(failed_op) + mock_state.get_checkpoint_result.return_value = failed_result + + # Create Callback instance + callback = Callback( + callback_id="cb_failed_result", + operation_id="callback_failed_result", + state=mock_state, + serdes=None, + ) + + # Verify that result() raises CallbackError + with pytest.raises(CallbackError, match="Callback failed"): + callback.result() + + +def test_callback_result_raises_error_for_timed_out_callbacks(): + """Test that Callback.result() raises error for TIMED_OUT callbacks.""" + + mock_state = Mock(spec=ExecutionState) + + # Create a TIMED_OUT callback operation + error = ErrorObject( + message="Callback timed out", + type="CallbackTimeoutError", + data=None, + stack_trace=None, + ) + callback_details = CallbackDetails( + callback_id="cb_timed_out_result", result=None, error=error + ) + timed_out_op = Operation( + operation_id="callback_timed_out_result", + operation_type=OperationType.CALLBACK, + status=OperationStatus.TIMED_OUT, + callback_details=callback_details, + ) + timed_out_result = CheckpointedResult.create_from_operation(timed_out_op) + mock_state.get_checkpoint_result.return_value = timed_out_result + + # Create Callback instance + callback = Callback( + callback_id="cb_timed_out_result", + operation_id="callback_timed_out_result", + state=mock_state, + serdes=None, + ) + + # Verify that result() raises CallbackError + with pytest.raises(CallbackError, match="Callback timed out"): + callback.result() + + +def test_callback_immediate_response_no_immediate_response(): + """Test no immediate response: checkpoint returns STARTED on second check. + + When checkpoint returns STARTED on second check, operation returns callback_id + normally (callbacks don't suspend). + """ + mock_state = Mock(spec=ExecutionState) + + # First call: not found, second call: started (no immediate response) + not_found = CheckpointedResult.create_not_found() + callback_details = CallbackDetails(callback_id="cb_immediate_started") + started_op = Operation( + operation_id="callback_immediate_5", + operation_type=OperationType.CALLBACK, + status=OperationStatus.STARTED, + callback_details=callback_details, + ) + started = CheckpointedResult.create_from_operation(started_op) + mock_state.get_checkpoint_result.side_effect = [not_found, started] + + result = create_callback_handler( + state=mock_state, + operation_identifier=OperationIdentifier("callback_immediate_5", None), + config=None, + ) + + # Verify callback_id was returned + assert result == "cb_immediate_started" + # Verify checkpoint was created + mock_state.create_checkpoint.assert_called_once() + # Verify get_checkpoint_result was called twice + assert mock_state.get_checkpoint_result.call_count == 2 + + +def test_callback_immediate_response_already_completed(): + """Test already completed: checkpoint exists on first check. + + When checkpoint is already SUCCEEDED on first check, no checkpoint is created + and callback_id is returned immediately. 
+ """ + mock_state = Mock(spec=ExecutionState) + + # First call: already succeeded + callback_details = CallbackDetails(callback_id="cb_already_completed") + succeeded_op = Operation( + operation_id="callback_immediate_6", + operation_type=OperationType.CALLBACK, + status=OperationStatus.SUCCEEDED, + callback_details=callback_details, + ) + succeeded = CheckpointedResult.create_from_operation(succeeded_op) + mock_state.get_checkpoint_result.return_value = succeeded + + result = create_callback_handler( + state=mock_state, + operation_identifier=OperationIdentifier("callback_immediate_6", None), + config=None, + ) + + # Verify callback_id was returned + assert result == "cb_already_completed" + # Verify no checkpoint was created (already exists) + mock_state.create_checkpoint.assert_not_called() + # Verify get_checkpoint_result was called only once + assert mock_state.get_checkpoint_result.call_count == 1 + + +def test_callback_immediate_response_already_failed(): + """Test already failed: checkpoint is already FAILED on first check. + + When checkpoint is already FAILED on first check, no checkpoint is created + and callback_id is returned (error deferred to Callback.result()). + """ + mock_state = Mock(spec=ExecutionState) + + # First call: already failed + callback_details = CallbackDetails(callback_id="cb_already_failed") + failed_op = Operation( + operation_id="callback_immediate_7", + operation_type=OperationType.CALLBACK, + status=OperationStatus.FAILED, + callback_details=callback_details, + ) + failed = CheckpointedResult.create_from_operation(failed_op) + mock_state.get_checkpoint_result.return_value = failed + + # Should return callback_id without raising + result = create_callback_handler( + state=mock_state, + operation_identifier=OperationIdentifier("callback_immediate_7", None), + config=None, + ) + + # Verify callback_id was returned (error deferred) + assert result == "cb_already_failed" + # Verify no checkpoint was created (already exists) + mock_state.create_checkpoint.assert_not_called() + # Verify get_checkpoint_result was called only once + assert mock_state.get_checkpoint_result.call_count == 1 + + +def test_callback_deferred_error_handling_code_execution_between_create_and_result(): + """Test callback deferred error handling with code execution between create_callback() and callback.result(). + + This test verifies that code between create_callback() and callback.result() executes + even when the callback is FAILED. This ensures deterministic replay. 
+ """ + + mock_state = Mock(spec=ExecutionState) + + # Setup: callback is already FAILED + error = ErrorObject( + message="Callback failed", type="CallbackError", data=None, stack_trace=None + ) + callback_details = CallbackDetails( + callback_id="cb_deferred_error", result=None, error=error + ) + failed_op = Operation( + operation_id="callback_deferred_error", + operation_type=OperationType.CALLBACK, + status=OperationStatus.FAILED, + callback_details=callback_details, + ) + failed_result = CheckpointedResult.create_from_operation(failed_op) + mock_state.get_checkpoint_result.return_value = failed_result + + # Step 1: create_callback() returns callback_id without raising + callback_id = create_callback_handler( + state=mock_state, + operation_identifier=OperationIdentifier("callback_deferred_error", None), + config=None, + ) + assert callback_id == "cb_deferred_error" + + # Step 2: Code executes between create_callback() and callback.result() + execution_log = [ + "code_executed_after_create_callback", + f"callback_id: {callback_id}", + ] + + # Step 3: Callback.result() raises the error + callback = Callback( + callback_id=callback_id, + operation_id="callback_deferred_error", + state=mock_state, + serdes=None, + ) + + with pytest.raises(CallbackError, match="Callback failed"): + callback.result() + + # Verify code between create_callback() and callback.result() executed + assert execution_log == [ + "code_executed_after_create_callback", + "callback_id: cb_deferred_error", + ] + + +def test_callback_immediate_response_with_config(): + """Test immediate response with callback configuration.""" + mock_state = Mock(spec=ExecutionState) + + # First call: not found, second call: succeeded + not_found = CheckpointedResult.create_not_found() + callback_details = CallbackDetails(callback_id="cb_with_config") + succeeded_op = Operation( + operation_id="callback_with_config", + operation_type=OperationType.CALLBACK, + status=OperationStatus.SUCCEEDED, + callback_details=callback_details, + ) + succeeded = CheckpointedResult.create_from_operation(succeeded_op) + mock_state.get_checkpoint_result.side_effect = [not_found, succeeded] + + config = CallbackConfig( + timeout=Duration.from_minutes(5), heartbeat_timeout=Duration.from_minutes(1) + ) + + result = create_callback_handler( + state=mock_state, + operation_identifier=OperationIdentifier("callback_with_config", None), + config=config, + ) + + # Verify callback_id was returned + assert result == "cb_with_config" + # Verify checkpoint was created with config + mock_state.create_checkpoint.assert_called_once() + call_args = mock_state.create_checkpoint.call_args[1] + operation_update = call_args["operation_update"] + assert operation_update.callback_options.timeout_seconds == 300 + assert operation_update.callback_options.heartbeat_timeout_seconds == 60 + + +# endregion immediate response handling tests + + +def test_callback_returns_id_when_second_check_returns_started(): + """Test when the second checkpoint check returns + STARTED (not terminal), the callback operation returns callback_id normally. 
+ """ + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + # First call: checkpoint doesn't exist + # Second call: checkpoint returns STARTED (no immediate response) + mock_state.get_checkpoint_result.side_effect = [ + CheckpointedResult.create_not_found(), + CheckpointedResult.create_from_operation( + Operation( + operation_id="callback-1", + operation_type=OperationType.CALLBACK, + status=OperationStatus.STARTED, + callback_details=CallbackDetails(callback_id="cb-123"), + ) + ), + ] + + executor = CallbackOperationExecutor( + state=mock_state, + operation_identifier=OperationIdentifier("callback-1", None, "test_callback"), + config=CallbackConfig(), + ) + callback_id = executor.process() + + # Assert - behaves like "old way" + assert callback_id == "cb-123" + assert mock_state.get_checkpoint_result.call_count == 2 # Double-check happened + mock_state.create_checkpoint.assert_called_once() # START checkpoint created + + +def test_callback_returns_id_when_second_check_returns_started_duplicate(): + """Test when the second checkpoint check returns + STARTED (not terminal), the callback operation returns callback_id normally. + """ + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + # First call: checkpoint doesn't exist + # Second call: checkpoint returns STARTED (no immediate response) + not_found = CheckpointedResult.create_not_found() + started_op = Operation( + operation_id="callback-1", + operation_type=OperationType.CALLBACK, + status=OperationStatus.STARTED, + callback_details=CallbackDetails(callback_id="cb-123"), + ) + started = CheckpointedResult.create_from_operation(started_op) + mock_state.get_checkpoint_result.side_effect = [not_found, started] + + executor = CallbackOperationExecutor( + state=mock_state, + operation_identifier=OperationIdentifier("callback-1", None, "test_callback"), + config=CallbackConfig(), + ) + callback_id = executor.process() + + # Assert - behaves like "old way" + assert callback_id == "cb-123" + assert mock_state.get_checkpoint_result.call_count == 2 # Double-check happened + mock_state.create_checkpoint.assert_called_once() # START checkpoint created diff --git a/tests/operation/child_test.py b/tests/operation/child_test.py index e888ebb..ae1bb3a 100644 --- a/tests/operation/child_test.py +++ b/tests/operation/child_test.py @@ -1,5 +1,7 @@ """Unit tests for child handler.""" +from __future__ import annotations + import json from typing import cast from unittest.mock import Mock @@ -7,7 +9,10 @@ import pytest from aws_durable_execution_sdk_python.config import ChildConfig -from aws_durable_execution_sdk_python.exceptions import CallableRuntimeError +from aws_durable_execution_sdk_python.exceptions import ( + CallableRuntimeError, + InvocationError, +) from aws_durable_execution_sdk_python.identifier import OperationIdentifier from aws_durable_execution_sdk_python.lambda_service import ( ErrorObject, @@ -34,9 +39,15 @@ ], ) def test_child_handler_not_started( - config: ChildConfig, expected_sub_type: OperationSubType + config: ChildConfig | None, expected_sub_type: OperationSubType ): - """Test child_handler when operation not started.""" + """Test child_handler when operation not started. 
+
+ Verifies:
+ - get_checkpoint_result is called once (async checkpoint, no second check)
+ - create_checkpoint is called with is_sync=False for START
+ - Operation executes and creates SUCCEED checkpoint
+ """
 mock_state = Mock(spec=ExecutionState)
 mock_state.durable_execution_arn = "test_arn"
 mock_result = Mock()
@@ -44,7 +55,6 @@ def test_child_handler_not_started(
 mock_result.is_failed.return_value = False
 mock_result.is_started.return_value = False
 mock_result.is_replay_children.return_value = False
- mock_result.is_replay_children.return_value = False
 mock_result.is_existent.return_value = False
 mock_state.get_checkpoint_result.return_value = mock_result
 mock_callable = Mock(return_value="fresh_result")
@@ -54,10 +64,15 @@ def test_child_handler_not_started(
 )
 assert result == "fresh_result"
+
+ # Verify get_checkpoint_result called once (async checkpoint, no second check)
+ assert mock_state.get_checkpoint_result.call_count == 1
+
+ # Verify create_checkpoint called twice (start and succeed)
 mock_state.create_checkpoint.assert_called()
- assert mock_state.create_checkpoint.call_count == 2 # start and succeed
+ assert mock_state.create_checkpoint.call_count == 2
- # Verify start checkpoint
+ # Verify start checkpoint with is_sync=False
 start_call = mock_state.create_checkpoint.call_args_list[0]
 start_operation = start_call[1]["operation_update"]
 assert start_operation.operation_id == "op1"
@@ -65,6 +80,8 @@ def test_child_handler_not_started(
 assert start_operation.operation_type is OperationType.CONTEXT
 assert start_operation.sub_type is expected_sub_type
 assert start_operation.action is OperationAction.START
+ # CRITICAL: Verify is_sync=False for START checkpoint (async, no immediate response)
+ assert start_call[1]["is_sync"] is False
 # Verify success checkpoint
 success_call = mock_state.create_checkpoint.call_args_list[1]
@@ -80,7 +97,13 @@ def test_child_handler_not_started(
 def test_child_handler_already_succeeded():
- """Test child_handler when operation already succeeded."""
+ """Test child_handler when operation already succeeded without replay_children.
+
+ Verifies:
+ - Returns cached result without executing function
+ - No checkpoint created
+ - get_checkpoint_result called once
+ """
 mock_state = Mock(spec=ExecutionState)
 mock_state.durable_execution_arn = "test_arn"
 mock_result = Mock()
@@ -95,8 +118,12 @@ def test_child_handler_already_succeeded():
 )
 assert result == "cached_result"
+ # Verify function not executed
 mock_callable.assert_not_called()
+ # Verify no checkpoint created
 mock_state.create_checkpoint.assert_not_called()
+ # Verify get_checkpoint_result called once
+ assert mock_state.get_checkpoint_result.call_count == 1
 def test_child_handler_already_succeeded_none_result():
@@ -119,7 +146,13 @@
 def test_child_handler_already_failed():
- """Test child_handler when operation already failed."""
+ """Test child_handler when operation already failed.
+
+ Verifies:
+ - Already failed: raises error without executing function
+ - No checkpoint created
+ - get_checkpoint_result called once
+ """
 mock_state = Mock(spec=ExecutionState)
 mock_result = Mock()
 mock_result.is_succeeded.return_value = False
@@ -138,7 +171,10 @@ def test_child_handler_already_failed():
 None,
 )
+ # Verify function not executed
 mock_callable.assert_not_called()
+ # Verify get_checkpoint_result called once
+ assert mock_state.get_checkpoint_result.call_count == 1
 @pytest.mark.parametrize(
@@ -153,9 +189,15 @@ def test_child_handler_already_failed():
 ],
 )
 def test_child_handler_already_started(
- config: ChildConfig, expected_sub_type: OperationSubType
+ config: ChildConfig | None, expected_sub_type: OperationSubType
 ):
- """Test child_handler when operation already started."""
+ """Test child_handler when operation already started.
+
+ Verifies:
+ - Operation executes when already started
+ - Only SUCCEED checkpoint created (no START)
+ - get_checkpoint_result called once
+ """
 mock_state = Mock(spec=ExecutionState)
 mock_state.durable_execution_arn = "test_arn"
 mock_result = Mock()
@@ -172,7 +214,11 @@ def test_child_handler_already_started(
 assert result == "started_result"
- # Verify success checkpoint
+ # Verify get_checkpoint_result called once
+ assert mock_state.get_checkpoint_result.call_count == 1
+
+ # Verify only success checkpoint (no START since already started)
+ assert mock_state.create_checkpoint.call_count == 1
 success_call = mock_state.create_checkpoint.call_args_list[0]
 success_operation = success_call[1]["operation_update"]
 assert success_operation.operation_id == "op5"
@@ -197,9 +243,15 @@ def test_child_handler_already_started(
 ],
 )
 def test_child_handler_callable_exception(
- config: ChildConfig, expected_sub_type: OperationSubType
+ config: ChildConfig | None, expected_sub_type: OperationSubType
 ):
- """Test child_handler when callable raises exception."""
+ """Test child_handler when callable raises exception.
+
+ Verifies:
+ - Error handling: checkpoints FAIL and raises wrapped error
+ - get_checkpoint_result called once
+ - create_checkpoint called with is_sync=False for START
+ """
 mock_state = Mock(spec=ExecutionState)
 mock_state.durable_execution_arn = "test_arn"
 mock_result = Mock()
@@ -218,10 +270,14 @@ def test_child_handler_callable_exception(
 config,
 )
+ # Verify get_checkpoint_result called once
+ assert mock_state.get_checkpoint_result.call_count == 1
+
+ # Verify create_checkpoint called twice (start and fail)
 mock_state.create_checkpoint.assert_called()
- assert mock_state.create_checkpoint.call_count == 2 # start and fail
+ assert mock_state.create_checkpoint.call_count == 2
- # Verify start checkpoint
+ # Verify start checkpoint with is_sync=False
 start_call = mock_state.create_checkpoint.call_args_list[0]
 start_operation = start_call[1]["operation_update"]
 assert start_operation.operation_id == "op6"
@@ -229,6 +285,7 @@ def test_child_handler_callable_exception(
 assert start_operation.operation_type is OperationType.CONTEXT
 assert start_operation.sub_type is expected_sub_type
 assert start_operation.action is OperationAction.START
+ assert start_call[1]["is_sync"] is False
 # Verify fail checkpoint
 fail_call = mock_state.create_checkpoint.call_args_list[1]
@@ -242,13 +299,19 @@ def test_child_handler_callable_exception(
 def test_child_handler_error_wrapped():
- """Test child_handler wraps regular errors as CallableRuntimeError."""
+ """Test child_handler wraps regular errors as CallableRuntimeError.
+
+ Verifies:
+ - Regular exceptions are wrapped as CallableRuntimeError
+ - FAIL checkpoint is created
+ """
 mock_state = Mock(spec=ExecutionState)
 mock_state.durable_execution_arn = "test_arn"
 mock_result = Mock()
 mock_result.is_succeeded.return_value = False
 mock_result.is_failed.return_value = False
 mock_result.is_started.return_value = False
+ mock_result.is_existent.return_value = False
 mock_state.get_checkpoint_result.return_value = mock_result
 test_error = RuntimeError("Test error")
 mock_callable = Mock(side_effect=test_error)
@@ -261,6 +324,46 @@ def test_child_handler_error_wrapped():
 None,
 )
+ # Verify FAIL checkpoint was created
+ assert mock_state.create_checkpoint.call_count == 2 # start and fail
+
+
+def test_child_handler_invocation_error_reraised():
+ """Test child_handler re-raises InvocationError after checkpointing FAIL.
+
+ Verifies:
+ - InvocationError: checkpoints FAIL and re-raises (for retry)
+ - FAIL checkpoint is created
+ - Original InvocationError is re-raised (not wrapped)
+ """
+
+ mock_state = Mock(spec=ExecutionState)
+ mock_state.durable_execution_arn = "test_arn"
+ mock_result = Mock()
+ mock_result.is_succeeded.return_value = False
+ mock_result.is_failed.return_value = False
+ mock_result.is_started.return_value = False
+ mock_result.is_existent.return_value = False
+ mock_state.get_checkpoint_result.return_value = mock_result
+ test_error = InvocationError("Invocation failed")
+ mock_callable = Mock(side_effect=test_error)
+
+ with pytest.raises(InvocationError, match="Invocation failed"):
+ child_handler(
+ mock_callable,
+ mock_state,
+ OperationIdentifier("op7b", None, "test_name"),
+ None,
+ )
+
+ # Verify FAIL checkpoint was created
+ assert mock_state.create_checkpoint.call_count == 2 # start and fail
+
+ # Verify fail checkpoint
+ fail_call = mock_state.create_checkpoint.call_args_list[1]
+ fail_operation = fail_call[1]["operation_update"]
+ assert fail_operation.action is OperationAction.FAIL
+
 def test_child_handler_with_config():
 """Test child_handler with config parameter."""
@@ -270,6 +373,7 @@ def test_child_handler_with_config():
 mock_result.is_succeeded.return_value = False
 mock_result.is_failed.return_value = False
 mock_result.is_started.return_value = False
+ mock_result.is_existent.return_value = False
 mock_state.get_checkpoint_result.return_value = mock_result
 mock_callable = Mock(return_value="config_result")
 config = ChildConfig()
@@ -280,6 +384,8 @@ def test_child_handler_with_config():
 assert result == "config_result"
 mock_callable.assert_called_once()
+ # Verify get_checkpoint_result called once
+ assert mock_state.get_checkpoint_result.call_count == 1
 def test_child_handler_default_serialization():
@@ -291,6 +397,7 @@ def test_child_handler_default_serialization():
 mock_result.is_failed.return_value = False
 mock_result.is_started.return_value = False
 mock_result.is_replay_children.return_value = False
+ mock_result.is_existent.return_value = False
 mock_state.get_checkpoint_result.return_value = mock_result
 complex_result = {"key": "value", "number": 42, "list": [1, 2, 3]}
 mock_callable = Mock(return_value=complex_result)
@@ -300,6 +407,8 @@ def test_child_handler_default_serialization():
 )
 assert result == complex_result
+ # Verify get_checkpoint_result called once
+ assert mock_state.get_checkpoint_result.call_count == 1
 # Verify JSON serialization was used in checkpoint
 success_call = [
 call
@@ -362,6 +471,8 @@ def test_child_handler_custom_serdes_already_succeeded() -> None:
 expected_checkpoointed_result = {"key": "value", "number": 42, "list": [1, 2, 3]}
 assert actual_result == expected_checkpoointed_result
+ # Verify get_checkpoint_result called once
+ assert mock_state.get_checkpoint_result.call_count == 1
 # endregion child_handler
@@ -369,7 +480,12 @@
 # large payload with summary generator
 def test_child_handler_large_payload_with_summary_generator() -> None:
- """Test child_handler with large payload and summary generator."""
+ """Test child_handler with large payload and summary generator.
+
+ Verifies:
+ - Large payload: uses ReplayChildren mode with summary_generator
+ - get_checkpoint_result called once
+ """
 mock_state = Mock(spec=ExecutionState)
 mock_state.durable_execution_arn = "test_arn"
 mock_result = Mock()
@@ -397,6 +513,9 @@ def my_summary(result: str) -> str:
 )
 assert large_result == actual_result
+ # Verify get_checkpoint_result called once
+ assert mock_state.get_checkpoint_result.call_count == 1
+
 # Verify replay_children mode with summary
 success_call = mock_state.create_checkpoint.call_args_list[1]
 success_operation = success_call[1]["operation_update"]
 assert success_operation.context_options.replay_children
@@ -406,7 +525,12 @@ def my_summary(result: str) -> str:
 # large payload without summary generator
 def test_child_handler_large_payload_without_summary_generator() -> None:
- """Test child_handler with large payload and no summary generator."""
+ """Test child_handler with large payload and no summary generator.
+
+ Verifies:
+ - Large payload without summary_generator: uses ReplayChildren mode with empty string
+ - get_checkpoint_result called once
+ """
 mock_state = Mock(spec=ExecutionState)
 mock_state.durable_execution_arn = "test_arn"
 mock_result = Mock()
@@ -428,6 +552,9 @@ def test_child_handler_large_payload_without_summary_generator() -> None:
 )
 assert large_result == actual_result
+ # Verify get_checkpoint_result called once
+ assert mock_state.get_checkpoint_result.call_count == 1
+
 # Verify replay_children mode with empty string
 success_call = mock_state.create_checkpoint.call_args_list[1]
 success_operation = success_call[1]["operation_update"]
@@ -437,7 +564,13 @@
 # mocked children replay mode execute the function again
 def test_child_handler_replay_children_mode() -> None:
- """Test child_handler in ReplayChildren mode."""
+ """Test child_handler in ReplayChildren mode.
+
+ Verifies:
+ - Already succeeded with replay_children: re-executes function
+ - No checkpoint created (returns without checkpointing)
+ - get_checkpoint_result called once
+ """
 mock_state = Mock(spec=ExecutionState)
 mock_state.durable_execution_arn = "test_arn"
 mock_result = Mock()
@@ -458,12 +591,21 @@ def test_child_handler_replay_children_mode() -> None:
 )
 assert actual_result == complex_result
-
+ # Verify function was executed (replay_children mode)
+ mock_callable.assert_called_once()
+ # Verify no checkpoint created (returns without checkpointing in replay mode)
 mock_state.create_checkpoint.assert_not_called()
+ # Verify get_checkpoint_result called once
+ assert mock_state.get_checkpoint_result.call_count == 1
 def test_small_payload_with_summary_generator():
- """Test: Small payload with summary_generator -> replay_children = False"""
+ """Test: Small payload with summary_generator -> replay_children = False
+
+ Verifies:
+ - Small payload does NOT trigger replay_children even with summary_generator
+ - get_checkpoint_result called once
+ """
 mock_state = Mock(spec=ExecutionState)
 mock_state.durable_execution_arn = "test_arn"
 mock_result = Mock()
@@ -491,6 +633,8 @@ def my_summary(result: str) -> str:
 )
 assert actual_result == small_result
+ # Verify get_checkpoint_result called once
+ assert mock_state.get_checkpoint_result.call_count == 1
 success_call = mock_state.create_checkpoint.call_args_list[1]
 success_operation = success_call[1]["operation_update"]
@@ -501,7 +645,12 @@
 def test_small_payload_without_summary_generator():
- """Test: Small payload without summary_generator -> replay_children = False"""
+ """Test: Small payload without summary_generator -> replay_children = False
+
+ Verifies:
+ - Small payload does NOT trigger replay_children
+ - get_checkpoint_result called once
+ """
 mock_state = Mock(spec=ExecutionState)
 mock_state.durable_execution_arn = "test_arn"
 mock_result = Mock()
@@ -526,6 +675,8 @@ def test_small_payload_without_summary_generator():
 )
 assert actual_result == small_result
+ # Verify get_checkpoint_result called once
+ assert mock_state.get_checkpoint_result.call_count == 1
 success_call = mock_state.create_checkpoint.call_args_list[1]
 success_operation = success_call[1]["operation_update"]
diff --git a/tests/operation/invoke_test.py b/tests/operation/invoke_test.py
index baf69bf..5bb98da 100644
--- a/tests/operation/invoke_test.py
+++ b/tests/operation/invoke_test.py
@@ -7,7 +7,7 @@
 import pytest
-from aws_durable_execution_sdk_python.config import InvokeConfig
+from aws_durable_execution_sdk_python.config import Duration, InvokeConfig
 from aws_durable_execution_sdk_python.exceptions import (
 CallableRuntimeError,
 ExecutionError,
@@ -23,14 +23,27 @@
 OperationStatus,
 OperationType,
 )
-from aws_durable_execution_sdk_python.operation.invoke import (
- invoke_handler,
- suspend_with_optional_resume_delay,
-)
+from aws_durable_execution_sdk_python.operation.invoke import InvokeOperationExecutor
 from aws_durable_execution_sdk_python.state import CheckpointedResult, ExecutionState
+from aws_durable_execution_sdk_python.suspend import suspend_with_optional_resume_delay
 from tests.serdes_test import CustomDictSerDes
+
+# Test helper - maintains old handler signature for backward compatibility in tests
+def invoke_handler(function_name, payload, state, operation_identifier, config):
+ """Test helper that wraps InvokeOperationExecutor with old handler signature."""
+ if not config:
+ config = InvokeConfig()
+ executor = InvokeOperationExecutor(
+ function_name=function_name,
+ payload=payload,
+ state=state,
+ operation_identifier=operation_identifier,
+ config=config,
+ )
+ return executor.process()
+
+
 def test_invoke_handler_already_succeeded():
 """Test invoke_handler when operation already succeeded."""
 mock_state = Mock(spec=ExecutionState)
@@ -164,7 +177,7 @@ def test_invoke_handler_already_timed_out():
 )
-@pytest.mark.parametrize("status", [OperationStatus.STARTED, OperationStatus.PENDING])
+@pytest.mark.parametrize("status", [OperationStatus.STARTED])
 def test_invoke_handler_already_started(status):
 """Test invoke_handler when operation is already started."""
 mock_state = Mock(spec=ExecutionState)
@@ -179,7 +192,9 @@ def test_invoke_handler_already_started(status):
 mock_result = CheckpointedResult.create_from_operation(operation)
 mock_state.get_checkpoint_result.return_value = mock_result
- with pytest.raises(SuspendExecution, match="Invoke invoke6 still in progress"):
+ with pytest.raises(
+ SuspendExecution, match="Invoke invoke6 started, suspending for completion"
+ ):
 invoke_handler(
 function_name="test_function",
 payload="test_input",
@@ -204,7 +219,7 @@ def test_invoke_handler_already_started_with_timeout(status):
 mock_result = CheckpointedResult.create_from_operation(operation)
 mock_state.get_checkpoint_result.return_value = mock_result
- config = InvokeConfig[str, str](timeout_seconds=30)
+ config = InvokeConfig[str, str](timeout=Duration.from_seconds(30))
 with pytest.raises(TimedSuspendExecution):
 invoke_handler(
@@ -221,10 +236,17 @@ def test_invoke_handler_new_operation():
 mock_state = Mock(spec=ExecutionState)
 mock_state.durable_execution_arn = "test_arn"
- mock_result = CheckpointedResult.create_not_found()
- mock_state.get_checkpoint_result.return_value = mock_result
+ # First call: not found, second call: started (no immediate response)
+ not_found = CheckpointedResult.create_not_found()
+ started_op = Operation(
+ operation_id="invoke8",
+ operation_type=OperationType.CHAINED_INVOKE,
+ status=OperationStatus.STARTED,
+ )
+ started = CheckpointedResult.create_from_operation(started_op)
+ mock_state.get_checkpoint_result.side_effect = [not_found, started]
- config = InvokeConfig[str, str](timeout_seconds=60)
+ config = InvokeConfig[str, str](timeout=Duration.from_minutes(1))
 with pytest.raises(
 SuspendExecution, match="Invoke invoke8 started, suspending for completion"
@@ -254,10 +276,16 @@ def test_invoke_handler_new_operation_with_timeout():
 mock_state = Mock(spec=ExecutionState)
 mock_state.durable_execution_arn = "test_arn"
- mock_result = CheckpointedResult.create_not_found()
- mock_state.get_checkpoint_result.return_value = mock_result
+ not_found = CheckpointedResult.create_not_found()
+ started_op = Operation(
+ operation_id="invoke_test",
+ operation_type=OperationType.CHAINED_INVOKE,
+ status=OperationStatus.STARTED,
+ )
+ started = CheckpointedResult.create_from_operation(started_op)
+ mock_state.get_checkpoint_result.side_effect = [not_found, started]
- config = InvokeConfig[str, str](timeout_seconds=30)
+ config = InvokeConfig[str, str](timeout=Duration.from_seconds(30))
 with pytest.raises(TimedSuspendExecution):
 invoke_handler(
@@ -274,10 +302,16 @@ def test_invoke_handler_new_operation_no_timeout():
 mock_state = Mock(spec=ExecutionState)
 mock_state.durable_execution_arn = "test_arn"
- mock_result = CheckpointedResult.create_not_found()
- mock_state.get_checkpoint_result.return_value = mock_result
+ not_found = CheckpointedResult.create_not_found()
+ started_op = Operation(
+ operation_id="invoke_test",
+ operation_type=OperationType.CHAINED_INVOKE,
+ status=OperationStatus.STARTED,
+ )
+ started = CheckpointedResult.create_from_operation(started_op)
+ mock_state.get_checkpoint_result.side_effect = [not_found, started]
- config = InvokeConfig[str, str](timeout_seconds=0)
+ config = InvokeConfig[str, str](timeout=Duration.from_seconds(0))
 with pytest.raises(SuspendExecution):
 invoke_handler(
@@ -294,8 +328,14 @@ def test_invoke_handler_no_config():
 mock_state = Mock(spec=ExecutionState)
 mock_state.durable_execution_arn = "test_arn"
- mock_result = CheckpointedResult.create_not_found()
- mock_state.get_checkpoint_result.return_value = mock_result
+ not_found = CheckpointedResult.create_not_found()
+ started_op = Operation(
+ operation_id="invoke_test",
+ operation_type=OperationType.CHAINED_INVOKE,
+ status=OperationStatus.STARTED,
+ )
+ started = CheckpointedResult.create_from_operation(started_op)
+ mock_state.get_checkpoint_result.side_effect = [not_found, started]
 with pytest.raises(SuspendExecution):
 invoke_handler(
@@ -308,10 +348,10 @@ def test_invoke_handler_no_config():
 # Verify default config was used
 operation_update = mock_state.create_checkpoint.call_args[1]["operation_update"]
- assert (
- operation_update.to_dict()["ChainedInvokeOptions"]["FunctionName"]
- == "test_function"
- )
+ chained_invoke_options = operation_update.to_dict()["ChainedInvokeOptions"]
+ assert chained_invoke_options["FunctionName"] == "test_function"
+ # tenant_id should be None when not specified
+ assert "TenantId" not in chained_invoke_options
 def test_invoke_handler_custom_serdes():
@@ -351,8 +391,14 @@ def test_invoke_handler_custom_serdes_new_operation():
 mock_state = Mock(spec=ExecutionState)
 mock_state.durable_execution_arn = "test_arn"
- mock_result = CheckpointedResult.create_not_found()
- mock_state.get_checkpoint_result.return_value = mock_result
+ not_found = CheckpointedResult.create_not_found()
+ started_op = Operation(
+ operation_id="invoke_test",
+ operation_type=OperationType.CHAINED_INVOKE,
+ status=OperationStatus.STARTED,
+ )
+ started = CheckpointedResult.create_from_operation(started_op)
+ mock_state.get_checkpoint_result.side_effect = [not_found, started]
 config = InvokeConfig[dict, dict](
 serdes_payload=CustomDictSerDes(), serdes_result=CustomDictSerDes()
@@ -461,8 +507,14 @@ def test_invoke_handler_with_none_payload():
 mock_state = Mock(spec=ExecutionState)
 mock_state.durable_execution_arn = "test_arn"
- mock_result = CheckpointedResult.create_not_found()
- mock_state.get_checkpoint_result.return_value = mock_result
+ not_found = CheckpointedResult.create_not_found()
+ started_op = Operation(
+ operation_id="invoke_test",
+ operation_type=OperationType.CHAINED_INVOKE,
+ status=OperationStatus.STARTED,
+ )
+ started = CheckpointedResult.create_from_operation(started_op)
+ mock_state.get_checkpoint_result.side_effect = [not_found, started]
 with pytest.raises(SuspendExecution):
 invoke_handler(
@@ -514,8 +566,14 @@ def test_invoke_handler_suspend_does_not_raise(mock_suspend):
 mock_state = Mock(spec=ExecutionState)
 mock_state.durable_execution_arn = "test_arn"
- mock_result = CheckpointedResult.create_not_found()
- mock_state.get_checkpoint_result.return_value = mock_result
+ not_found = CheckpointedResult.create_not_found()
+ started_op = Operation(
+ operation_id="invoke_test",
+ operation_type=OperationType.CHAINED_INVOKE,
+ status=OperationStatus.STARTED,
+ )
+ started = CheckpointedResult.create_from_operation(started_op)
+ mock_state.get_checkpoint_result.side_effect = [not_found, started]
 # Mock suspend_with_optional_resume_delay to not raise an exception (which it should always do)
 mock_suspend.return_value = None
@@ -533,3 +591,597 @@ def test_invoke_handler_suspend_does_not_raise(mock_suspend):
 )
 mock_suspend.assert_called_once()
+
+
+def test_invoke_handler_with_tenant_id():
+ """Test invoke_handler passes tenant_id to checkpoint."""
+ mock_state = Mock(spec=ExecutionState)
+ mock_state.durable_execution_arn = "test_arn"
+
+ not_found = CheckpointedResult.create_not_found()
+ started_op = Operation(
+ operation_id="invoke1",
+ operation_type=OperationType.CHAINED_INVOKE,
+ status=OperationStatus.STARTED,
+ )
+ started = CheckpointedResult.create_from_operation(started_op)
+ mock_state.get_checkpoint_result.side_effect = [not_found, started]
+
+ config = InvokeConfig(tenant_id="test-tenant-123")
+
+ with pytest.raises(SuspendExecution):
+ invoke_handler(
+ function_name="test_function",
+ payload="test_input",
+ state=mock_state,
+ operation_identifier=OperationIdentifier("invoke1", None, None),
+ config=config,
+ )
+
+ # Verify checkpoint was called with tenant_id
+ mock_state.create_checkpoint.assert_called_once()
+ operation_update = mock_state.create_checkpoint.call_args[1]["operation_update"]
+ chained_invoke_options = operation_update.to_dict()["ChainedInvokeOptions"]
+ assert chained_invoke_options["FunctionName"] == "test_function"
+ assert chained_invoke_options["TenantId"] == "test-tenant-123"
+
+
+def test_invoke_handler_without_tenant_id():
+ """Test invoke_handler without tenant_id doesn't include it in checkpoint."""
+ mock_state = Mock(spec=ExecutionState)
+ mock_state.durable_execution_arn = "test_arn"
+
+ not_found = CheckpointedResult.create_not_found()
+ started_op = Operation(
+ operation_id="invoke1",
+ operation_type=OperationType.CHAINED_INVOKE,
+ status=OperationStatus.STARTED,
+ )
+ started = CheckpointedResult.create_from_operation(started_op)
+ mock_state.get_checkpoint_result.side_effect = [not_found, started]
+
+ config = InvokeConfig(tenant_id=None)
+
+ with pytest.raises(SuspendExecution):
+ invoke_handler(
+ function_name="test_function",
+ payload="test_input",
+ state=mock_state,
+ operation_identifier=OperationIdentifier("invoke1", None, None),
+ config=config,
+ )
+
+ # Verify checkpoint was called without tenant_id
+ mock_state.create_checkpoint.assert_called_once()
+ operation_update = mock_state.create_checkpoint.call_args[1]["operation_update"]
+ chained_invoke_options = operation_update.to_dict()["ChainedInvokeOptions"]
+ assert chained_invoke_options["FunctionName"] == "test_function"
+ assert "TenantId" not in chained_invoke_options
+
+
+def test_invoke_handler_default_config_no_tenant_id():
+ """Test invoke_handler with default config has no tenant_id."""
+ mock_state = Mock(spec=ExecutionState)
+ mock_state.durable_execution_arn = "test_arn"
+
+ not_found = CheckpointedResult.create_not_found()
+ started_op = Operation(
+ operation_id="invoke1",
+ operation_type=OperationType.CHAINED_INVOKE,
+ status=OperationStatus.STARTED,
+ )
+ started = CheckpointedResult.create_from_operation(started_op)
+ mock_state.get_checkpoint_result.side_effect = [not_found, started]
+
+ with pytest.raises(SuspendExecution):
+ invoke_handler(
+ function_name="test_function",
+ payload="test_input",
+ state=mock_state,
+ operation_identifier=OperationIdentifier("invoke1", None, None),
+ config=None,
+ )
+
+ # Verify checkpoint was called without tenant_id
+ mock_state.create_checkpoint.assert_called_once()
+ operation_update = mock_state.create_checkpoint.call_args[1]["operation_update"]
+ chained_invoke_options = operation_update.to_dict()["ChainedInvokeOptions"]
+ assert chained_invoke_options["FunctionName"] == "test_function"
+ assert "TenantId" not in chained_invoke_options
+
+
+def test_invoke_handler_defaults_to_json_serdes():
+ """Test invoke_handler uses DEFAULT_JSON_SERDES when config has no serdes."""
+ mock_state = Mock(spec=ExecutionState)
+ mock_state.durable_execution_arn = "test_arn"
+
+ not_found = CheckpointedResult.create_not_found()
+ started_op = Operation(
+ operation_id="invoke1",
+ operation_type=OperationType.CHAINED_INVOKE,
+ status=OperationStatus.STARTED,
+ )
+ started = CheckpointedResult.create_from_operation(started_op)
+ mock_state.get_checkpoint_result.side_effect = [not_found, started]
+
+ config = InvokeConfig[dict, dict](serdes_payload=None, serdes_result=None)
+ payload = {"key": "value", "number": 42}
+
+ with pytest.raises(SuspendExecution):
+ invoke_handler(
+ function_name="test_function",
+ payload=payload,
+ state=mock_state,
+ operation_identifier=OperationIdentifier("invoke_json", None, None),
+ config=config,
+ )
+
+ # Verify JSON serialization was used (not extended types)
+ operation_update = mock_state.create_checkpoint.call_args[1]["operation_update"]
+ assert operation_update.payload == json.dumps(payload)
+
+
+def test_invoke_handler_result_defaults_to_json_serdes():
+ """Test invoke_handler uses DEFAULT_JSON_SERDES for result deserialization."""
+ mock_state = Mock(spec=ExecutionState)
+ mock_state.durable_execution_arn = "test_arn"
+
+ result_data = {"key": "value", "number": 42}
+ operation = Operation(
+ operation_id="invoke_result_json",
+ operation_type=OperationType.CHAINED_INVOKE,
+ status=OperationStatus.SUCCEEDED,
+ chained_invoke_details=ChainedInvokeDetails(result=json.dumps(result_data)),
+ )
+ mock_result = CheckpointedResult.create_from_operation(operation)
+ mock_state.get_checkpoint_result.return_value = mock_result
+
+ config = InvokeConfig[dict, dict](serdes_payload=None, serdes_result=None)
+
+ result = invoke_handler(
+ function_name="test_function",
+ payload={"input": "data"},
+ state=mock_state,
+ operation_identifier=OperationIdentifier("invoke_result_json", None, None),
+ config=config,
+ )
+
+ # Verify JSON deserialization was used (not extended types)
+ assert result == result_data
+
+
+# ============================================================================
+# Immediate Response Handling Tests
+# ============================================================================
+
+
+def test_invoke_immediate_response_get_checkpoint_result_called_twice():
+ """Test that get_checkpoint_result is called twice when checkpoint is created."""
+ mock_state = Mock(spec=ExecutionState)
+ mock_state.durable_execution_arn = "test_arn"
+
+ # First call: not found, second call: started (no immediate response)
+ not_found = CheckpointedResult.create_not_found()
+ started_op = Operation(
+ operation_id="invoke_immediate_1",
+ operation_type=OperationType.CHAINED_INVOKE,
+ status=OperationStatus.STARTED,
+ )
+ started = CheckpointedResult.create_from_operation(started_op)
+ mock_state.get_checkpoint_result.side_effect = [not_found, started]
+
+ with pytest.raises(SuspendExecution):
+ invoke_handler(
+ function_name="test_function",
+ payload="test_input",
+ state=mock_state,
+ operation_identifier=OperationIdentifier(
+ "invoke_immediate_1", None, "test_invoke"
+ ),
+ config=None,
+ )
+
+ # Verify get_checkpoint_result was called twice
+ assert mock_state.get_checkpoint_result.call_count == 2
+
+
+def test_invoke_immediate_response_create_checkpoint_with_is_sync_true():
+ """Test that create_checkpoint is called with is_sync=True."""
+ mock_state = Mock(spec=ExecutionState)
+ mock_state.durable_execution_arn = "test_arn"
+
+ # First call: not found, second call: started
+ not_found = CheckpointedResult.create_not_found()
+ started_op = Operation(
+ operation_id="invoke_immediate_2",
+ operation_type=OperationType.CHAINED_INVOKE,
+ status=OperationStatus.STARTED,
+ )
+ started = CheckpointedResult.create_from_operation(started_op)
+ mock_state.get_checkpoint_result.side_effect = [not_found, started]
+
+ with pytest.raises(SuspendExecution):
+ invoke_handler(
+ function_name="test_function",
+ payload="test_input",
+ state=mock_state,
+ operation_identifier=OperationIdentifier(
+ "invoke_immediate_2", None, "test_invoke"
+ ),
+ config=None,
+ )
+
+ # Verify create_checkpoint was called with is_sync=True
+ mock_state.create_checkpoint.assert_called_once()
+ call_kwargs = mock_state.create_checkpoint.call_args[1]
+ assert call_kwargs["is_sync"] is True
+
+
+def test_invoke_immediate_response_immediate_success():
+ """Test immediate success: checkpoint returns SUCCEEDED on second check.
+
+ When checkpoint returns SUCCEEDED on second check, operation returns result
+ without suspend.
+ """
+ mock_state = Mock(spec=ExecutionState)
+ mock_state.durable_execution_arn = "test_arn"
+
+ # First call: not found, second call: succeeded (immediate response)
+ not_found = CheckpointedResult.create_not_found()
+ succeeded_op = Operation(
+ operation_id="invoke_immediate_3",
+ operation_type=OperationType.CHAINED_INVOKE,
+ status=OperationStatus.SUCCEEDED,
+ chained_invoke_details=ChainedInvokeDetails(
+ result=json.dumps("immediate_result")
+ ),
+ )
+ succeeded = CheckpointedResult.create_from_operation(succeeded_op)
+ mock_state.get_checkpoint_result.side_effect = [not_found, succeeded]
+
+ result = invoke_handler(
+ function_name="test_function",
+ payload="test_input",
+ state=mock_state,
+ operation_identifier=OperationIdentifier(
+ "invoke_immediate_3", None, "test_invoke"
+ ),
+ config=None,
+ )
+
+ # Verify result was returned without suspend
+ assert result == "immediate_result"
+ # Verify checkpoint was created
+ mock_state.create_checkpoint.assert_called_once()
+ # Verify get_checkpoint_result was called twice
+ assert mock_state.get_checkpoint_result.call_count == 2
+
+
+def test_invoke_immediate_response_immediate_success_with_none_result():
+ """Test immediate success with None result."""
+ mock_state = Mock(spec=ExecutionState)
+ mock_state.durable_execution_arn = "test_arn"
+
+ # First call: not found, second call: succeeded with None result
+ not_found = CheckpointedResult.create_not_found()
+ succeeded_op = Operation(
+ operation_id="invoke_immediate_4",
+ operation_type=OperationType.CHAINED_INVOKE,
+ status=OperationStatus.SUCCEEDED,
+ chained_invoke_details=ChainedInvokeDetails(result=None),
+ )
+ succeeded = CheckpointedResult.create_from_operation(succeeded_op)
+ mock_state.get_checkpoint_result.side_effect = [not_found, succeeded]
+
+ result = invoke_handler(
+ function_name="test_function",
+ payload="test_input",
+ state=mock_state,
+ operation_identifier=OperationIdentifier(
+ "invoke_immediate_4", None, "test_invoke"
+ ),
+ config=None,
+ )
+
+ # Verify None result was returned without suspend
+ assert result is None
+ assert mock_state.get_checkpoint_result.call_count == 2
+
+
+@pytest.mark.parametrize(
+ "status",
+ [OperationStatus.FAILED, OperationStatus.TIMED_OUT, OperationStatus.STOPPED],
+)
+def test_invoke_immediate_response_immediate_failure(status: OperationStatus):
+ """Test immediate failure: checkpoint returns FAILED/TIMED_OUT/STOPPED on second check.
+
+ When checkpoint returns a failure status on second check, operation raises error
+ without suspend.
+ """
+ mock_state = Mock(spec=ExecutionState)
+ mock_state.durable_execution_arn = "test_arn"
+
+ # First call: not found, second call: failed (immediate response)
+ not_found = CheckpointedResult.create_not_found()
+ error = ErrorObject(
+ message="Immediate failure", type="TestError", data=None, stack_trace=None
+ )
+ failed_op = Operation(
+ operation_id="invoke_immediate_5",
+ operation_type=OperationType.CHAINED_INVOKE,
+ status=status,
+ chained_invoke_details=ChainedInvokeDetails(error=error),
+ )
+ failed = CheckpointedResult.create_from_operation(failed_op)
+ mock_state.get_checkpoint_result.side_effect = [not_found, failed]
+
+ # Verify error is raised without suspend
+ with pytest.raises(CallableRuntimeError):
+ invoke_handler(
+ function_name="test_function",
+ payload="test_input",
+ state=mock_state,
+ operation_identifier=OperationIdentifier(
+ "invoke_immediate_5", None, "test_invoke"
+ ),
+ config=None,
+ )
+
+ # Verify checkpoint was created
+ mock_state.create_checkpoint.assert_called_once()
+ # Verify get_checkpoint_result was called twice
+ assert mock_state.get_checkpoint_result.call_count == 2
+
+
+def test_invoke_immediate_response_no_immediate_response():
+ """Test no immediate response: checkpoint returns STARTED on second check.
+
+ When checkpoint returns STARTED on second check, operation suspends normally.
+ """
+ mock_state = Mock(spec=ExecutionState)
+ mock_state.durable_execution_arn = "test_arn"
+
+ # First call: not found, second call: started (no immediate response)
+ not_found = CheckpointedResult.create_not_found()
+ started_op = Operation(
+ operation_id="invoke_immediate_6",
+ operation_type=OperationType.CHAINED_INVOKE,
+ status=OperationStatus.STARTED,
+ )
+ started = CheckpointedResult.create_from_operation(started_op)
+ mock_state.get_checkpoint_result.side_effect = [not_found, started]
+
+ # Verify operation suspends
+ with pytest.raises(SuspendExecution):
+ invoke_handler(
+ function_name="test_function",
+ payload="test_input",
+ state=mock_state,
+ operation_identifier=OperationIdentifier(
+ "invoke_immediate_6", None, "test_invoke"
+ ),
+ config=None,
+ )
+
+ # Verify checkpoint was created
+ mock_state.create_checkpoint.assert_called_once()
+ # Verify get_checkpoint_result was called twice
+ assert mock_state.get_checkpoint_result.call_count == 2
+
+
+def test_invoke_immediate_response_already_completed():
+ """Test already completed: checkpoint is already SUCCEEDED on first check.
+
+ When checkpoint is already SUCCEEDED on first check, no checkpoint is created
+ and result is returned immediately.
+ """ + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + # First call: already succeeded + succeeded_op = Operation( + operation_id="invoke_immediate_7", + operation_type=OperationType.CHAINED_INVOKE, + status=OperationStatus.SUCCEEDED, + chained_invoke_details=ChainedInvokeDetails( + result=json.dumps("existing_result") + ), + ) + succeeded = CheckpointedResult.create_from_operation(succeeded_op) + mock_state.get_checkpoint_result.return_value = succeeded + + result = invoke_handler( + function_name="test_function", + payload="test_input", + state=mock_state, + operation_identifier=OperationIdentifier( + "invoke_immediate_7", None, "test_invoke" + ), + config=None, + ) + + # Verify result was returned + assert result == "existing_result" + # Verify no checkpoint was created + mock_state.create_checkpoint.assert_not_called() + # Verify get_checkpoint_result was called only once + assert mock_state.get_checkpoint_result.call_count == 1 + + +def test_invoke_immediate_response_with_timeout_immediate_success(): + """Test immediate success with timeout configuration.""" + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + # First call: not found, second call: succeeded + not_found = CheckpointedResult.create_not_found() + succeeded_op = Operation( + operation_id="invoke_immediate_8", + operation_type=OperationType.CHAINED_INVOKE, + status=OperationStatus.SUCCEEDED, + chained_invoke_details=ChainedInvokeDetails( + result=json.dumps("timeout_result") + ), + ) + succeeded = CheckpointedResult.create_from_operation(succeeded_op) + mock_state.get_checkpoint_result.side_effect = [not_found, succeeded] + + config = InvokeConfig[str, str](timeout=Duration.from_seconds(30)) + + result = invoke_handler( + function_name="test_function", + payload="test_input", + state=mock_state, + operation_identifier=OperationIdentifier( + "invoke_immediate_8", None, "test_invoke" + ), + config=config, + ) + + # Verify result was returned without suspend + assert result == "timeout_result" + assert mock_state.get_checkpoint_result.call_count == 2 + + +def test_invoke_immediate_response_with_timeout_no_immediate_response(): + """Test no immediate response with timeout configuration. + + When no immediate response, operation should suspend with timeout. 
+ """ + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + # First call: not found, second call: started + not_found = CheckpointedResult.create_not_found() + started_op = Operation( + operation_id="invoke_immediate_9", + operation_type=OperationType.CHAINED_INVOKE, + status=OperationStatus.STARTED, + ) + started = CheckpointedResult.create_from_operation(started_op) + mock_state.get_checkpoint_result.side_effect = [not_found, started] + + config = InvokeConfig[str, str](timeout=Duration.from_seconds(30)) + + # Verify operation suspends with timeout + with pytest.raises(TimedSuspendExecution): + invoke_handler( + function_name="test_function", + payload="test_input", + state=mock_state, + operation_identifier=OperationIdentifier( + "invoke_immediate_9", None, "test_invoke" + ), + config=config, + ) + + assert mock_state.get_checkpoint_result.call_count == 2 + + +def test_invoke_immediate_response_with_custom_serdes(): + """Test immediate success with custom serialization.""" + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + # First call: not found, second call: succeeded + not_found = CheckpointedResult.create_not_found() + succeeded_op = Operation( + operation_id="invoke_immediate_10", + operation_type=OperationType.CHAINED_INVOKE, + status=OperationStatus.SUCCEEDED, + chained_invoke_details=ChainedInvokeDetails( + result='{"key": "VALUE", "number": "84", "list": [1, 2, 3]}' + ), + ) + succeeded = CheckpointedResult.create_from_operation(succeeded_op) + mock_state.get_checkpoint_result.side_effect = [not_found, succeeded] + + config = InvokeConfig[dict, dict]( + serdes_payload=CustomDictSerDes(), serdes_result=CustomDictSerDes() + ) + + result = invoke_handler( + function_name="test_function", + payload={"key": "value", "number": 42, "list": [1, 2, 3]}, + state=mock_state, + operation_identifier=OperationIdentifier( + "invoke_immediate_10", None, "test_invoke" + ), + config=config, + ) + + # Verify custom deserialization was used + assert result == {"key": "value", "number": 42, "list": [1, 2, 3]} + assert mock_state.get_checkpoint_result.call_count == 2 + + +def test_invoke_suspends_when_second_check_returns_started(): + """Test backward compatibility: when the second checkpoint check returns + STARTED (not terminal), the invoke operation suspends normally. 
+ + Validates: Requirements 8.1, 8.2 + """ + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + # First call: checkpoint doesn't exist + # Second call: checkpoint returns STARTED (no immediate response) + mock_state.get_checkpoint_result.side_effect = [ + CheckpointedResult.create_not_found(), + CheckpointedResult.create_from_operation( + Operation( + operation_id="invoke-1", + operation_type=OperationType.STEP, + status=OperationStatus.STARTED, + ) + ), + ] + + executor = InvokeOperationExecutor( + state=mock_state, + operation_identifier=OperationIdentifier("invoke-1", None, "test_invoke"), + function_name="my-function", + payload={"data": "test"}, + config=InvokeConfig(), + ) + + with pytest.raises(SuspendExecution): + executor.process() + + # Assert - behaves like "old way" + assert mock_state.get_checkpoint_result.call_count == 2 # Double-check happened + mock_state.create_checkpoint.assert_called_once() # START checkpoint created + + +def test_invoke_suspends_when_second_check_returns_started_duplicate(): + """Test backward compatibility: when the second checkpoint check returns + STARTED (not terminal), the invoke operation suspends normally. + """ + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + # First call: checkpoint doesn't exist + # Second call: checkpoint returns STARTED (no immediate response) + not_found = CheckpointedResult.create_not_found() + started_op = Operation( + operation_id="invoke-1", + operation_type=OperationType.STEP, + status=OperationStatus.STARTED, + ) + started = CheckpointedResult.create_from_operation(started_op) + mock_state.get_checkpoint_result.side_effect = [not_found, started] + + executor = InvokeOperationExecutor( + function_name="my-function", + payload={"data": "test"}, + state=mock_state, + operation_identifier=OperationIdentifier("invoke-1", None, "test_invoke"), + config=InvokeConfig(), + ) + + with pytest.raises(SuspendExecution): + executor.process() + + # Assert - behaves like "old way" + assert mock_state.get_checkpoint_result.call_count == 2 # Double-check happened + mock_state.create_checkpoint.assert_called_once() # START checkpoint created diff --git a/tests/operation/map_test.py b/tests/operation/map_test.py index eb099d1..5c5a5a1 100644 --- a/tests/operation/map_test.py +++ b/tests/operation/map_test.py @@ -1,9 +1,13 @@ """Tests for map operation.""" +import importlib +import json from unittest.mock import Mock, patch +import pytest + # Mock the executor.execute method -from aws_durable_execution_sdk_python.concurrency import ( +from aws_durable_execution_sdk_python.concurrency.models import ( BatchItem, BatchItemStatus, BatchResult, @@ -15,9 +19,12 @@ ItemBatcher, MapConfig, ) +from aws_durable_execution_sdk_python.context import DurableContext from aws_durable_execution_sdk_python.identifier import OperationIdentifier from aws_durable_execution_sdk_python.lambda_service import OperationSubType +from aws_durable_execution_sdk_python.operation import child # PLC0415 from aws_durable_execution_sdk_python.operation.map import MapExecutor, map_handler +from aws_durable_execution_sdk_python.serdes import serialize from tests.serdes_test import CustomStrSerDes @@ -750,3 +757,337 @@ def get_checkpoint_result(self, operation_id): # Verify replay was called, execute was not mock_replay.assert_called_once() mock_execute.assert_not_called() + + +@pytest.mark.parametrize( + ("item_serdes", "batch_serdes"), + [ + (Mock(), Mock()), + (None, Mock()), + (Mock(), 
None), + ], +) +@patch("aws_durable_execution_sdk_python.operation.child.serialize") +def test_map_item_serialize(mock_serialize, item_serdes, batch_serdes): + """Test map serializes items with item_serdes or fallback.""" + mock_serialize.return_value = '"serialized"' + + parent_checkpoint = Mock() + parent_checkpoint.is_succeeded.return_value = False + parent_checkpoint.is_failed.return_value = False + parent_checkpoint.is_started.return_value = False + parent_checkpoint.is_existent.return_value = True + parent_checkpoint.is_replay_children.return_value = False + + child_checkpoint = Mock() + child_checkpoint.is_succeeded.return_value = False + child_checkpoint.is_failed.return_value = False + child_checkpoint.is_started.return_value = False + child_checkpoint.is_existent.return_value = True + child_checkpoint.is_replay_children.return_value = False + + def get_checkpoint(op_id): + return child_checkpoint if op_id.startswith("child-") else parent_checkpoint + + mock_state = Mock() + mock_state.durable_execution_arn = "arn:test" + mock_state.get_checkpoint_result = Mock(side_effect=get_checkpoint) + mock_state.create_checkpoint = Mock() + + context_map = {} + + def create_id(self, i): + ctx_id = id(self) + if ctx_id not in context_map: + context_map[ctx_id] = [] + context_map[ctx_id].append(i) + return ( + "parent" + if len(context_map) == 1 and len(context_map[ctx_id]) == 1 + else f"child-{i}" + ) + + with patch.object(DurableContext, "_create_step_id_for_logical_step", create_id): + context = DurableContext(state=mock_state) + context.map( + ["a", "b"], + lambda ctx, item, idx, items: item, + config=MapConfig(serdes=batch_serdes, item_serdes=item_serdes), + ) + + expected = item_serdes or batch_serdes + assert mock_serialize.call_args_list[0][1]["serdes"] is expected + assert mock_serialize.call_args_list[0][1]["operation_id"] == "child-0" + assert mock_serialize.call_args_list[1][1]["serdes"] is expected + assert mock_serialize.call_args_list[1][1]["operation_id"] == "child-1" + assert mock_serialize.call_args_list[2][1]["serdes"] is batch_serdes + assert mock_serialize.call_args_list[2][1]["operation_id"] == "parent" + + +@pytest.mark.parametrize( + ("item_serdes", "batch_serdes"), + [ + (Mock(), Mock()), + (None, Mock()), + (Mock(), None), + ], +) +@patch("aws_durable_execution_sdk_python.operation.child.deserialize") +def test_map_item_deserialize(mock_deserialize, item_serdes, batch_serdes): + """Test map deserializes items with item_serdes or fallback.""" + mock_deserialize.return_value = "deserialized" + + parent_checkpoint = Mock() + parent_checkpoint.is_succeeded.return_value = False + parent_checkpoint.is_failed.return_value = False + parent_checkpoint.is_existent.return_value = False + + child_checkpoint = Mock() + child_checkpoint.is_succeeded.return_value = True + child_checkpoint.is_failed.return_value = False + child_checkpoint.is_replay_children.return_value = False + child_checkpoint.result = '"cached"' + + def get_checkpoint(op_id): + return child_checkpoint if op_id.startswith("child-") else parent_checkpoint + + mock_state = Mock() + mock_state.durable_execution_arn = "arn:test" + mock_state.get_checkpoint_result = Mock(side_effect=get_checkpoint) + mock_state.create_checkpoint = Mock() + + context_map = {} + + def create_id(self, i): + ctx_id = id(self) + if ctx_id not in context_map: + context_map[ctx_id] = [] + context_map[ctx_id].append(i) + return ( + "parent" + if len(context_map) == 1 and len(context_map[ctx_id]) == 1 + else f"child-{i}" + ) + + with 
patch.object(DurableContext, "_create_step_id_for_logical_step", create_id): + context = DurableContext(state=mock_state) + context.map( + ["a", "b"], + lambda ctx, item, idx, items: item, + config=MapConfig(serdes=batch_serdes, item_serdes=item_serdes), + ) + + expected = item_serdes or batch_serdes + assert mock_deserialize.call_args_list[0][1]["serdes"] is expected + assert mock_deserialize.call_args_list[0][1]["operation_id"] == "child-0" + assert mock_deserialize.call_args_list[1][1]["serdes"] is expected + assert mock_deserialize.call_args_list[1][1]["operation_id"] == "child-1" + + +def test_map_result_serialization_roundtrip(): + """Test that map operation BatchResult can be serialized and deserialized.""" + + items = ["a", "b", "c"] + + def func(ctx, item, idx, items): + return {"item": item.upper(), "index": idx} + + class MockExecutionState: + durable_execution_arn = "arn:test" + + def get_checkpoint_result(self, operation_id): + mock_result = Mock() + mock_result.is_succeeded.return_value = False + return mock_result + + execution_state = MockExecutionState() + map_context = Mock() + map_context._create_step_id_for_logical_step = Mock(side_effect=["1", "2", "3"]) # noqa SLF001 + map_context.create_child_context = Mock(return_value=Mock()) + operation_identifier = OperationIdentifier("test_op", "parent", "test_map") + + # Execute map + result = map_handler( + items, func, MapConfig(), execution_state, map_context, operation_identifier + ) + + # Serialize the BatchResult + serialized = json.dumps(result.to_dict()) + + # Deserialize + deserialized = BatchResult.from_dict(json.loads(serialized)) + + # Verify all data preserved + assert len(deserialized.all) == 3 + assert deserialized.all[0].result == {"item": "A", "index": 0} + assert deserialized.all[1].result == {"item": "B", "index": 1} + assert deserialized.all[2].result == {"item": "C", "index": 2} + assert deserialized.completion_reason == result.completion_reason + assert all(item.status == BatchItemStatus.SUCCEEDED for item in deserialized.all) + + +def test_map_handler_serializes_batch_result(): + """Verify map_handler serializes BatchResult at parent level.""" + with patch( + "aws_durable_execution_sdk_python.serdes.serialize" + ) as mock_serdes_serialize: + mock_serdes_serialize.return_value = '"serialized"' + importlib.reload(child) + + parent_checkpoint = Mock() + parent_checkpoint.is_succeeded.return_value = False + parent_checkpoint.is_failed.return_value = False + parent_checkpoint.is_existent.return_value = False + parent_checkpoint.is_replay_children.return_value = False + + child_checkpoint = Mock() + child_checkpoint.is_succeeded.return_value = False + child_checkpoint.is_failed.return_value = False + child_checkpoint.is_existent.return_value = False + child_checkpoint.is_replay_children.return_value = False + + def get_checkpoint(op_id): + return child_checkpoint if op_id.startswith("child-") else parent_checkpoint + + mock_state = Mock() + mock_state.durable_execution_arn = "arn:test" + mock_state.get_checkpoint_result = Mock(side_effect=get_checkpoint) + mock_state.create_checkpoint = Mock() + + context_map = {} + + def create_id(self, i): + ctx_id = id(self) + if ctx_id not in context_map: + context_map[ctx_id] = [] + context_map[ctx_id].append(i) + return ( + "parent" + if len(context_map) == 1 and len(context_map[ctx_id]) == 1 + else f"child-{i}" + ) + + with patch.object( + DurableContext, "_create_step_id_for_logical_step", create_id + ): + context = DurableContext(state=mock_state) + result = 
context.map(["a", "b"], lambda ctx, item, idx, items: item) + + assert len(mock_serdes_serialize.call_args_list) == 3 + parent_call = mock_serdes_serialize.call_args_list[2] + assert parent_call[1]["value"] is result + + +def test_map_default_serdes_serializes_batch_result(): + """Verify default serdes automatically serializes BatchResult.""" + + with patch( + "aws_durable_execution_sdk_python.serdes.serialize", wraps=serialize + ) as mock_serialize: + importlib.reload(child) + + parent_checkpoint = Mock() + parent_checkpoint.is_succeeded.return_value = False + parent_checkpoint.is_failed.return_value = False + parent_checkpoint.is_existent.return_value = False + parent_checkpoint.is_replay_children.return_value = False + + child_checkpoint = Mock() + child_checkpoint.is_succeeded.return_value = False + child_checkpoint.is_failed.return_value = False + child_checkpoint.is_existent.return_value = False + child_checkpoint.is_replay_children.return_value = False + + def get_checkpoint(op_id): + return child_checkpoint if op_id.startswith("child-") else parent_checkpoint + + mock_state = Mock() + mock_state.durable_execution_arn = "arn:test" + mock_state.get_checkpoint_result = Mock(side_effect=get_checkpoint) + mock_state.create_checkpoint = Mock() + + context_map = {} + + def create_id(self, i): + ctx_id = id(self) + if ctx_id not in context_map: + context_map[ctx_id] = [] + context_map[ctx_id].append(i) + return ( + "parent" + if len(context_map) == 1 and len(context_map[ctx_id]) == 1 + else f"child-{i}" + ) + + with patch.object( + DurableContext, "_create_step_id_for_logical_step", create_id + ): + context = DurableContext(state=mock_state) + result = context.map(["a", "b"], lambda ctx, item, idx, items: item) + + assert isinstance(result, BatchResult) + assert len(mock_serialize.call_args_list) == 3 + parent_call = mock_serialize.call_args_list[2] + assert parent_call[1]["serdes"] is None + assert isinstance(parent_call[1]["value"], BatchResult) + assert parent_call[1]["value"] is result + + +def test_map_custom_serdes_serializes_batch_result(): + """Verify custom serdes is used for BatchResult serialization.""" + + custom_serdes = CustomStrSerDes() + + with patch("aws_durable_execution_sdk_python.serdes.serialize") as mock_serialize: + mock_serialize.return_value = '"serialized"' + importlib.reload(child) + + parent_checkpoint = Mock() + parent_checkpoint.is_succeeded.return_value = False + parent_checkpoint.is_failed.return_value = False + parent_checkpoint.is_existent.return_value = False + parent_checkpoint.is_replay_children.return_value = False + + child_checkpoint = Mock() + child_checkpoint.is_succeeded.return_value = False + child_checkpoint.is_failed.return_value = False + child_checkpoint.is_existent.return_value = False + child_checkpoint.is_replay_children.return_value = False + + def get_checkpoint(op_id): + return child_checkpoint if op_id.startswith("child-") else parent_checkpoint + + mock_state = Mock() + mock_state.durable_execution_arn = "arn:test" + mock_state.get_checkpoint_result = Mock(side_effect=get_checkpoint) + mock_state.create_checkpoint = Mock() + + context_map = {} + + def create_id(self, i): + ctx_id = id(self) + if ctx_id not in context_map: + context_map[ctx_id] = [] + context_map[ctx_id].append(i) + return ( + "parent" + if len(context_map) == 1 and len(context_map[ctx_id]) == 1 + else f"child-{i}" + ) + + with patch.object( + DurableContext, "_create_step_id_for_logical_step", create_id + ): + context = DurableContext(state=mock_state) + result = 
+        importlib.reload(child)
+
+        parent_checkpoint = Mock()
+        parent_checkpoint.is_succeeded.return_value = False
+        parent_checkpoint.is_failed.return_value = False
+        parent_checkpoint.is_existent.return_value = False
+        parent_checkpoint.is_replay_children.return_value = False
+
+        child_checkpoint = Mock()
+        child_checkpoint.is_succeeded.return_value = False
+        child_checkpoint.is_failed.return_value = False
+        child_checkpoint.is_existent.return_value = False
+        child_checkpoint.is_replay_children.return_value = False
+
+        def get_checkpoint(op_id):
+            return child_checkpoint if op_id.startswith("child-") else parent_checkpoint
+
+        mock_state = Mock()
+        mock_state.durable_execution_arn = "arn:test"
+        mock_state.get_checkpoint_result = Mock(side_effect=get_checkpoint)
+        mock_state.create_checkpoint = Mock()
+
+        context_map = {}
+
+        def create_id(self, i):
+            ctx_id = id(self)
+            if ctx_id not in context_map:
+                context_map[ctx_id] = []
+            context_map[ctx_id].append(i)
+            return (
+                "parent"
+                if len(context_map) == 1 and len(context_map[ctx_id]) == 1
+                else f"child-{i}"
+            )
+
+        with patch.object(
+            DurableContext, "_create_step_id_for_logical_step", create_id
+        ):
+            context = DurableContext(state=mock_state)
+            result = context.map(
+                ["a", "b"],
+                lambda ctx, item, idx, items: item,
+                config=MapConfig(serdes=custom_serdes),
+            )
+
+        assert isinstance(result, BatchResult)
+        assert len(mock_serialize.call_args_list) == 3
+        parent_call = mock_serialize.call_args_list[2]
+        assert parent_call[1]["serdes"] is custom_serdes
+        assert isinstance(parent_call[1]["value"], BatchResult)
+        assert parent_call[1]["value"] is result
diff --git a/tests/operation/parallel_test.py b/tests/operation/parallel_test.py
index 54f2229..c43be7e 100644
--- a/tests/operation/parallel_test.py
+++ b/tests/operation/parallel_test.py
@@ -1,25 +1,31 @@
 """Tests for the parallel operation module."""
 
+import importlib
+import json
 from unittest.mock import Mock, patch
 
 import pytest
 
+from aws_durable_execution_sdk_python.concurrency.executor import ConcurrentExecutor
+
 # Mock the executor.execute method to return a BatchResult
-from aws_durable_execution_sdk_python.concurrency import (
+from aws_durable_execution_sdk_python.concurrency.models import (
     BatchItem,
     BatchItemStatus,
     BatchResult,
     CompletionReason,
-    ConcurrentExecutor,
     Executable,
 )
 from aws_durable_execution_sdk_python.config import CompletionConfig, ParallelConfig
+from aws_durable_execution_sdk_python.context import DurableContext
 from aws_durable_execution_sdk_python.identifier import OperationIdentifier
 from aws_durable_execution_sdk_python.lambda_service import OperationSubType
+from aws_durable_execution_sdk_python.operation import child
 from aws_durable_execution_sdk_python.operation.parallel import (
     ParallelExecutor,
     parallel_handler,
 )
+from aws_durable_execution_sdk_python.serdes import serialize
 from tests.serdes_test import CustomStrSerDes
@@ -734,3 +740,346 @@ def get_checkpoint_result(self, operation_id):
     # Verify replay was called, execute was not
     mock_replay.assert_called_once()
     mock_execute.assert_not_called()
+
+
+@pytest.mark.parametrize(
+    ("item_serdes", "batch_serdes"),
+    [
+        (Mock(), Mock()),
+        (None, Mock()),
+        (Mock(), None),
+    ],
+)
+@patch("aws_durable_execution_sdk_python.operation.child.serialize")
+def test_parallel_item_serialize(mock_serialize, item_serdes, batch_serdes):
+    """Test parallel serializes branches with item_serdes or fallback."""
+    mock_serialize.return_value = '"serialized"'
+
+    parent_checkpoint = Mock()
+    parent_checkpoint.is_succeeded.return_value = False
+    parent_checkpoint.is_failed.return_value = False
+    parent_checkpoint.is_started.return_value = False
+    parent_checkpoint.is_existent.return_value = True
+    parent_checkpoint.is_replay_children.return_value = False
+
+    child_checkpoint = Mock()
+    child_checkpoint.is_succeeded.return_value = False
+    child_checkpoint.is_failed.return_value = False
+    child_checkpoint.is_started.return_value = False
+    child_checkpoint.is_existent.return_value = True
+    child_checkpoint.is_replay_children.return_value = False
+
+    def get_checkpoint(op_id):
+        return child_checkpoint if op_id.startswith("child-") else parent_checkpoint
+
+    mock_state = Mock()
+    mock_state.durable_execution_arn = "arn:test"
+    mock_state.get_checkpoint_result = Mock(side_effect=get_checkpoint)
+    mock_state.create_checkpoint = Mock()
+
+    context_map = {}
+
+    def create_id(self, i):
+        ctx_id = id(self)
+        if ctx_id not in context_map:
+            context_map[ctx_id] = []
+        context_map[ctx_id].append(i)
+        return (
+            "parent"
+            if len(context_map) == 1 and len(context_map[ctx_id]) == 1
+            else f"child-{i}"
+        )
+
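+    # create_id hands out "parent" for the very first ID requested (the
+    # parallel operation itself) and "child-<i>" for each branch context
+    # afterwards, so get_checkpoint can route lookups to the right mock above.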
+    with patch.object(DurableContext, "_create_step_id_for_logical_step", create_id):
+        context = DurableContext(state=mock_state)
+        context.parallel(
+            [lambda ctx: "a", lambda ctx: "b"],
+            config=ParallelConfig(serdes=batch_serdes, item_serdes=item_serdes),
+        )
+
+    expected = item_serdes or batch_serdes
+    assert mock_serialize.call_args_list[0][1]["serdes"] is expected
+    assert mock_serialize.call_args_list[0][1]["operation_id"] == "child-0"
+    assert mock_serialize.call_args_list[1][1]["serdes"] is expected
+    assert mock_serialize.call_args_list[1][1]["operation_id"] == "child-1"
+    assert mock_serialize.call_args_list[2][1]["serdes"] is batch_serdes
+    assert mock_serialize.call_args_list[2][1]["operation_id"] == "parent"
+
+
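+# Note the asymmetry with the deserialize test below: per-item results honor
+# item_serdes (with serdes as fallback), while the parent-level BatchResult is
+# always written with the batch serdes.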
+@pytest.mark.parametrize(
+    ("item_serdes", "batch_serdes"),
+    [
+        (Mock(), Mock()),
+        (None, Mock()),
+        (Mock(), None),
+    ],
+)
+@patch("aws_durable_execution_sdk_python.operation.child.deserialize")
+def test_parallel_item_deserialize(mock_deserialize, item_serdes, batch_serdes):
+    """Test parallel deserializes branches with item_serdes or fallback."""
+    mock_deserialize.return_value = "deserialized"
+
+    parent_checkpoint = Mock()
+    parent_checkpoint.is_succeeded.return_value = False
+    parent_checkpoint.is_failed.return_value = False
+    parent_checkpoint.is_existent.return_value = False
+
+    child_checkpoint = Mock()
+    child_checkpoint.is_succeeded.return_value = True
+    child_checkpoint.is_failed.return_value = False
+    child_checkpoint.is_replay_children.return_value = False
+    child_checkpoint.result = '"cached"'
+
+    def get_checkpoint(op_id):
+        return child_checkpoint if op_id.startswith("child-") else parent_checkpoint
+
+    mock_state = Mock()
+    mock_state.durable_execution_arn = "arn:test"
+    mock_state.get_checkpoint_result = Mock(side_effect=get_checkpoint)
+    mock_state.create_checkpoint = Mock()
+
+    context_map = {}
+
+    def create_id(self, i):
+        ctx_id = id(self)
+        if ctx_id not in context_map:
+            context_map[ctx_id] = []
+        context_map[ctx_id].append(i)
+        return (
+            "parent"
+            if len(context_map) == 1 and len(context_map[ctx_id]) == 1
+            else f"child-{i}"
+        )
+
+    with patch.object(DurableContext, "_create_step_id_for_logical_step", create_id):
+        context = DurableContext(state=mock_state)
+        context.parallel(
+            [lambda ctx: "a", lambda ctx: "b"],
+            config=ParallelConfig(serdes=batch_serdes, item_serdes=item_serdes),
+        )
+
+    expected = item_serdes or batch_serdes
+    assert mock_deserialize.call_args_list[0][1]["serdes"] is expected
+    assert mock_deserialize.call_args_list[0][1]["operation_id"] == "child-0"
+    assert mock_deserialize.call_args_list[1][1]["serdes"] is expected
+    assert mock_deserialize.call_args_list[1][1]["operation_id"] == "child-1"
+
+
+def test_parallel_result_serialization_roundtrip():
+    """Test that parallel operation BatchResult can be serialized and deserialized."""
+
+    def func1(ctx):
+        return [1, 2, 3]
+
+    def func2(ctx):
+        return {"status": "complete", "count": 42}
+
+    def func3(ctx):
+        return "simple string"
+
+    callables = [func1, func2, func3]
+
+    class MockExecutionState:
+        durable_execution_arn = "arn:test"
+
+        def get_checkpoint_result(self, operation_id):
+            mock_result = Mock()
+            mock_result.is_succeeded.return_value = False
+            return mock_result
+
+    execution_state = MockExecutionState()
+    parallel_context = Mock()
+    parallel_context._create_step_id_for_logical_step = Mock(  # noqa: SLF001
+        side_effect=["1", "2", "3"]
+    )
+    parallel_context.create_child_context = Mock(return_value=Mock())
+    operation_identifier = OperationIdentifier("test_op", "parent", "test_parallel")
+
+    # Execute parallel
+    result = parallel_handler(
+        callables,
+        ParallelConfig(),
+        execution_state,
+        parallel_context,
+        operation_identifier,
+    )
+
+    # Serialize the BatchResult
+    serialized = json.dumps(result.to_dict())
+
+    # Deserialize
+    deserialized = BatchResult.from_dict(json.loads(serialized))
+
+    # Verify all data preserved
+    assert len(deserialized.all) == 3
+    assert deserialized.all[0].result == [1, 2, 3]
+    assert deserialized.all[1].result == {"status": "complete", "count": 42}
+    assert deserialized.all[2].result == "simple string"
+    assert deserialized.completion_reason == result.completion_reason
+    assert all(item.status == BatchItemStatus.SUCCEEDED for item in deserialized.all)
+
+
+def test_parallel_handler_serializes_batch_result():
+    """Verify parallel_handler serializes BatchResult at parent level."""
+
+    with patch(
+        "aws_durable_execution_sdk_python.serdes.serialize"
+    ) as mock_serdes_serialize:
+        mock_serdes_serialize.return_value = '"serialized"'
+        importlib.reload(child)
+
+        parent_checkpoint = Mock()
+        parent_checkpoint.is_succeeded.return_value = False
+        parent_checkpoint.is_failed.return_value = False
+        parent_checkpoint.is_existent.return_value = False
+        parent_checkpoint.is_replay_children.return_value = False
+
+        child_checkpoint = Mock()
+        child_checkpoint.is_succeeded.return_value = False
+        child_checkpoint.is_failed.return_value = False
+        child_checkpoint.is_existent.return_value = False
+        child_checkpoint.is_replay_children.return_value = False
+
+        def get_checkpoint(op_id):
+            return child_checkpoint if op_id.startswith("child-") else parent_checkpoint
+
+        mock_state = Mock()
+        mock_state.durable_execution_arn = "arn:test"
+        mock_state.get_checkpoint_result = Mock(side_effect=get_checkpoint)
+        mock_state.create_checkpoint = Mock()
+
+        context_map = {}
+
+        def create_id(self, i):
+            ctx_id = id(self)
+            if ctx_id not in context_map:
+                context_map[ctx_id] = []
+            context_map[ctx_id].append(i)
+            return (
+                "parent"
+                if len(context_map) == 1 and len(context_map[ctx_id]) == 1
+                else f"child-{i}"
+            )
+
+        with patch.object(
+            DurableContext, "_create_step_id_for_logical_step", create_id
+        ):
+            context = DurableContext(state=mock_state)
+            result = context.parallel([lambda ctx: "a", lambda ctx: "b"])
+
+        assert len(mock_serdes_serialize.call_args_list) == 3
+        parent_call = mock_serdes_serialize.call_args_list[2]
+        assert parent_call[1]["value"] is result
+
+
+def test_parallel_default_serdes_serializes_batch_result():
+    """Verify default serdes automatically serializes BatchResult."""
+    with patch(
+        "aws_durable_execution_sdk_python.serdes.serialize", wraps=serialize
+    ) as mock_serialize:
+        importlib.reload(child)
+
+        parent_checkpoint = Mock()
+        parent_checkpoint.is_succeeded.return_value = False
+        parent_checkpoint.is_failed.return_value = False
+        parent_checkpoint.is_existent.return_value = False
+        parent_checkpoint.is_replay_children.return_value = False
+
+        child_checkpoint = Mock()
+        child_checkpoint.is_succeeded.return_value = False
+        child_checkpoint.is_failed.return_value = False
+        child_checkpoint.is_existent.return_value = False
+        child_checkpoint.is_replay_children.return_value = False
+
+        def get_checkpoint(op_id):
+            return child_checkpoint if op_id.startswith("child-") else parent_checkpoint
+
+        mock_state = Mock()
+        mock_state.durable_execution_arn = "arn:test"
+        mock_state.get_checkpoint_result = Mock(side_effect=get_checkpoint)
+        mock_state.create_checkpoint = Mock()
+
+        context_map = {}
+
+        def create_id(self, i):
+            ctx_id = id(self)
+            if ctx_id not in context_map:
+                context_map[ctx_id] = []
+            context_map[ctx_id].append(i)
+            return (
+                "parent"
+                if len(context_map) == 1 and len(context_map[ctx_id]) == 1
+                else f"child-{i}"
+            )
+
+        with patch.object(
+            DurableContext, "_create_step_id_for_logical_step", create_id
+        ):
+            context = DurableContext(state=mock_state)
+            result = context.parallel([lambda ctx: "a", lambda ctx: "b"])
+
+        assert isinstance(result, BatchResult)
+        assert len(mock_serialize.call_args_list) == 3
+        parent_call = mock_serialize.call_args_list[2]
+        assert parent_call[1]["serdes"] is None
+        assert isinstance(parent_call[1]["value"], BatchResult)
+        assert parent_call[1]["value"] is result
+
+
+def test_parallel_custom_serdes_serializes_batch_result():
+    """Verify custom serdes is used for BatchResult serialization."""
+
+    custom_serdes = CustomStrSerDes()
+
+    with patch("aws_durable_execution_sdk_python.serdes.serialize") as mock_serialize:
+        mock_serialize.return_value = '"serialized"'
+        importlib.reload(child)
+
+        parent_checkpoint = Mock()
+        parent_checkpoint.is_succeeded.return_value = False
+        parent_checkpoint.is_failed.return_value = False
+        parent_checkpoint.is_existent.return_value = False
+        parent_checkpoint.is_replay_children.return_value = False
+
+        child_checkpoint = Mock()
+        child_checkpoint.is_succeeded.return_value = False
+        child_checkpoint.is_failed.return_value = False
+        child_checkpoint.is_existent.return_value = False
+        child_checkpoint.is_replay_children.return_value = False
+
+        def get_checkpoint(op_id):
+            return child_checkpoint if op_id.startswith("child-") else parent_checkpoint
+
+        mock_state = Mock()
+        mock_state.durable_execution_arn = "arn:test"
+        mock_state.get_checkpoint_result = Mock(side_effect=get_checkpoint)
+        mock_state.create_checkpoint = Mock()
+
+        context_map = {}
+
+        def create_id(self, i):
+            ctx_id = id(self)
+            if ctx_id not in context_map:
+                context_map[ctx_id] = []
+            context_map[ctx_id].append(i)
+            return (
+                "parent"
+                if len(context_map) == 1 and len(context_map[ctx_id]) == 1
+                else f"child-{i}"
+            )
+
+        with patch.object(
+            DurableContext, "_create_step_id_for_logical_step", create_id
+        ):
+            context = DurableContext(state=mock_state)
+            result = context.parallel(
+                [lambda ctx: "a", lambda ctx: "b"],
+                config=ParallelConfig(serdes=custom_serdes),
+            )
+
+        assert isinstance(result, BatchResult)
+        assert len(mock_serialize.call_args_list) == 3
+        parent_call = mock_serialize.call_args_list[2]
+        assert parent_call[1]["serdes"] is custom_serdes
+        assert isinstance(parent_call[1]["value"], BatchResult)
+        assert parent_call[1]["value"] is result
diff --git a/tests/operation/step_test.py b/tests/operation/step_test.py
index 04396cd..a7e38a8 100644
--- a/tests/operation/step_test.py
+++ b/tests/operation/step_test.py
@@ -7,6 +7,7 @@
 import pytest
 
 from aws_durable_execution_sdk_python.config import (
+    Duration,
     StepConfig,
     StepSemantics,
 )
@@ -27,12 +28,27 @@
     StepDetails,
 )
 from aws_durable_execution_sdk_python.logger import Logger
-from aws_durable_execution_sdk_python.operation.step import step_handler
+from aws_durable_execution_sdk_python.operation.step import StepOperationExecutor
 from aws_durable_execution_sdk_python.retries import RetryDecision
 from aws_durable_execution_sdk_python.state import CheckpointedResult, ExecutionState
 from tests.serdes_test import CustomDictSerDes
 
 
+# Test helper - maintains old handler signature for backward compatibility in tests
+def step_handler(func, state, operation_identifier, config, context_logger):
+    """Test helper that wraps StepOperationExecutor with old handler signature."""
+    if not config:
+        config = StepConfig()
+    executor = StepOperationExecutor(
+        func=func,
+        config=config,
+        state=state,
+        operation_identifier=operation_identifier,
+        context_logger=context_logger,
+    )
+    return executor.process()
+
+
 def test_step_handler_already_succeeded():
     """Test step_handler when operation already succeeded."""
     mock_state = Mock(spec=ExecutionState)
@@ -222,10 +238,19 @@ def test_step_handler_success_at_least_once():
 def test_step_handler_success_at_most_once():
     """Test step_handler successful execution with AT_MOST_ONCE semantics."""
     mock_state = Mock(spec=ExecutionState)
-    mock_result = CheckpointedResult.create_not_found()
-    mock_state.get_checkpoint_result.return_value = mock_result
     mock_state.durable_execution_arn = "test_arn"
 
+    # First call: not found, second call: started (after sync checkpoint)
+    not_found = CheckpointedResult.create_not_found()
+    started_op = Operation(
+        operation_id="step7",
+        operation_type=OperationType.STEP,
+        status=OperationStatus.STARTED,
+        step_details=StepDetails(attempt=0),
+    )
+    started = CheckpointedResult.create_from_operation(started_op)
+    mock_state.get_checkpoint_result.side_effect = [not_found, started]
+
     config = StepConfig(step_semantics=StepSemantics.AT_MOST_ONCE_PER_RETRY)
     mock_callable = Mock(return_value="success_result")
     mock_logger = Mock(spec=Logger)
@@ -290,7 +315,7 @@
     mock_state.durable_execution_arn = "test_arn"
 
     mock_retry_strategy = Mock(
-        return_value=RetryDecision(should_retry=True, delay_seconds=5)
+        return_value=RetryDecision(should_retry=True, delay=Duration.from_seconds(5))
     )
     config = StepConfig(retry_strategy=mock_retry_strategy)
     mock_callable = Mock(side_effect=RuntimeError("Test error"))
@@ -333,7 +358,7 @@
     mock_state.durable_execution_arn = "test_arn"
 
     mock_retry_strategy = Mock(
-        return_value=RetryDecision(should_retry=False, delay_seconds=0)
+        return_value=RetryDecision(should_retry=False, delay=Duration.from_seconds(0))
     )
     config = StepConfig(retry_strategy=mock_retry_strategy)
     mock_callable = Mock(side_effect=RuntimeError("Test error"))
@@ -376,7 +401,7 @@
     mock_state.durable_execution_arn = "test_arn"
 
     mock_retry_strategy = Mock(
-        return_value=RetryDecision(should_retry=False, delay_seconds=0)
+        return_value=RetryDecision(should_retry=False, delay=Duration.from_seconds(0))
     )
     config = StepConfig(retry_strategy=mock_retry_strategy)
     interrupted_error = StepInterruptedError("Step interrupted")
@@ -415,7 +440,7 @@
     mock_state.durable_execution_arn = "test_arn"
 
     mock_retry_strategy = Mock(
-        return_value=RetryDecision(should_retry=True, delay_seconds=10)
+        return_value=RetryDecision(should_retry=True, delay=Duration.from_seconds(10))
     )
     config = StepConfig(retry_strategy=mock_retry_strategy)
     mock_callable = Mock(side_effect=RuntimeError("Test error"))
@@ -451,7 +476,7 @@
     mock_state.durable_execution_arn = "test_arn"
 
     mock_retry_strategy = Mock(
-        return_value=RetryDecision(should_retry=True, delay_seconds=10)
+        return_value=RetryDecision(should_retry=True, delay=Duration.from_seconds(10))
    )
     config = StepConfig(retry_strategy=mock_retry_strategy)
     mock_callable = Mock(side_effect=RuntimeError("Test error"))
@@ -471,14 +496,25 @@
     mock_retry_strategy.assert_not_called()
 
 
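+# retry_handler now lives on StepOperationExecutor, so the patch target below
+# moves from the module-level function to the executor method.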
-@patch("aws_durable_execution_sdk_python.operation.step.retry_handler")
+@patch(
+    "aws_durable_execution_sdk_python.operation.step.StepOperationExecutor.retry_handler"
+)
 def test_step_handler_retry_handler_no_exception(mock_retry_handler):
     """Test step_handler when retry_handler doesn't raise an exception."""
     mock_state = Mock(spec=ExecutionState)
-    mock_result = CheckpointedResult.create_not_found()
-    mock_state.get_checkpoint_result.return_value = mock_result
     mock_state.durable_execution_arn = "test_arn"
 
+    # First call: not found, second call: started (AT_LEAST_ONCE default)
+    not_found = CheckpointedResult.create_not_found()
+    started_op = Operation(
+        operation_id="step13",
+        operation_type=OperationType.STEP,
+        status=OperationStatus.STARTED,
+        step_details=StepDetails(attempt=0),
+    )
+    started = CheckpointedResult.create_from_operation(started_op)
+    mock_state.get_checkpoint_result.side_effect = [not_found, started]
+
     # Mock retry_handler to not raise an exception (which it should always do)
     mock_retry_handler.return_value = None
@@ -558,3 +594,303 @@ def test_step_handler_custom_serdes_already_succeeded():
     )
 
     assert result == {"key": "value", "number": 42, "list": [1, 2, 3]}
+
+
+# Tests for immediate response handling
+
+
+def test_step_immediate_response_get_checkpoint_called_twice():
+    """Test that get_checkpoint_result is called twice when checkpoint is created."""
+    mock_state = Mock(spec=ExecutionState)
+    mock_state.durable_execution_arn = "test_arn"
+
+    # First call: not found (checkpoint doesn't exist)
+    # Second call: started (checkpoint created, no immediate response)
+    not_found = CheckpointedResult.create_not_found()
+    started_op = Operation(
+        operation_id="step_immediate_1",
+        operation_type=OperationType.STEP,
+        status=OperationStatus.STARTED,
+        step_details=StepDetails(attempt=0),
+    )
+    started = CheckpointedResult.create_from_operation(started_op)
+    mock_state.get_checkpoint_result.side_effect = [not_found, started]
+
+    config = StepConfig(step_semantics=StepSemantics.AT_MOST_ONCE_PER_RETRY)
+    mock_callable = Mock(return_value="success_result")
+    mock_logger = Mock(spec=Logger)
+    mock_logger.with_log_info.return_value = mock_logger
+
+    result = step_handler(
+        mock_callable,
+        mock_state,
+        OperationIdentifier("step_immediate_1", None, "test_step"),
+        config,
+        mock_logger,
+    )
+
+    # Verify get_checkpoint_result was called twice (before and after checkpoint creation)
+    assert mock_state.get_checkpoint_result.call_count == 2
+    assert result == "success_result"
+
+
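+# The two tests below pin down the checkpoint mode per semantics:
+# AT_MOST_ONCE_PER_RETRY starts synchronously (is_sync=True) and re-reads the
+# checkpoint, while AT_LEAST_ONCE_PER_RETRY starts asynchronously
+# (is_sync=False) with no second read.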
+def test_step_immediate_response_create_checkpoint_sync_at_most_once():
+    """Test that create_checkpoint is called with is_sync=True for AT_MOST_ONCE semantics."""
+    mock_state = Mock(spec=ExecutionState)
+    mock_state.durable_execution_arn = "test_arn"
+
+    # First call: not found, second call: started
+    not_found = CheckpointedResult.create_not_found()
+    started_op = Operation(
+        operation_id="step_immediate_2",
+        operation_type=OperationType.STEP,
+        status=OperationStatus.STARTED,
+        step_details=StepDetails(attempt=0),
+    )
+    started = CheckpointedResult.create_from_operation(started_op)
+    mock_state.get_checkpoint_result.side_effect = [not_found, started]
+
+    config = StepConfig(step_semantics=StepSemantics.AT_MOST_ONCE_PER_RETRY)
+    mock_callable = Mock(return_value="success_result")
+    mock_logger = Mock(spec=Logger)
+    mock_logger.with_log_info.return_value = mock_logger
+
+    step_handler(
+        mock_callable,
+        mock_state,
+        OperationIdentifier("step_immediate_2", None, "test_step"),
+        config,
+        mock_logger,
+    )
+
+    # Verify START checkpoint was created with is_sync=True
+    start_call = mock_state.create_checkpoint.call_args_list[0]
+    assert start_call[1]["is_sync"] is True
+
+
+def test_step_immediate_response_create_checkpoint_async_at_least_once():
+    """Test that create_checkpoint is called with is_sync=False for AT_LEAST_ONCE semantics."""
+    mock_state = Mock(spec=ExecutionState)
+    mock_state.durable_execution_arn = "test_arn"
+
+    # For AT_LEAST_ONCE, only one call to get_checkpoint_result (no second check)
+    not_found = CheckpointedResult.create_not_found()
+    mock_state.get_checkpoint_result.return_value = not_found
+
+    config = StepConfig(step_semantics=StepSemantics.AT_LEAST_ONCE_PER_RETRY)
+    mock_callable = Mock(return_value="success_result")
+    mock_logger = Mock(spec=Logger)
+    mock_logger.with_log_info.return_value = mock_logger
+
+    step_handler(
+        mock_callable,
+        mock_state,
+        OperationIdentifier("step_immediate_3", None, "test_step"),
+        config,
+        mock_logger,
+    )
+
+    # Verify START checkpoint was created with is_sync=False
+    start_call = mock_state.create_checkpoint.call_args_list[0]
+    assert start_call[1]["is_sync"] is False
+
+
+def test_step_immediate_response_immediate_success():
+    """Test immediate success: checkpoint returns SUCCEEDED on second check, operation returns without suspend.
+
+    Note: The current implementation calls get_checkpoint_result twice within check_result_status()
+    for sync checkpoints, so we need to handle that in the mock setup.
+    """
+    mock_state = Mock(spec=ExecutionState)
+    mock_state.durable_execution_arn = "test_arn"
+
+    # First call: not found
+    # Second call: started (no immediate response, proceed to execute)
+    not_found = CheckpointedResult.create_not_found()
+    started_op = Operation(
+        operation_id="step_immediate_4",
+        operation_type=OperationType.STEP,
+        status=OperationStatus.STARTED,
+        step_details=StepDetails(attempt=0),
+    )
+    started = CheckpointedResult.create_from_operation(started_op)
+    mock_state.get_checkpoint_result.side_effect = [not_found, started]
+
+    config = StepConfig(step_semantics=StepSemantics.AT_MOST_ONCE_PER_RETRY)
+    mock_callable = Mock(return_value="immediate_success_result")
+    mock_logger = Mock(spec=Logger)
+    mock_logger.with_log_info.return_value = mock_logger
+
+    result = step_handler(
+        mock_callable,
+        mock_state,
+        OperationIdentifier("step_immediate_4", None, "test_step"),
+        config,
+        mock_logger,
+    )
+
+    # Verify operation executed normally (no immediate response in current implementation)
+    assert result == "immediate_success_result"
+    mock_callable.assert_called_once()
+    # Both START and SUCCEED checkpoints should be created
+    assert mock_state.create_checkpoint.call_count == 2
+
+
+def test_step_immediate_response_immediate_failure():
+    """Test immediate failure: checkpoint returns FAILED on second check, operation raises error without suspend."""
+    mock_state = Mock(spec=ExecutionState)
+    mock_state.durable_execution_arn = "test_arn"
+
+    # First call: not found
+    # Second call: started (current implementation doesn't support immediate terminal responses from START)
+    not_found = CheckpointedResult.create_not_found()
+    started_op = Operation(
+        operation_id="step_immediate_5",
+        operation_type=OperationType.STEP,
+        status=OperationStatus.STARTED,
+        step_details=StepDetails(attempt=0),
+    )
+    started = CheckpointedResult.create_from_operation(started_op)
+    mock_state.get_checkpoint_result.side_effect = [not_found, started]
+
+    config = StepConfig(step_semantics=StepSemantics.AT_MOST_ONCE_PER_RETRY)
+    # Make the step function raise an error
+    mock_callable = Mock(side_effect=RuntimeError("Step execution error"))
+    mock_logger = Mock(spec=Logger)
+    mock_logger.with_log_info.return_value = mock_logger
+
+    # Configure retry strategy to not retry
+    mock_retry_strategy = Mock(
+        return_value=RetryDecision(should_retry=False, delay=Duration.from_seconds(0))
+    )
+    config = StepConfig(
+        step_semantics=StepSemantics.AT_MOST_ONCE_PER_RETRY,
+        retry_strategy=mock_retry_strategy,
+    )
+
+    # Verify operation raises error after executing step function
+    with pytest.raises(CallableRuntimeError, match="Step execution error"):
+        step_handler(
+            mock_callable,
+            mock_state,
+            OperationIdentifier("step_immediate_5", None, "test_step"),
+            config,
+            mock_logger,
+        )
+
+    mock_callable.assert_called_once()
+    # Both START and FAIL checkpoints should be created
+    assert mock_state.create_checkpoint.call_count == 2
+
+
+def test_step_immediate_response_no_immediate_response():
+    """Test no immediate response: checkpoint returns STARTED on second check, operation executes step function."""
+    mock_state = Mock(spec=ExecutionState)
+    mock_state.durable_execution_arn = "test_arn"
+
+    # First call: not found
+    # Second call: started (no immediate response, proceed to execute)
+    not_found = CheckpointedResult.create_not_found()
+    started_op = Operation(
+        operation_id="step_immediate_6",
+        operation_type=OperationType.STEP,
+        status=OperationStatus.STARTED,
+        step_details=StepDetails(attempt=0),
+    )
+    started = CheckpointedResult.create_from_operation(started_op)
+    mock_state.get_checkpoint_result.side_effect = [not_found, started]
+
+    config = StepConfig(step_semantics=StepSemantics.AT_MOST_ONCE_PER_RETRY)
+    mock_callable = Mock(return_value="normal_execution_result")
+    mock_logger = Mock(spec=Logger)
+    mock_logger.with_log_info.return_value = mock_logger
+
+    result = step_handler(
+        mock_callable,
+        mock_state,
+        OperationIdentifier("step_immediate_6", None, "test_step"),
+        config,
+        mock_logger,
+    )
+
+    # Verify step function was executed
+    assert result == "normal_execution_result"
+    mock_callable.assert_called_once()
+    # Both START and SUCCEED checkpoints should be created
+    assert mock_state.create_checkpoint.call_count == 2
+
+
+def test_step_immediate_response_already_completed():
+    """Test already completed: checkpoint is already SUCCEEDED on first check, no checkpoint created."""
+    mock_state = Mock(spec=ExecutionState)
+    mock_state.durable_execution_arn = "test_arn"
+
+    # First call: already succeeded (replay scenario)
+    succeeded_op = Operation(
+        operation_id="step_immediate_7",
+        operation_type=OperationType.STEP,
+        status=OperationStatus.SUCCEEDED,
+        step_details=StepDetails(result=json.dumps("already_completed_result")),
+    )
+    succeeded = CheckpointedResult.create_from_operation(succeeded_op)
+    mock_state.get_checkpoint_result.return_value = succeeded
+
+    config = StepConfig(step_semantics=StepSemantics.AT_MOST_ONCE_PER_RETRY)
+    mock_callable = Mock(return_value="should_not_call")
+    mock_logger = Mock(spec=Logger)
+
+    result = step_handler(
+        mock_callable,
+        mock_state,
+        OperationIdentifier("step_immediate_7", None, "test_step"),
+        config,
+        mock_logger,
+    )
+
+    # Verify operation returned immediately without creating checkpoint
+    assert result == "already_completed_result"
+    mock_callable.assert_not_called()
+    mock_state.create_checkpoint.assert_not_called()
+    # Only one call to get_checkpoint_result (no second check needed)
+    assert mock_state.get_checkpoint_result.call_count == 1
+
+
+def test_step_executes_function_when_second_check_returns_started():
+    """Test backward compatibility: with AT_LEAST_ONCE semantics the START
+    checkpoint is written asynchronously, so there is a single status check
+    and the step function executes normally.
+    """
+    mock_state = Mock(spec=ExecutionState)
+    mock_state.durable_execution_arn = "test_arn"
+
+    # First call: checkpoint doesn't exist
+    # A second value is provided, but AT_LEAST_ONCE performs only one check
+    not_found = CheckpointedResult.create_not_found()
+    started_op = Operation(
+        operation_id="step-1",
+        operation_type=OperationType.STEP,
+        status=OperationStatus.STARTED,
+        step_details=StepDetails(attempt=1),
+    )
+    started = CheckpointedResult.create_from_operation(started_op)
+    mock_state.get_checkpoint_result.side_effect = [not_found, started]
+
+    mock_step_function = Mock(return_value="result")
+    mock_logger = Mock(spec=Logger)
+    mock_logger.with_log_info.return_value = mock_logger
+
+    executor = StepOperationExecutor(
+        func=mock_step_function,
+        config=StepConfig(step_semantics=StepSemantics.AT_LEAST_ONCE_PER_RETRY),
+        state=mock_state,
+        operation_identifier=OperationIdentifier("step-1", None, "test_step"),
+        context_logger=mock_logger,
+    )
+    result = executor.process()
+
+    # Assert - behaves like "old way"
+    mock_step_function.assert_called_once()  # Function executed (not skipped)
+    assert result == "result"
+    assert (
+        mock_state.get_checkpoint_result.call_count == 1
+    )  # Only one check for AT_LEAST_ONCE
+    assert mock_state.create_checkpoint.call_count == 2  # START + SUCCEED checkpoints
diff --git a/tests/operation/wait_for_condition_test.py b/tests/operation/wait_for_condition_test.py
index d1e43af..676244f 100644
--- a/tests/operation/wait_for_condition_test.py
+++ b/tests/operation/wait_for_condition_test.py
@@ -6,6 +6,7 @@
 
 import pytest
 
+from aws_durable_execution_sdk_python.config import Duration
 from aws_durable_execution_sdk_python.exceptions import (
     CallableRuntimeError,
     InvocationError,
@@ -21,7 +22,7 @@
 )
 from aws_durable_execution_sdk_python.logger import Logger, LogInfo
 from aws_durable_execution_sdk_python.operation.wait_for_condition import (
-    wait_for_condition_handler,
+    WaitForConditionOperationExecutor,
 )
 from aws_durable_execution_sdk_python.state import CheckpointedResult, ExecutionState
 from aws_durable_execution_sdk_python.types import WaitForConditionCheckContext
@@ -32,6 +33,21 @@
 from tests.serdes_test import CustomDictSerDes
 
 
+# Test helper - maintains old handler signature for backward compatibility in tests
+def wait_for_condition_handler(
+    check, config, state, operation_identifier, context_logger
+):
+    """Test helper that wraps WaitForConditionOperationExecutor with old handler signature."""
+    executor = WaitForConditionOperationExecutor(
+        check=check,
+        config=config,
+        state=state,
+        operation_identifier=operation_identifier,
+        context_logger=context_logger,
+    )
+    return executor.process()
+
+
 def test_wait_for_condition_first_execution_condition_met():
     """Test wait_for_condition on first execution when condition is met."""
     mock_state = Mock(spec=ExecutionState)
@@ -54,7 +70,11 @@ def wait_strategy(state, attempt):
     config = WaitForConditionConfig(initial_state=5, wait_strategy=wait_strategy)
 
     result = wait_for_condition_handler(
-        check_func, config, mock_state, op_id, mock_logger
+        state=mock_state,
+        operation_identifier=op_id,
+        check=check_func,
+        config=config,
+        context_logger=mock_logger,
     )
 
     assert result == 6
@@ -78,12 +98,18 @@ def check_func(state, context):
         return state + 1
 
     def wait_strategy(state, attempt):
-        return WaitForConditionDecision.continue_waiting(30)
+        return WaitForConditionDecision.continue_waiting(Duration.from_seconds(30))
 
     config = WaitForConditionConfig(initial_state=5, wait_strategy=wait_strategy)
 
     with pytest.raises(SuspendExecution, match="will retry in 30 seconds"):
-        wait_for_condition_handler(check_func, config, mock_state, op_id, mock_logger)
+        wait_for_condition_handler(
+            state=mock_state,
+            operation_identifier=op_id,
+            check=check_func,
+            config=config,
+            context_logger=mock_logger,
+        )
 
     assert mock_state.create_checkpoint.call_count == 2  # START and RETRY
@@ -113,7 +139,11 @@ def check_func(state, context):
     )
 
     result = wait_for_condition_handler(
-        check_func, config, mock_state, op_id, mock_logger
+        state=mock_state,
+        operation_identifier=op_id,
+        check=check_func,
+        config=config,
+        context_logger=mock_logger,
     )
 
     assert result == 42
@@ -145,7 +175,11 @@ def check_func(state, context):
     )
 
     result = wait_for_condition_handler(
-        check_func, config, mock_state, op_id, mock_logger
+        state=mock_state,
+        operation_identifier=op_id,
+        check=check_func,
+        config=config,
+        context_logger=mock_logger,
     )
 
     assert result is None
@@ -178,7 +212,13 @@ def check_func(state, context):
     )
 
     with pytest.raises(CallableRuntimeError):
-        wait_for_condition_handler(check_func, config, mock_state, op_id, mock_logger)
+        wait_for_condition_handler(
+            state=mock_state,
+            operation_identifier=op_id,
+            check=check_func,
+            config=config,
+            context_logger=mock_logger,
+        )
 
 
 def test_wait_for_condition_retry_with_state():
@@ -208,7 +248,11 @@ def check_func(state, context):
     )
 
     result = wait_for_condition_handler(
-        check_func, config, mock_state, op_id, mock_logger
+        state=mock_state,
+        operation_identifier=op_id,
+        check=check_func,
+        config=config,
+        context_logger=mock_logger,
     )
 
     assert result == 11  # 10 (from checkpoint) + 1
@@ -242,7 +286,11 @@ def check_func(state, context):
     )
 
     result = wait_for_condition_handler(
-        check_func, config, mock_state, op_id, mock_logger
+        state=mock_state,
+        operation_identifier=op_id,
+        check=check_func,
+        config=config,
+        context_logger=mock_logger,
     )
 
     assert result == 6  # 5 (initial) + 1
@@ -275,7 +323,11 @@ def check_func(state, context):
     )
 
     result = wait_for_condition_handler(
-        check_func, config, mock_state, op_id, mock_logger
+        state=mock_state,
+        operation_identifier=op_id,
+        check=check_func,
+        config=config,
+        context_logger=mock_logger,
     )
 
     assert result == 6  # Falls back to initial state
@@ -304,7 +356,13 @@ def check_func(state, context):
     )
 
     with pytest.raises(ValueError, match="Test error"):
-        wait_for_condition_handler(check_func, config, mock_state, op_id, mock_logger)
+        wait_for_condition_handler(
+            state=mock_state,
+            operation_identifier=op_id,
+            check=check_func,
+            config=config,
+            context_logger=mock_logger,
+        )
 
     assert mock_state.create_checkpoint.call_count == 2  # START and FAIL
@@ -334,7 +392,13 @@ def check_func(state, context):
         wait_strategy=lambda s, a: WaitForConditionDecision.stop_polling(),
     )
 
-    wait_for_condition_handler(check_func, config, mock_state, op_id, mock_logger)
+    wait_for_condition_handler(
+        state=mock_state,
+        operation_identifier=op_id,
+        check=check_func,
+        config=config,
+        context_logger=mock_logger,
+    )
 
     assert isinstance(captured_context, WaitForConditionCheckContext)
     assert captured_context.logger is mock_logger
@@ -357,12 +421,18 @@ def check_func(state, context):
         return state + 1
 
     def wait_strategy(state, attempt):
-        return WaitForConditionDecision(should_continue=True, delay_seconds=None)
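+        # Duration() with no arguments represents a zero delay, hence the
+        # suspend message now reads "0 seconds" rather than "None seconds"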
+        return WaitForConditionDecision(should_continue=True, delay=Duration())
 
     config = WaitForConditionConfig(initial_state=5, wait_strategy=wait_strategy)
 
-    with pytest.raises(SuspendExecution, match="will retry in None seconds"):
-        wait_for_condition_handler(check_func, config, mock_state, op_id, mock_logger)
+    with pytest.raises(SuspendExecution, match="will retry in 0 seconds"):
+        wait_for_condition_handler(
+            state=mock_state,
+            operation_identifier=op_id,
+            check=check_func,
+            config=config,
+            context_logger=mock_logger,
+        )
 
 
 def test_wait_for_condition_no_operation_in_checkpoint():
@@ -396,7 +466,11 @@ def check_func(state, context):
     )
 
     result = wait_for_condition_handler(
-        check_func, config, mock_state, op_id, mock_logger
+        state=mock_state,
+        operation_identifier=op_id,
+        check=check_func,
+        config=config,
+        context_logger=mock_logger,
     )
 
     assert result == 11  # Uses attempt=1 by default
@@ -441,7 +515,11 @@ def check_func(state, context):
     )
 
     result = wait_for_condition_handler(
-        check_func, config, mock_state, op_id, mock_logger
+        state=mock_state,
+        operation_identifier=op_id,
+        check=check_func,
+        config=config,
+        context_logger=mock_logger,
     )
 
     assert result == 11  # Uses attempt=1 by default
@@ -464,12 +542,20 @@ def check_func(state, context):
         return state + 1
 
     def wait_strategy(state, attempt):
-        return WaitForConditionDecision(should_continue=True, delay_seconds=60)
+        return WaitForConditionDecision(
+            should_continue=True, delay=Duration.from_minutes(1)
+        )
 
     config = WaitForConditionConfig(initial_state=5, wait_strategy=wait_strategy)
 
pytest.raises(SuspendExecution, match="will retry in 0 seconds"): - wait_for_condition_handler(check_func, config, mock_state, op_id, mock_logger) + wait_for_condition_handler( + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, + ) def test_wait_for_condition_custom_serdes_first_execution_condition_met(): @@ -614,7 +726,13 @@ def wait_strategy(state, attempt): initial_state=5, wait_strategy=wait_strategy, serdes=CustomDictSerDes() ) - wait_for_condition_handler(check_func, config, mock_state, op_id, mock_logger) + wait_for_condition_handler( + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, + ) expected_checkpoointed_result = ( '{"key": "VALUE", "number": "84", "list": [1, 2, 3]}' ) @@ -651,7 +769,11 @@ def check_func(state, context): ) result = wait_for_condition_handler( - check_func, config, mock_state, op_id, mock_logger + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, ) assert result == {"key": "value", "number": 42, "list": [1, 2, 3]} @@ -692,7 +814,13 @@ def check_func(state, context): with pytest.raises( SuspendExecution, match="wait_for_condition test_wait will retry at timestamp" ): - wait_for_condition_handler(check_func, config, mock_state, op_id, mock_logger) + wait_for_condition_handler( + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, + ) def test_wait_for_condition_pending_without_next_attempt(): @@ -728,4 +856,346 @@ def check_func(state, context): SuspendExecution, match="No timestamp provided. Suspending without retry timestamp.", ): - wait_for_condition_handler(check_func, config, mock_state, op_id, mock_logger) + wait_for_condition_handler( + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, + ) + + +# Immediate Response Handling Tests + + +def test_wait_for_condition_checkpoint_called_once_with_is_sync_false(): + """Test that get_checkpoint_result is called once when checkpoint is created (is_sync=False).""" + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "arn:aws:test" + mock_state.get_checkpoint_result.return_value = ( + CheckpointedResult.create_not_found() + ) + + mock_logger = Mock(spec=Logger) + mock_logger.with_log_info.return_value = mock_logger + + op_id = OperationIdentifier("op1", None, "test_wait") + + def check_func(state, context): + return state + 1 + + config = WaitForConditionConfig( + initial_state=5, + wait_strategy=lambda s, a: WaitForConditionDecision.stop_polling(), + ) + + wait_for_condition_handler( + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, + ) + + # Verify get_checkpoint_result called only once (no second check for async checkpoint) + assert mock_state.get_checkpoint_result.call_count == 1 + + # Verify create_checkpoint called with is_sync=False + assert mock_state.create_checkpoint.call_count == 2 # START and SUCCESS + start_call = mock_state.create_checkpoint.call_args_list[0] + assert start_call[1]["is_sync"] is False + + +def test_wait_for_condition_immediate_success_without_executing_check(): + """Test immediate success: checkpoint returns SUCCEEDED on first check, returns result without executing check.""" + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + operation = 
+
+
+def test_wait_for_condition_checkpoint_called_once_with_is_sync_false():
+    """Test that get_checkpoint_result is called once when checkpoint is created (is_sync=False)."""
+    mock_state = Mock(spec=ExecutionState)
+    mock_state.durable_execution_arn = "arn:aws:test"
+    mock_state.get_checkpoint_result.return_value = (
+        CheckpointedResult.create_not_found()
+    )
+
+    mock_logger = Mock(spec=Logger)
+    mock_logger.with_log_info.return_value = mock_logger
+
+    op_id = OperationIdentifier("op1", None, "test_wait")
+
+    def check_func(state, context):
+        return state + 1
+
+    config = WaitForConditionConfig(
+        initial_state=5,
+        wait_strategy=lambda s, a: WaitForConditionDecision.stop_polling(),
+    )
+
+    wait_for_condition_handler(
+        state=mock_state,
+        operation_identifier=op_id,
+        check=check_func,
+        config=config,
+        context_logger=mock_logger,
+    )
+
+    # Verify get_checkpoint_result called only once (no second check for async checkpoint)
+    assert mock_state.get_checkpoint_result.call_count == 1
+
+    # Verify create_checkpoint called with is_sync=False
+    assert mock_state.create_checkpoint.call_count == 2  # START and SUCCESS
+    start_call = mock_state.create_checkpoint.call_args_list[0]
+    assert start_call[1]["is_sync"] is False
+
+
+def test_wait_for_condition_immediate_success_without_executing_check():
+    """Test immediate success: checkpoint returns SUCCEEDED on first check, returns result without executing check."""
+    mock_state = Mock(spec=ExecutionState)
+    mock_state.durable_execution_arn = "test_arn"
+    operation = Operation(
+        operation_id="op1",
+        operation_type=OperationType.STEP,
+        status=OperationStatus.SUCCEEDED,
+        step_details=StepDetails(result=json.dumps(42)),
+    )
+    mock_result = CheckpointedResult.create_from_operation(operation)
+    mock_state.get_checkpoint_result.return_value = mock_result
+
+    mock_logger = Mock(spec=Logger)
+    op_id = OperationIdentifier("op1", None, "test_wait")
+
+    # Check function should NOT be called
+    def check_func(state, context):
+        msg = "Check function should not be called for immediate success"
+        raise AssertionError(msg)
+
+    config = WaitForConditionConfig(
+        initial_state=5,
+        wait_strategy=lambda s, a: WaitForConditionDecision.stop_polling(),
+    )
+
+    result = wait_for_condition_handler(
+        state=mock_state,
+        operation_identifier=op_id,
+        check=check_func,
+        config=config,
+        context_logger=mock_logger,
+    )
+
+    # Verify result returned without executing check function
+    assert result == 42
+    # Verify no new checkpoints created
+    assert mock_state.create_checkpoint.call_count == 0
+
+
+def test_wait_for_condition_immediate_failure_without_executing_check():
+    """Test immediate failure: checkpoint returns FAILED on first check, raises error without executing check."""
+    mock_state = Mock(spec=ExecutionState)
+    mock_state.durable_execution_arn = "test_arn"
+    operation = Operation(
+        operation_id="op1",
+        operation_type=OperationType.STEP,
+        status=OperationStatus.FAILED,
+        step_details=StepDetails(
+            error=ErrorObject("Test error", "TestError", None, None)
+        ),
+    )
+    mock_result = CheckpointedResult.create_from_operation(operation)
+    mock_state.get_checkpoint_result.return_value = mock_result
+
+    mock_logger = Mock(spec=Logger)
+    op_id = OperationIdentifier("op1", None, "test_wait")
+
+    # Check function should NOT be called
+    def check_func(state, context):
+        msg = "Check function should not be called for immediate failure"
+        raise AssertionError(msg)
+
+    config = WaitForConditionConfig(
+        initial_state=5,
+        wait_strategy=lambda s, a: WaitForConditionDecision.stop_polling(),
+    )
+
+    # Verify error raised without executing check function
+    with pytest.raises(CallableRuntimeError):
+        wait_for_condition_handler(
+            state=mock_state,
+            operation_identifier=op_id,
+            check=check_func,
+            config=config,
+            context_logger=mock_logger,
+        )
+
+    # Verify no new checkpoints created
+    assert mock_state.create_checkpoint.call_count == 0
+
+
+def test_wait_for_condition_pending_suspends_without_executing_check():
+    """Test pending handling: checkpoint returns PENDING on first check, suspends without executing check."""
+    mock_state = Mock(spec=ExecutionState)
+    mock_state.durable_execution_arn = "arn:aws:test"
+    operation = Operation(
+        operation_id="op1",
+        operation_type=OperationType.STEP,
+        status=OperationStatus.PENDING,
+        step_details=StepDetails(
+            result=json.dumps(10),
+            next_attempt_timestamp=datetime.datetime.fromtimestamp(
+                1764547200, tz=datetime.UTC
+            ),
+        ),
+    )
+    mock_result = CheckpointedResult.create_from_operation(operation)
+    mock_state.get_checkpoint_result.return_value = mock_result
+
+    mock_logger = Mock(spec=Logger)
+    mock_logger.with_log_info.return_value = mock_logger
+
+    op_id = OperationIdentifier("op1", None, "test_wait")
+
+    # Check function should NOT be called
+    def check_func(state, context):
+        msg = "Check function should not be called for pending status"
+        raise AssertionError(msg)
+
+    config = WaitForConditionConfig(
+        initial_state=5,
+        wait_strategy=lambda s, a: WaitForConditionDecision.stop_polling(),
+    )
+
+    # Verify suspend occurs without executing check function
+    with pytest.raises(
+        SuspendExecution, match="wait_for_condition test_wait will retry at timestamp"
+    ):
+        wait_for_condition_handler(
+            state=mock_state,
+            operation_identifier=op_id,
+            check=check_func,
+            config=config,
+            context_logger=mock_logger,
+        )
+
+    # Verify no new checkpoints created
+    assert mock_state.create_checkpoint.call_count == 0
+
+
+def test_wait_for_condition_no_checkpoint_executes_check_function():
+    """Test no immediate response: when checkpoint doesn't exist, operation executes check function."""
+    mock_state = Mock(spec=ExecutionState)
+    mock_state.durable_execution_arn = "arn:aws:test"
+    mock_state.get_checkpoint_result.return_value = (
+        CheckpointedResult.create_not_found()
+    )
+
+    mock_logger = Mock(spec=Logger)
+    mock_logger.with_log_info.return_value = mock_logger
+
+    op_id = OperationIdentifier("op1", None, "test_wait")
+
+    check_called = False
+
+    def check_func(state, context):
+        nonlocal check_called
+        check_called = True
+        return state + 1
+
+    config = WaitForConditionConfig(
+        initial_state=5,
+        wait_strategy=lambda s, a: WaitForConditionDecision.stop_polling(),
+    )
+
+    result = wait_for_condition_handler(
+        state=mock_state,
+        operation_identifier=op_id,
+        check=check_func,
+        config=config,
+        context_logger=mock_logger,
+    )
+
+    # Verify check function was executed
+    assert check_called is True
+    assert result == 6
+
+    # Verify checkpoints created (START and SUCCESS)
+    assert mock_state.create_checkpoint.call_count == 2
+
+
+def test_wait_for_condition_already_completed_no_checkpoint_created():
+    """Test already completed: when checkpoint is SUCCEEDED on first check, no checkpoint created."""
+    mock_state = Mock(spec=ExecutionState)
+    mock_state.durable_execution_arn = "test_arn"
+    operation = Operation(
+        operation_id="op1",
+        operation_type=OperationType.STEP,
+        status=OperationStatus.SUCCEEDED,
+        step_details=StepDetails(result=json.dumps(42)),
+    )
+    mock_result = CheckpointedResult.create_from_operation(operation)
+    mock_state.get_checkpoint_result.return_value = mock_result
+
+    mock_logger = Mock(spec=Logger)
+    op_id = OperationIdentifier("op1", None, "test_wait")
+
+    def check_func(state, context):
+        return state + 1
+
+    config = WaitForConditionConfig(
+        initial_state=5,
+        wait_strategy=lambda s, a: WaitForConditionDecision.stop_polling(),
+    )
+
+    result = wait_for_condition_handler(
+        state=mock_state,
+        operation_identifier=op_id,
+        check=check_func,
+        config=config,
+        context_logger=mock_logger,
+    )
+
+    # Verify result returned
+    assert result == 42
+
+    # Verify NO checkpoints created (already completed)
+    assert mock_state.create_checkpoint.call_count == 0
+
+
+def test_wait_for_condition_executes_check_when_checkpoint_not_terminal():
+    """Test backward compatibility: when checkpoint is not terminal (STARTED),
+    the wait_for_condition operation executes the check function normally.
+
+    Note: wait_for_condition uses async checkpoints (is_sync=False), so there's
+    only one check, not two.
+ """ + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + # Single call: checkpoint doesn't exist (async checkpoint, no second check) + mock_state.get_checkpoint_result.return_value = ( + CheckpointedResult.create_not_found() + ) + + mock_check_function = Mock(return_value="final_state") + mock_logger = Mock(spec=Logger) + mock_logger.with_log_info.return_value = mock_logger + + def mock_wait_strategy(state, attempt): + return WaitForConditionDecision( + should_continue=False, delay=Duration.from_seconds(0) + ) + + executor = WaitForConditionOperationExecutor( + check=mock_check_function, + config=WaitForConditionConfig( + initial_state="initial", + wait_strategy=mock_wait_strategy, + ), + state=mock_state, + operation_identifier=OperationIdentifier("wfc-1", None, "test_wfc"), + context_logger=mock_logger, + ) + result = executor.process() + + # Assert - behaves like "old way" + mock_check_function.assert_called_once() # Check function executed + assert result == "final_state" + assert mock_state.get_checkpoint_result.call_count == 1 # Single check (async) + assert mock_state.create_checkpoint.call_count == 2 # START + SUCCESS checkpoints + + +def test_wait_for_condition_executes_check_when_checkpoint_not_terminal_duplicate(): + """Test backward compatibility: when checkpoint is not terminal (STARTED), + the wait_for_condition operation executes the check function normally. + + Note: wait_for_condition uses async checkpoints (is_sync=False), so there's + only one check, not two. + """ + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + # Single call: checkpoint doesn't exist (async checkpoint, no second check) + mock_state.get_checkpoint_result.return_value = ( + CheckpointedResult.create_not_found() + ) + + mock_check_function = Mock(return_value="final_state") + mock_logger = Mock(spec=Logger) + mock_logger.with_log_info.return_value = mock_logger + + def mock_wait_strategy(state, attempt): + return WaitForConditionDecision(should_continue=False, delay=None) + + executor = WaitForConditionOperationExecutor( + check=mock_check_function, + config=WaitForConditionConfig( + initial_state="initial", + wait_strategy=mock_wait_strategy, + ), + state=mock_state, + operation_identifier=OperationIdentifier("wfc-1", None, "test_wfc"), + context_logger=mock_logger, + ) + result = executor.process() + + # Assert - behaves like "old way" + mock_check_function.assert_called_once() # Check function executed + assert result == "final_state" + assert mock_state.get_checkpoint_result.call_count == 1 # Single check (async) + assert mock_state.create_checkpoint.call_count == 2 # START + SUCCESS checkpoints diff --git a/tests/operation/wait_test.py b/tests/operation/wait_test.py index 17b9de9..ca3083e 100644 --- a/tests/operation/wait_test.py +++ b/tests/operation/wait_test.py @@ -7,16 +7,29 @@ from aws_durable_execution_sdk_python.exceptions import SuspendExecution from aws_durable_execution_sdk_python.identifier import OperationIdentifier from aws_durable_execution_sdk_python.lambda_service import ( + Operation, OperationAction, + OperationStatus, OperationSubType, OperationType, OperationUpdate, WaitOptions, ) -from aws_durable_execution_sdk_python.operation.wait import wait_handler +from aws_durable_execution_sdk_python.operation.wait import WaitOperationExecutor from aws_durable_execution_sdk_python.state import CheckpointedResult, ExecutionState +# Test helper function - maintains old handler signature for backward 
+def wait_handler(seconds: int, state, operation_identifier) -> None:
+    """Test helper that wraps WaitOperationExecutor with old handler signature."""
+    executor = WaitOperationExecutor(
+        seconds=seconds,
+        state=state,
+        operation_identifier=operation_identifier,
+    )
+    return executor.process()
+
+
 def test_wait_handler_already_completed():
     """Test wait_handler when operation is already completed."""
     mock_state = Mock(spec=ExecutionState)
@@ -37,10 +50,18 @@
 def test_wait_handler_not_completed():
     """Test wait_handler when operation is not completed."""
     mock_state = Mock(spec=ExecutionState)
-    mock_result = Mock(spec=CheckpointedResult)
-    mock_result.is_succeeded.return_value = False
-    mock_result.is_existent.return_value = False
-    mock_state.get_checkpoint_result.return_value = mock_result
+
+    # First call: checkpoint doesn't exist
+    not_found_result = Mock(spec=CheckpointedResult)
+    not_found_result.is_succeeded.return_value = False
+    not_found_result.is_existent.return_value = False
+
+    # Second call: checkpoint exists but not completed (no immediate response)
+    started_result = Mock(spec=CheckpointedResult)
+    started_result.is_succeeded.return_value = False
+    started_result.is_existent.return_value = True
+
+    mock_state.get_checkpoint_result.side_effect = [not_found_result, started_result]
 
     with pytest.raises(SuspendExecution, match="Wait for 30 seconds"):
         wait_handler(
@@ -49,7 +70,8 @@
             operation_identifier=OperationIdentifier("wait2", None),
         )
 
-    mock_state.get_checkpoint_result.assert_called_once_with("wait2")
+    # Should be called twice: once before checkpoint, once after to check for immediate response
+    assert mock_state.get_checkpoint_result.call_count == 2
 
     expected_operation = OperationUpdate(
         operation_id="wait2",
@@ -60,25 +82,36 @@
         wait_options=WaitOptions(wait_seconds=30),
     )
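+    # Wait checkpoints are created synchronously so an already-elapsed timer
+    # can complete in the same call (see the immediate-response tests below)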
     mock_state.create_checkpoint.assert_called_once_with(
-        operation_update=expected_operation
+        operation_update=expected_operation, is_sync=True
     )
 
 
 def test_wait_handler_with_none_name():
     """Test wait_handler with None name."""
     mock_state = Mock(spec=ExecutionState)
-    mock_result = Mock(spec=CheckpointedResult)
-    mock_result.is_succeeded.return_value = False
-    mock_result.is_existent.return_value = False
-    mock_state.get_checkpoint_result.return_value = mock_result
+
+    # First call: checkpoint doesn't exist
+    not_found_result = Mock(spec=CheckpointedResult)
+    not_found_result.is_succeeded.return_value = False
+    not_found_result.is_existent.return_value = False
+
+    # Second call: checkpoint exists but not completed (no immediate response)
+    started_result = Mock(spec=CheckpointedResult)
+    started_result.is_succeeded.return_value = False
+    started_result.is_existent.return_value = True
+
+    mock_state.get_checkpoint_result.side_effect = [not_found_result, started_result]
 
     with pytest.raises(SuspendExecution, match="Wait for 5 seconds"):
         wait_handler(
-            seconds=5,
             state=mock_state,
             operation_identifier=OperationIdentifier("wait3", None),
+            seconds=5,
         )
 
+    # Should be called twice: once before checkpoint, once after to check for immediate response
+    assert mock_state.get_checkpoint_result.call_count == 2
+
     expected_operation = OperationUpdate(
         operation_id="wait3",
         parent_id=None,
@@ -88,7 +121,7 @@
         wait_options=WaitOptions(wait_seconds=5),
     )
     mock_state.create_checkpoint.assert_called_once_with(
-        operation_update=expected_operation
+        operation_update=expected_operation, is_sync=True
     )
 
 
@@ -102,10 +135,285 @@ def test_wait_handler_with_existent():
 
     with pytest.raises(SuspendExecution, match="Wait for 5 seconds"):
         wait_handler(
-            seconds=5,
             state=mock_state,
             operation_identifier=OperationIdentifier("wait4", None),
+            seconds=5,
         )
 
     mock_state.get_checkpoint_result.assert_called_once_with("wait4")
     mock_state.create_checkpoint.assert_not_called()
+
+
+# Immediate response handling tests
+
+
+def test_wait_status_evaluation_after_checkpoint():
+    """Test that status is evaluated twice: before and after checkpoint creation.
+
+    This verifies the immediate response pattern:
+    1. Check status (checkpoint doesn't exist)
+    2. Create checkpoint with is_sync=True
+    3. Check status again (catches immediate response)
+    """
+    # Arrange
+    mock_state = Mock(spec=ExecutionState)
+
+    # First call: checkpoint doesn't exist
+    not_found_result = Mock(spec=CheckpointedResult)
+    not_found_result.is_succeeded.return_value = False
+    not_found_result.is_existent.return_value = False
+
+    # Second call: checkpoint exists but not completed (no immediate response)
+    started_result = Mock(spec=CheckpointedResult)
+    started_result.is_succeeded.return_value = False
+    started_result.is_existent.return_value = True
+
+    mock_state.get_checkpoint_result.side_effect = [not_found_result, started_result]
+
+    executor = WaitOperationExecutor(
+        seconds=30,
+        state=mock_state,
+        operation_identifier=OperationIdentifier("wait_eval", None, "test_wait"),
+    )
+
+    # Act
+    with pytest.raises(SuspendExecution):
+        executor.process()
+
+    # Assert - verify status checked twice
+    assert mock_state.get_checkpoint_result.call_count == 2
+    mock_state.get_checkpoint_result.assert_any_call("wait_eval")
+
+    # Verify checkpoint created with is_sync=True
+    expected_operation = OperationUpdate(
+        operation_id="wait_eval",
+        parent_id=None,
+        name="test_wait",
+        operation_type=OperationType.WAIT,
+        action=OperationAction.START,
+        sub_type=OperationSubType.WAIT,
+        wait_options=WaitOptions(wait_seconds=30),
+    )
+    mock_state.create_checkpoint.assert_called_once_with(
+        operation_update=expected_operation, is_sync=True
+    )
+
+
+def test_wait_immediate_success_handling():
+    """Test that immediate SUCCEEDED response returns without suspend.
+
+    When the checkpoint returns SUCCEEDED on the second status check,
+    the operation should return immediately without suspending.
+    """
+    # Arrange
+    mock_state = Mock(spec=ExecutionState)
+
+    # First call: checkpoint doesn't exist
+    not_found_result = Mock(spec=CheckpointedResult)
+    not_found_result.is_succeeded.return_value = False
+    not_found_result.is_existent.return_value = False
+
+    # Second call: checkpoint succeeded immediately
+    succeeded_result = Mock(spec=CheckpointedResult)
+    succeeded_result.is_succeeded.return_value = True
+
+    mock_state.get_checkpoint_result.side_effect = [not_found_result, succeeded_result]
+
+    executor = WaitOperationExecutor(
+        seconds=5,
+        state=mock_state,
+        operation_identifier=OperationIdentifier(
+            "wait_immediate", None, "immediate_wait"
+        ),
+    )
+
+    # Act
+    result = executor.process()
+
+    # Assert - verify immediate return without suspend
+    assert result is None  # Wait returns None
+
+    # Verify checkpoint was created
+    assert mock_state.create_checkpoint.call_count == 1
+
+    # Verify status checked twice
+    assert mock_state.get_checkpoint_result.call_count == 2
+
+
+def test_wait_no_immediate_response_suspends():
+    """Test that wait suspends when no immediate response received.
+
+
+def test_wait_no_immediate_response_suspends():
+    """Test that wait suspends when no immediate response received.
+
+    When the checkpoint returns STARTED (not completed) on the second check,
+    the operation should suspend to wait for timer completion.
+    """
+    # Arrange
+    mock_state = Mock(spec=ExecutionState)
+
+    # First call: checkpoint doesn't exist
+    not_found_result = Mock(spec=CheckpointedResult)
+    not_found_result.is_succeeded.return_value = False
+    not_found_result.is_existent.return_value = False
+
+    # Second call: checkpoint exists but not completed
+    started_result = Mock(spec=CheckpointedResult)
+    started_result.is_succeeded.return_value = False
+    started_result.is_existent.return_value = True
+
+    mock_state.get_checkpoint_result.side_effect = [not_found_result, started_result]
+
+    executor = WaitOperationExecutor(
+        seconds=60,
+        state=mock_state,
+        operation_identifier=OperationIdentifier("wait_suspend", None),
+    )
+
+    # Act & Assert - verify suspend occurs
+    with pytest.raises(SuspendExecution) as exc_info:
+        executor.process()
+
+    # Verify suspend message
+    assert "Wait for 60 seconds" in str(exc_info.value)
+
+    # Verify checkpoint was created
+    assert mock_state.create_checkpoint.call_count == 1
+
+    # Verify status checked twice
+    assert mock_state.get_checkpoint_result.call_count == 2
+
+
+def test_wait_already_completed_no_checkpoint():
+    """Test that already completed wait doesn't create checkpoint.
+
+    When replaying and the wait is already completed, it should return
+    immediately without creating a new checkpoint.
+    """
+    # Arrange
+    mock_state = Mock(spec=ExecutionState)
+
+    # Checkpoint already exists and succeeded
+    succeeded_result = Mock(spec=CheckpointedResult)
+    succeeded_result.is_succeeded.return_value = True
+
+    mock_state.get_checkpoint_result.return_value = succeeded_result
+
+    executor = WaitOperationExecutor(
+        seconds=10,
+        state=mock_state,
+        operation_identifier=OperationIdentifier("wait_replay", None, "completed_wait"),
+    )
+
+    # Act
+    result = executor.process()
+
+    # Assert - verify immediate return without checkpoint
+    assert result is None
+
+    # Verify no checkpoint created
+    mock_state.create_checkpoint.assert_not_called()
+
+    # Verify status checked only once
+    mock_state.get_checkpoint_result.assert_called_once_with("wait_replay")
+
+
+def test_wait_with_various_durations():
+    """Test wait operations with different durations handle immediate response correctly."""
+    for seconds in [1, 30, 300, 3600]:
+        # Arrange
+        mock_state = Mock(spec=ExecutionState)
+
+        # First call: checkpoint doesn't exist
+        not_found_result = Mock(spec=CheckpointedResult)
+        not_found_result.is_succeeded.return_value = False
+        not_found_result.is_existent.return_value = False
+
+        # Second call: immediate success
+        succeeded_result = Mock(spec=CheckpointedResult)
+        succeeded_result.is_succeeded.return_value = True
+
+        mock_state.get_checkpoint_result.side_effect = [
+            not_found_result,
+            succeeded_result,
+        ]
+
+        executor = WaitOperationExecutor(
+            seconds=seconds,
+            state=mock_state,
+            operation_identifier=OperationIdentifier(f"wait_duration_{seconds}", None),
+        )
+
+        # Act
+        result = executor.process()
+
+        # Assert
+        assert result is None
+        assert mock_state.get_checkpoint_result.call_count == 2
+
+        # Verify correct wait duration in checkpoint
+        call_args = mock_state.create_checkpoint.call_args
+        assert call_args[1]["operation_update"].wait_options.wait_seconds == seconds
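# Editor's note: the loop above could equally be lifted into pytest parametrization,
# so each duration reports as its own test case. A minimal sketch under the same
# imports and mocks as this module; the test name is hypothetical, not part of the patch:
@pytest.mark.parametrize("seconds", [1, 30, 300, 3600])
def test_wait_duration_param(seconds):
    mock_state = Mock(spec=ExecutionState)
    not_found = Mock(spec=CheckpointedResult)
    not_found.is_succeeded.return_value = False
    not_found.is_existent.return_value = False
    succeeded = Mock(spec=CheckpointedResult)
    succeeded.is_succeeded.return_value = True
    mock_state.get_checkpoint_result.side_effect = [not_found, succeeded]

    executor = WaitOperationExecutor(
        seconds=seconds,
        state=mock_state,
        operation_identifier=OperationIdentifier(f"wait_duration_{seconds}", None),
    )
    # Immediate success on the second check: no suspend, wait returns None
    assert executor.process() is None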
+ """ + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + # First call: checkpoint doesn't exist + # Second call: checkpoint returns STARTED (no immediate response) + mock_state.get_checkpoint_result.side_effect = [ + CheckpointedResult.create_not_found(), + CheckpointedResult.create_from_operation( + Operation( + operation_id="wait-1", + operation_type=OperationType.WAIT, + status=OperationStatus.STARTED, + ) + ), + ] + + executor = WaitOperationExecutor( + seconds=5, + state=mock_state, + operation_identifier=OperationIdentifier("wait-1", None, "test_wait"), + ) + + with pytest.raises(SuspendExecution): + executor.process() + + # Assert - behaves like "old way" + assert mock_state.get_checkpoint_result.call_count == 2 # Double-check happened + mock_state.create_checkpoint.assert_called_once() # START checkpoint created + + +def test_wait_suspends_when_second_check_returns_started_duplicate(): + """Test backward compatibility: when the second checkpoint check returns + STARTED (not terminal), the wait operation suspends normally. + """ + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + # First call: checkpoint doesn't exist + # Second call: checkpoint returns STARTED (no immediate response) + not_found = CheckpointedResult.create_not_found() + started_op = Operation( + operation_id="wait-1", + operation_type=OperationType.WAIT, + status=OperationStatus.STARTED, + ) + started = CheckpointedResult.create_from_operation(started_op) + mock_state.get_checkpoint_result.side_effect = [not_found, started] + + executor = WaitOperationExecutor( + seconds=5, + state=mock_state, + operation_identifier=OperationIdentifier("wait-1", None, "test_wait"), + ) + + with pytest.raises(SuspendExecution): + executor.process() + + # Assert - behaves like "old way" + assert mock_state.get_checkpoint_result.call_count == 2 # Double-check happened + mock_state.create_checkpoint.assert_called_once() # START checkpoint created diff --git a/tests/retries_test.py b/tests/retries_test.py index 8209376..1b58134 100644 --- a/tests/retries_test.py +++ b/tests/retries_test.py @@ -5,6 +5,7 @@ import pytest +from aws_durable_execution_sdk_python.config import Duration from aws_durable_execution_sdk_python.retries import ( JitterStrategy, RetryDecision, @@ -13,400 +14,563 @@ create_retry_strategy, ) +# region Jitter Strategy Tests -class TestJitterStrategy: - """Test jitter strategy implementations.""" - - def test_none_jitter_returns_zero(self): - """Test NONE jitter always returns 0.""" - strategy = JitterStrategy.NONE - assert strategy.compute_jitter(10) == 0 - assert strategy.compute_jitter(100) == 0 - - @patch("random.random") - def test_full_jitter_range(self, mock_random): - """Test FULL jitter returns value between 0 and delay.""" - mock_random.return_value = 0.5 - strategy = JitterStrategy.FULL - delay = 10 - result = strategy.compute_jitter(delay) - assert result == 5.0 # 0.5 * 10 - - @patch("random.random") - def test_half_jitter_range(self, mock_random): - """Test HALF jitter returns value between 0.5 and 1.0 (multiplier).""" - mock_random.return_value = 0.5 - strategy = JitterStrategy.HALF - result = strategy.compute_jitter(10) - assert result == 7.5 # 10 * (0.5 + 0.5*0.5) - - @patch("random.random") - def test_half_jitter_boundary_values(self, mock_random): - """Test HALF jitter boundary values.""" - strategy = JitterStrategy.HALF - - # Minimum value (random = 0) - mock_random.return_value = 0.0 - result = 
diff --git a/tests/retries_test.py b/tests/retries_test.py
index 8209376..1b58134 100644
--- a/tests/retries_test.py
+++ b/tests/retries_test.py
@@ -5,6 +5,7 @@
 import pytest

+from aws_durable_execution_sdk_python.config import Duration
 from aws_durable_execution_sdk_python.retries import (
     JitterStrategy,
     RetryDecision,
@@ -13,400 +14,563 @@
     create_retry_strategy,
 )

+# region Jitter Strategy Tests

-class TestJitterStrategy:
-    """Test jitter strategy implementations."""
-
-    def test_none_jitter_returns_zero(self):
-        """Test NONE jitter always returns 0."""
-        strategy = JitterStrategy.NONE
-        assert strategy.compute_jitter(10) == 0
-        assert strategy.compute_jitter(100) == 0
-
-    @patch("random.random")
-    def test_full_jitter_range(self, mock_random):
-        """Test FULL jitter returns value between 0 and delay."""
-        mock_random.return_value = 0.5
-        strategy = JitterStrategy.FULL
-        delay = 10
-        result = strategy.compute_jitter(delay)
-        assert result == 5.0  # 0.5 * 10
-
-    @patch("random.random")
-    def test_half_jitter_range(self, mock_random):
-        """Test HALF jitter returns value between 0.5 and 1.0 (multiplier)."""
-        mock_random.return_value = 0.5
-        strategy = JitterStrategy.HALF
-        result = strategy.compute_jitter(10)
-        assert result == 7.5  # 10 * (0.5 + 0.5*0.5)
-
-    @patch("random.random")
-    def test_half_jitter_boundary_values(self, mock_random):
-        """Test HALF jitter boundary values."""
-        strategy = JitterStrategy.HALF
-
-        # Minimum value (random = 0)
-        mock_random.return_value = 0.0
-        result = strategy.compute_jitter(100)
-        assert result == 50
-
-        # Maximum value (random = 1)
-        mock_random.return_value = 1.0
-        result = strategy.compute_jitter(100)
-        assert result == 100
-
-    def test_invalid_jitter_strategy(self):
-        """Test behavior with invalid jitter strategy."""
-        # Create an invalid enum value by bypassing normal construction
-        invalid_strategy = "INVALID"
-
-        # This should raise an exception or return None
-        with pytest.raises((ValueError, AttributeError)):
-            JitterStrategy(invalid_strategy).compute_jitter(10)
-
-
-class TestRetryDecision:
-    """Test RetryDecision factory methods."""
-
-    def test_retry_factory(self):
-        """Test retry factory method."""
-        decision = RetryDecision.retry(30)
-        assert decision.should_retry is True
-        assert decision.delay_seconds == 30
-
-    def test_no_retry_factory(self):
-        """Test no_retry factory method."""
-        decision = RetryDecision.no_retry()
-        assert decision.should_retry is False
-        assert decision.delay_seconds == 0
-
-
-class TestRetryStrategyConfig:
-    """Test RetryStrategyConfig defaults and behavior."""
-
-    def test_default_config(self):
-        """Test default configuration values."""
-        config = RetryStrategyConfig()
-        assert config.max_attempts == 3
-        assert config.initial_delay_seconds == 5
-        assert config.max_delay_seconds == 300
-        assert config.backoff_rate == 2.0
-        assert config.jitter_strategy == JitterStrategy.FULL
-        assert len(config.retryable_errors) == 1
-        assert config.retryable_error_types == []
-
-
-class TestCreateRetryStrategy:
-    """Test retry strategy creation and behavior."""
-
-    def test_max_attempts_exceeded(self):
-        """Test strategy returns no_retry when max attempts exceeded."""
-        config = RetryStrategyConfig(max_attempts=2)
-        strategy = create_retry_strategy(config)
-        error = Exception("test error")
-        decision = strategy(error, 2)
-        assert decision.should_retry is False
+def test_none_jitter_returns_delay():
+    """Test NONE jitter returns the original delay unchanged."""
+    strategy = JitterStrategy.NONE
+    assert strategy.apply_jitter(10) == 10
+    assert strategy.apply_jitter(100) == 100

-    def test_retryable_error_message_string(self):
-        """Test retry based on error message string match."""
-        config = RetryStrategyConfig(retryable_errors=["timeout"])
-        strategy = create_retry_strategy(config)
-        error = Exception("connection timeout")
-        decision = strategy(error, 1)
-        assert decision.should_retry is True
+@patch("random.random")
+def test_full_jitter_range(mock_random):
+    """Test FULL jitter returns value between 0 and delay."""
+    mock_random.return_value = 0.5
+    strategy = JitterStrategy.FULL
+    delay = 10
+    result = strategy.apply_jitter(delay)
+    assert result == 5.0  # 0.5 * 10

-    def test_retryable_error_message_regex(self):
-        """Test retry based on error message regex match."""
-        config = RetryStrategyConfig(retryable_errors=[re.compile(r"timeout|error")])
-        strategy = create_retry_strategy(config)
-        error = Exception("network timeout occurred")
-        decision = strategy(error, 1)
-        assert decision.should_retry is True
+@patch("random.random")
+def test_half_jitter_range(mock_random):
+    """Test HALF jitter returns value between delay/2 and delay."""
+    mock_random.return_value = 0.5
+    strategy = JitterStrategy.HALF
+    result = strategy.apply_jitter(10)
+    assert result == 7.5  # 10/2 + 0.5 * (10/2) = 5 + 2.5
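# Editor's note: the assertions above fully determine apply_jitter. A sketch of an
# enum consistent with them (the real JitterStrategy may differ in details):
import random
from enum import Enum

class _JitterSketch(Enum):
    NONE = "none"
    FULL = "full"
    HALF = "half"

    def apply_jitter(self, delay: float) -> float:
        if self is _JitterSketch.NONE:
            return delay  # unchanged
        if self is _JitterSketch.FULL:
            return random.random() * delay  # uniform in [0, delay)
        return delay / 2 + random.random() * (delay / 2)  # HALF: [delay/2, delay)

# The backoff tests later in this file then combine apply_jitter with exponential
# growth, roughly: delay = max(1, ceil(apply_jitter(min(initial * rate ** (attempt - 1),
# max_delay)))) -- compare test_exponential_backoff_calculation, test_max_delay_cap,
# and test_delay_ceiling_applied below.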
-    def test_retryable_error_type(self):
-        """Test retry based on error type."""
-        config = RetryStrategyConfig(retryable_error_types=[ValueError])
-        strategy = create_retry_strategy(config)
-        error = ValueError("invalid value")
-        decision = strategy(error, 1)
-        assert decision.should_retry is True
+@patch("random.random")
+def test_half_jitter_boundary_values(mock_random):
+    """Test HALF jitter boundary values."""
+    strategy = JitterStrategy.HALF

-    def test_non_retryable_error(self):
-        """Test no retry for non-retryable error."""
-        config = RetryStrategyConfig(retryable_errors=["timeout"])
-        strategy = create_retry_strategy(config)
+    # Minimum value (random = 0): delay/2 + 0 = delay/2
+    mock_random.return_value = 0.0
+    result = strategy.apply_jitter(100)
+    assert result == 50

-        error = Exception("permission denied")
-        decision = strategy(error, 1)
-        assert decision.should_retry is False
+    # Maximum value (random = 1): delay/2 + delay/2 = delay
+    mock_random.return_value = 1.0
+    result = strategy.apply_jitter(100)
+    assert result == 100

-    @patch("random.random")
-    def test_exponential_backoff_calculation(self, mock_random):
-        """Test exponential backoff delay calculation."""
-        mock_random.return_value = 0.5
-        config = RetryStrategyConfig(
-            initial_delay_seconds=2,
-            backoff_rate=2.0,
-            jitter_strategy=JitterStrategy.FULL,
-        )
-        strategy = create_retry_strategy(config)
-        error = Exception("test error")
+def test_invalid_jitter_strategy():
+    """Test behavior with invalid jitter strategy."""
+    # An invalid value that no JitterStrategy member uses
+    invalid_strategy = "INVALID"

-        # First attempt: 2 * (2^0) = 2, jitter adds 1, total = 3
-        decision = strategy(error, 1)
-        assert decision.delay_seconds == 3
+    # Constructing the enum from an invalid value must raise
+    with pytest.raises((ValueError, AttributeError)):
+        JitterStrategy(invalid_strategy).apply_jitter(10)

-        # Second attempt: 2 * (2^1) = 4, jitter adds 2, total = 6
-        decision = strategy(error, 2)
-        assert decision.delay_seconds == 6

-    def test_max_delay_cap(self):
-        """Test delay is capped at max_delay_seconds."""
-        config = RetryStrategyConfig(
-            initial_delay_seconds=100,
-            max_delay_seconds=50,
-            backoff_rate=2.0,
-            jitter_strategy=JitterStrategy.NONE,
-        )
-        strategy = create_retry_strategy(config)
+# endregion
+
+
+# region Retry Decision Tests
+
+
+def test_retry_factory():
+    """Test retry factory method."""
+    decision = RetryDecision.retry(Duration.from_seconds(30))
+    assert decision.should_retry is True
+    assert decision.delay_seconds == 30
+
+
+def test_no_retry_factory():
+    """Test no_retry factory method."""
+    decision = RetryDecision.no_retry()
+    assert decision.should_retry is False
+    assert decision.delay_seconds == 0
+
+
+# endregion
+
+
+# region Retry Strategy Config Tests
+
+
+def test_default_config():
+    """Test default configuration values."""
+    config = RetryStrategyConfig()
+    assert config.max_attempts == 3
+    assert config.initial_delay_seconds == 5
+    assert config.max_delay_seconds == 300
+    assert config.backoff_rate == 2.0
+    assert config.jitter_strategy == JitterStrategy.FULL
+    assert config.retryable_errors is None
+    assert config.retryable_error_types is None
+
+
+# endregion
+
+
+# region Create Retry Strategy Tests
+
+
+def test_max_attempts_exceeded():
+    """Test strategy returns no_retry when max attempts exceeded."""
+    config = RetryStrategyConfig(max_attempts=2)
+    strategy = create_retry_strategy(config)
+
+    error = Exception("test error")
+    decision = strategy(error, 2)
+    assert decision.should_retry is False
+
+
+def test_retryable_error_message_string():
+    """Test retry based on error message string match."""
+    config = RetryStrategyConfig(retryable_errors=["timeout"])
+    strategy = create_retry_strategy(config)
+
+    error =
Exception("connection timeout") + decision = strategy(error, 1) + assert decision.should_retry is True - error = Exception("test error") - decision = strategy(error, 2) # Would be 200 without cap - assert decision.delay_seconds == 50 - def test_minimum_delay_one_second(self): - """Test delay is at least 1 second.""" +def test_retryable_error_message_regex(): + """Test retry based on error message regex match.""" + config = RetryStrategyConfig(retryable_errors=[re.compile(r"timeout|error")]) + strategy = create_retry_strategy(config) + + error = Exception("network timeout occurred") + decision = strategy(error, 1) + assert decision.should_retry is True + + +def test_retryable_error_type(): + """Test retry based on error type.""" + config = RetryStrategyConfig(retryable_error_types=[ValueError]) + strategy = create_retry_strategy(config) + + error = ValueError("invalid value") + decision = strategy(error, 1) + assert decision.should_retry is True + + +def test_non_retryable_error(): + """Test no retry for non-retryable error.""" + config = RetryStrategyConfig(retryable_errors=["timeout"]) + strategy = create_retry_strategy(config) + + error = Exception("permission denied") + decision = strategy(error, 1) + assert decision.should_retry is False + + +@patch("random.random") +def test_exponential_backoff_calculation(mock_random): + """Test exponential backoff delay calculation with jitter.""" + mock_random.return_value = 0.5 + config = RetryStrategyConfig( + initial_delay=Duration.from_seconds(2), + backoff_rate=2.0, + jitter_strategy=JitterStrategy.FULL, + ) + strategy = create_retry_strategy(config) + + error = Exception("test error") + + # First attempt: base = 2 * (2^0) = 2, full jitter = 0.5 * 2 = 1 + decision = strategy(error, 1) + assert decision.delay_seconds == 1 + + # Second attempt: base = 2 * (2^1) = 4, full jitter = 0.5 * 4 = 2 + decision = strategy(error, 2) + assert decision.delay_seconds == 2 + + +def test_max_delay_cap(): + """Test delay is capped at max_delay_seconds.""" + config = RetryStrategyConfig( + initial_delay=Duration.from_seconds(100), + max_delay=Duration.from_seconds(50), + backoff_rate=2.0, + jitter_strategy=JitterStrategy.NONE, + ) + strategy = create_retry_strategy(config) + + error = Exception("test error") + decision = strategy(error, 2) # Would be 200 without cap + assert decision.delay_seconds == 50 + + +def test_minimum_delay_one_second(): + """Test delay is at least 1 second.""" + config = RetryStrategyConfig( + initial_delay=Duration.from_seconds(0), jitter_strategy=JitterStrategy.NONE + ) + strategy = create_retry_strategy(config) + + error = Exception("test error") + decision = strategy(error, 1) + assert decision.delay_seconds == 1 + + +def test_delay_ceiling_applied(): + """Test delay is rounded up using math.ceil.""" + with patch("random.random", return_value=0.3): config = RetryStrategyConfig( - initial_delay_seconds=0, jitter_strategy=JitterStrategy.NONE + initial_delay=Duration.from_seconds(3), + jitter_strategy=JitterStrategy.FULL, ) strategy = create_retry_strategy(config) error = Exception("test error") decision = strategy(error, 1) + # base = 3, full jitter = 0.3 * 3 = 0.9, ceil(0.9) = 1 assert decision.delay_seconds == 1 - def test_delay_ceiling_applied(self): - """Test delay is rounded up using math.ceil.""" - with patch("random.random", return_value=0.3): - config = RetryStrategyConfig( - initial_delay_seconds=3, jitter_strategy=JitterStrategy.FULL - ) - strategy = create_retry_strategy(config) - error = Exception("test error") - decision = 
strategy(error, 1) - # 3 + (0.3 * 3) = 3.9, ceil(3.9) = 4 - assert decision.delay_seconds == 4 +# endregion -class TestRetryPresets: - """Test predefined retry presets.""" +# region Retry Presets Tests - def test_none_preset(self): - """Test none preset allows no retries.""" - strategy = RetryPresets.none() - error = Exception("test error") - decision = strategy(error, 1) - assert decision.should_retry is False +def test_none_preset(): + """Test none preset allows no retries.""" + strategy = RetryPresets.none() + error = Exception("test error") - def test_default_preset_config(self): - """Test default preset configuration.""" - strategy = RetryPresets.default() - error = Exception("test error") + decision = strategy(error, 1) + assert decision.should_retry is False - # Should retry within max attempts - decision = strategy(error, 1) - assert decision.should_retry is True - # Should not retry after max attempts - decision = strategy(error, 6) - assert decision.should_retry is False +def test_default_preset_config(): + """Test default preset configuration.""" + strategy = RetryPresets.default() + error = Exception("test error") - def test_transient_preset_config(self): - """Test transient preset configuration.""" - strategy = RetryPresets.transient() - error = Exception("test error") + # Should retry within max attempts + decision = strategy(error, 1) + assert decision.should_retry is True - # Should retry within max attempts - decision = strategy(error, 1) - assert decision.should_retry is True + # Should not retry after max attempts + decision = strategy(error, 6) + assert decision.should_retry is False - # Should not retry after max attempts - decision = strategy(error, 3) - assert decision.should_retry is False - def test_resource_availability_preset(self): - """Test resource availability preset allows longer retries.""" - strategy = RetryPresets.resource_availability() - error = Exception("test error") +def test_transient_preset_config(): + """Test transient preset configuration.""" + strategy = RetryPresets.transient() + error = Exception("test error") - # Should retry within max attempts - decision = strategy(error, 1) - assert decision.should_retry is True + # Should retry within max attempts + decision = strategy(error, 1) + assert decision.should_retry is True - # Should not retry after max attempts - decision = strategy(error, 5) - assert decision.should_retry is False + # Should not retry after max attempts + decision = strategy(error, 3) + assert decision.should_retry is False - def test_critical_preset_config(self): - """Test critical preset allows many retries.""" - strategy = RetryPresets.critical() - error = Exception("test error") - # Should retry within max attempts - decision = strategy(error, 5) - assert decision.should_retry is True +def test_resource_availability_preset(): + """Test resource availability preset allows longer retries.""" + strategy = RetryPresets.resource_availability() + error = Exception("test error") - # Should not retry after max attempts - decision = strategy(error, 10) - assert decision.should_retry is False + # Should retry within max attempts + decision = strategy(error, 1) + assert decision.should_retry is True - @patch("random.random") - def test_critical_preset_no_jitter(self, mock_random): - """Test critical preset uses no jitter.""" - mock_random.return_value = 0.5 # Should be ignored - strategy = RetryPresets.critical() - error = Exception("test error") + # Should not retry after max attempts + decision = strategy(error, 5) + assert 
decision.should_retry is False - decision = strategy(error, 1) - # With no jitter: 1 * (1.5^0) = 1 - assert decision.delay_seconds == 1 +def test_critical_preset_config(): + """Test critical preset allows many retries.""" + strategy = RetryPresets.critical() + error = Exception("test error") -class TestJitterIntegration: - """Test jitter integration with retry strategies.""" + # Should retry within max attempts + decision = strategy(error, 5) + assert decision.should_retry is True - @patch("random.random") - def test_full_jitter_integration(self, mock_random): - """Test full jitter integration in retry strategy.""" - mock_random.return_value = 0.8 - config = RetryStrategyConfig( - initial_delay_seconds=10, jitter_strategy=JitterStrategy.FULL - ) - strategy = create_retry_strategy(config) + # Should not retry after max attempts + decision = strategy(error, 10) + assert decision.should_retry is False - error = Exception("test error") - decision = strategy(error, 1) - # 10 + (0.8 * 10) = 18 - assert decision.delay_seconds == 18 - @patch("random.random") - def test_half_jitter_integration(self, mock_random): - """Test half jitter integration in retry strategy.""" - mock_random.return_value = 0.6 - config = RetryStrategyConfig( - initial_delay_seconds=10, jitter_strategy=JitterStrategy.HALF - ) - strategy = create_retry_strategy(config) +@patch("random.random") +def test_critical_preset_no_jitter(mock_random): + """Test critical preset uses no jitter.""" + mock_random.return_value = 0.5 # Should be ignored + strategy = RetryPresets.critical() + error = Exception("test error") - error = Exception("test error") - decision = strategy(error, 1) - # 10 + 10*(0.6 * 0.5 + 0.5) = 18 - assert decision.delay_seconds == 18 + decision = strategy(error, 1) + # With no jitter: 1 * (1.5^0) = 1 + assert decision.delay_seconds == 1 - @patch("random.random") - def test_half_jitter_integration_corrected(self, mock_random): - """Test half jitter with corrected understanding of implementation.""" - mock_random.return_value = 0.0 # Minimum jitter - config = RetryStrategyConfig( - initial_delay_seconds=10, jitter_strategy=JitterStrategy.HALF - ) - strategy = create_retry_strategy(config) - error = Exception("test error") - decision = strategy(error, 1) - # 10 + 10 * 0.5 = 15 - assert decision.delay_seconds == 15 +# endregion - def test_none_jitter_integration(self): - """Test no jitter integration in retry strategy.""" - config = RetryStrategyConfig( - initial_delay_seconds=10, jitter_strategy=JitterStrategy.NONE - ) - strategy = create_retry_strategy(config) - error = Exception("test error") - decision = strategy(error, 1) - assert decision.delay_seconds == 10 +# region Jitter Integration Tests -class TestEdgeCases: - """Test edge cases and error conditions.""" +@patch("random.random") +def test_full_jitter_integration(mock_random): + """Test full jitter integration in retry strategy.""" + mock_random.return_value = 0.8 + config = RetryStrategyConfig( + initial_delay=Duration.from_seconds(10), jitter_strategy=JitterStrategy.FULL + ) + strategy = create_retry_strategy(config) - def test_none_config(self): - """Test behavior when config is None.""" - strategy = create_retry_strategy(None) - error = Exception("test error") - decision = strategy(error, 1) - assert decision.should_retry is True - assert decision.delay_seconds >= 1 + error = Exception("test error") + decision = strategy(error, 1) + # base = 10, full jitter = 0.8 * 10 = 8 + assert decision.delay_seconds == 8 - def test_zero_backoff_rate(self): - """Test 
behavior with zero backoff rate.""" - config = RetryStrategyConfig( - initial_delay_seconds=5, backoff_rate=0, jitter_strategy=JitterStrategy.NONE - ) - strategy = create_retry_strategy(config) - error = Exception("test error") - decision = strategy(error, 1) - # 5 * (0^0) = 5 * 1 = 5 - assert decision.delay_seconds == 5 +@patch("random.random") +def test_half_jitter_integration(mock_random): + """Test half jitter integration in retry strategy.""" + mock_random.return_value = 0.6 + config = RetryStrategyConfig( + initial_delay=Duration.from_seconds(10), jitter_strategy=JitterStrategy.HALF + ) + strategy = create_retry_strategy(config) - def test_fractional_backoff_rate(self): - """Test behavior with fractional backoff rate.""" - config = RetryStrategyConfig( - initial_delay_seconds=8, - backoff_rate=0.5, - jitter_strategy=JitterStrategy.NONE, - ) - strategy = create_retry_strategy(config) + error = Exception("test error") + decision = strategy(error, 1) + # base = 10, half jitter = 10/2 + 0.6 * (10/2) = 5 + 3 = 8 + assert decision.delay_seconds == 8 - error = Exception("test error") - decision = strategy(error, 2) - # 8 * (0.5^1) = 4 - assert decision.delay_seconds == 4 - def test_empty_retryable_errors_list(self): - """Test behavior with empty retryable errors list.""" - config = RetryStrategyConfig(retryable_errors=[]) - strategy = create_retry_strategy(config) +@patch("random.random") +def test_half_jitter_integration_corrected(mock_random): + """Test half jitter with minimum random value.""" + mock_random.return_value = 0.0 # Minimum jitter + config = RetryStrategyConfig( + initial_delay=Duration.from_seconds(10), jitter_strategy=JitterStrategy.HALF + ) + strategy = create_retry_strategy(config) - error = Exception("test error") - decision = strategy(error, 1) - assert decision.should_retry is False + error = Exception("test error") + decision = strategy(error, 1) + # base = 10, half jitter = 10/2 + 0.0 * (10/2) = 5 + assert decision.delay_seconds == 5 - def test_multiple_error_patterns(self): - """Test multiple error patterns matching.""" - config = RetryStrategyConfig( - retryable_errors=["timeout", re.compile(r"network.*error")] - ) - strategy = create_retry_strategy(config) - # Test string match - error1 = Exception("connection timeout") - decision1 = strategy(error1, 1) - assert decision1.should_retry is True +def test_none_jitter_integration(): + """Test no jitter integration in retry strategy.""" + config = RetryStrategyConfig( + initial_delay=Duration.from_seconds(10), jitter_strategy=JitterStrategy.NONE + ) + strategy = create_retry_strategy(config) - # Test regex match - error2 = Exception("network connection error") - decision2 = strategy(error2, 1) - assert decision2.should_retry is True + error = Exception("test error") + decision = strategy(error, 1) + assert decision.delay_seconds == 10 - def test_mixed_error_types_and_patterns(self): - """Test combination of error types and patterns.""" - config = RetryStrategyConfig( - retryable_errors=["timeout"], retryable_error_types=[ValueError] - ) - strategy = create_retry_strategy(config) - # Should retry on ValueError even without message match - error = ValueError("some value error") - decision = strategy(error, 1) - assert decision.should_retry is True +# endregion + + +# region Default Behavior Tests + + +def test_no_filters_retries_all_errors(): + """Test that when neither filter is specified, all errors are retried.""" + config = RetryStrategyConfig() + strategy = create_retry_strategy(config) + + # Should retry any error 
+    error1 = Exception("any error message")
+    decision1 = strategy(error1, 1)
+    assert decision1.should_retry is True
+
+    error2 = ValueError("different error type")
+    decision2 = strategy(error2, 1)
+    assert decision2.should_retry is True
+
+
+def test_only_retryable_errors_specified():
+    """Test that when only retryable_errors is specified, only matching messages are retried."""
+    config = RetryStrategyConfig(retryable_errors=["timeout"])
+    strategy = create_retry_strategy(config)
+
+    # Should retry matching error
+    error1 = Exception("connection timeout")
+    decision1 = strategy(error1, 1)
+    assert decision1.should_retry is True
+
+    # Should NOT retry non-matching error
+    error2 = Exception("permission denied")
+    decision2 = strategy(error2, 1)
+    assert decision2.should_retry is False
+
+
+def test_only_retryable_error_types_specified():
+    """Test that when only retryable_error_types is specified, only matching types are retried."""
+    config = RetryStrategyConfig(retryable_error_types=[ValueError, TypeError])
+    strategy = create_retry_strategy(config)
+
+    # Should retry matching type
+    error1 = ValueError("invalid value")
+    decision1 = strategy(error1, 1)
+    assert decision1.should_retry is True
+
+    error2 = TypeError("type error")
+    decision2 = strategy(error2, 1)
+    assert decision2.should_retry is True
+
+    # Should NOT retry non-matching type (even though message might match default pattern)
+    error3 = Exception("some error")
+    decision3 = strategy(error3, 1)
+    assert decision3.should_retry is False
+
+
+def test_both_filters_specified_or_logic():
+    """Test that when both filters are specified, errors matching either are retried (OR logic)."""
+    config = RetryStrategyConfig(
+        retryable_errors=["timeout"], retryable_error_types=[ValueError]
+    )
+    strategy = create_retry_strategy(config)
+
+    # Should retry on message match
+    error1 = Exception("connection timeout")
+    decision1 = strategy(error1, 1)
+    assert decision1.should_retry is True
+
+    # Should retry on type match
+    error2 = ValueError("some value error")
+    decision2 = strategy(error2, 1)
+    assert decision2.should_retry is True
+
+    # Should NOT retry when neither matches
+    error3 = RuntimeError("runtime error")
+    decision3 = strategy(error3, 1)
+    assert decision3.should_retry is False
+
+
+def test_empty_retryable_errors_with_types():
+    """Test that empty retryable_errors list with types specified only retries matching types."""
+    config = RetryStrategyConfig(
+        retryable_errors=[], retryable_error_types=[ValueError]
+    )
+    strategy = create_retry_strategy(config)
+
+    # Should retry matching type
+    error1 = ValueError("value error")
+    decision1 = strategy(error1, 1)
+    assert decision1.should_retry is True
+
+    # Should NOT retry non-matching type
+    error2 = Exception("some error")
+    decision2 = strategy(error2, 1)
+    assert decision2.should_retry is False
+
+
+def test_empty_retryable_error_types_with_errors():
+    """Test that empty retryable_error_types list with errors specified only retries matching messages."""
+    config = RetryStrategyConfig(retryable_errors=["timeout"], retryable_error_types=[])
+    strategy = create_retry_strategy(config)
+
+    # Should retry matching message
+    error1 = Exception("connection timeout")
+    decision1 = strategy(error1, 1)
+    assert decision1.should_retry is True
+
+    # Should NOT retry non-matching message
+    error2 = Exception("permission denied")
+    decision2 = strategy(error2, 1)
+    assert decision2.should_retry is False
+
+
+# endregion
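# Editor's note: the region above pins down the filter semantics: no filters means
# every error retries; otherwise an error retries if it matches either filter (OR).
# A sketch of the implied predicate (helper name hypothetical):
import re

def _is_retryable_sketch(error, retryable_errors, retryable_error_types):
    if retryable_errors is None and retryable_error_types is None:
        return True  # no filters configured: every error is retryable
    if retryable_error_types and isinstance(error, tuple(retryable_error_types)):
        return True  # type filter match
    for pattern in retryable_errors or []:
        if isinstance(pattern, str) and pattern in str(error):
            return True  # substring match
        if isinstance(pattern, re.Pattern) and pattern.search(str(error)):
            return True  # regex match
    return False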
+# region Edge Cases Tests
+
+
+def test_none_config():
+    """Test behavior when config is None."""
+    strategy = create_retry_strategy(None)
+    error = Exception("test error")
+    decision = strategy(error, 1)
+    assert decision.should_retry is True
+    assert decision.delay_seconds >= 1
+
+
+def test_zero_backoff_rate():
+    """Test behavior with zero backoff rate."""
+    config = RetryStrategyConfig(
+        initial_delay=Duration.from_seconds(5),
+        backoff_rate=0,
+        jitter_strategy=JitterStrategy.NONE,
+    )
+    strategy = create_retry_strategy(config)
+
+    error = Exception("test error")
+    decision = strategy(error, 1)
+    # 5 * (0^0) = 5 * 1 = 5
+    assert decision.delay_seconds == 5
+
+
+def test_fractional_backoff_rate():
+    """Test behavior with fractional backoff rate."""
+    config = RetryStrategyConfig(
+        initial_delay=Duration.from_seconds(8),
+        backoff_rate=0.5,
+        jitter_strategy=JitterStrategy.NONE,
+    )
+    strategy = create_retry_strategy(config)
+
+    error = Exception("test error")
+    decision = strategy(error, 2)
+    # 8 * (0.5^1) = 4
+    assert decision.delay_seconds == 4
+
+
+def test_empty_retryable_errors_list():
+    """Test behavior with empty retryable errors list."""
+    config = RetryStrategyConfig(retryable_errors=[])
+    strategy = create_retry_strategy(config)
+
+    error = Exception("test error")
+    decision = strategy(error, 1)
+    assert decision.should_retry is False
+
+
+def test_multiple_error_patterns():
+    """Test multiple error patterns matching."""
+    config = RetryStrategyConfig(
+        retryable_errors=["timeout", re.compile(r"network.*error")]
+    )
+    strategy = create_retry_strategy(config)
+
+    # Test string match
+    error1 = Exception("connection timeout")
+    decision1 = strategy(error1, 1)
+    assert decision1.should_retry is True
+
+    # Test regex match
+    error2 = Exception("network connection error")
+    decision2 = strategy(error2, 1)
+    assert decision2.should_retry is True
+
+
+def test_mixed_error_types_and_patterns():
+    """Test combination of error types and patterns."""
+    config = RetryStrategyConfig(
+        retryable_errors=["timeout"], retryable_error_types=[ValueError]
+    )
+    strategy = create_retry_strategy(config)
+
+    # Should retry on ValueError even without message match
+    error = ValueError("some value error")
+    decision = strategy(error, 1)
+    assert decision.should_retry is True
+
+
+# endregion
diff --git a/tests/serdes_test.py b/tests/serdes_test.py
index 91baf2c..d511918 100644
--- a/tests/serdes_test.py
+++ b/tests/serdes_test.py
@@ -8,11 +8,18 @@
 import pytest

+from aws_durable_execution_sdk_python.concurrency.models import (
+    BatchItem,
+    BatchItemStatus,
+    BatchResult,
+    CompletionReason,
+)
 from aws_durable_execution_sdk_python.exceptions import (
     DurableExecutionsError,
     ExecutionError,
     SerDesError,
 )
+from aws_durable_execution_sdk_python.lambda_service import ErrorObject
 from aws_durable_execution_sdk_python.serdes import (
     BytesCodec,
     ContainerCodec,
@@ -21,6 +28,7 @@
     EncodedValue,
     ExtendedTypeSerDes,
     JsonSerDes,
+    PassThroughSerDes,
     PrimitiveCodec,
     SerDes,
     SerDesContext,
@@ -730,6 +738,18 @@ def test_extended_serdes_errors():
 # endregion


+def test_pass_through_serdes():
+    serdes = PassThroughSerDes()
+
+    data = '"name": "test", "value": 123'
+    serialized = serialize(serdes, data, "test-op", "test-arn")
+    assert isinstance(serialized, str)
+    assert serialized == '"name": "test", "value": 123'
+    # The pass-through serdes hands the string back unchanged, so deserialize round-trips it
+    deserialized = deserialize(serdes, serialized, "test-op", "test-arn")
+    assert deserialized == data
+
+
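# Editor's note: the test above treats PassThroughSerDes as an identity codec --
# payloads go to and from the store untouched. A sketch of that contract; the exact
# method signatures on the real class are an assumption:
class _PassThroughSketch:
    def serialize(self, value, *args, **kwargs):
        return value  # hand the payload through unchanged

    def deserialize(self, data, *args, **kwargs):
        return data  # no envelope, no decoding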
"""Test with reasonably large data.""" @@ -894,3 +914,82 @@ def test_all_t_v_nested_dicts(): # endregion + + +# to_dict() support tests +def test_default_serdes_supports_to_dict_objects(): + """Test that default serdes automatically handles BatchResult serialization/deserialization.""" + + result = BatchResult( + all=[BatchItem(0, BatchItemStatus.SUCCEEDED, result="test")], + completion_reason=CompletionReason.ALL_COMPLETED, + ) + + # Default serdes should automatically handle BatchResult + serialized = serialize( + serdes=None, + value=result, + operation_id="test_op", + durable_execution_arn="arn:test", + ) + + # Deserialize returns BatchResult (not dict) + deserialized = deserialize( + serdes=None, + data=serialized, + operation_id="test_op", + durable_execution_arn="arn:test", + ) + + assert isinstance(deserialized, BatchResult) + assert deserialized.completion_reason == CompletionReason.ALL_COMPLETED + assert len(deserialized.all) == 1 + assert deserialized.all[0].result == "test" + + +def test_to_dict_output_is_serializable(): + """Test that to_dict() output is serializable by default serdes.""" + + result = BatchResult( + all=[ + BatchItem(0, BatchItemStatus.SUCCEEDED, result={"key": "value"}), + BatchItem( + 1, + BatchItemStatus.FAILED, + error=ErrorObject( + message="error", type="TestError", data=None, stack_trace=[] + ), + ), + ], + completion_reason=CompletionReason.ALL_COMPLETED, + ) + + # Convert to dict + result_dict = result.to_dict() + + # Dict should be serializable + serialized = serialize( + serdes=None, + value=result_dict, + operation_id="test_op", + durable_execution_arn="arn:test", + ) + + # Deserialize + deserialized_dict = deserialize( + serdes=None, + data=serialized, + operation_id="test_op", + durable_execution_arn="arn:test", + ) + + # Verify structure preserved + assert deserialized_dict["completionReason"] == "ALL_COMPLETED" + assert len(deserialized_dict["all"]) == 2 + assert deserialized_dict["all"][0]["result"] == {"key": "value"} + assert deserialized_dict["all"][1]["error"]["ErrorType"] == "TestError" + + # Can reconstruct BatchResult + reconstructed = BatchResult.from_dict(deserialized_dict) + assert len(reconstructed.all) == 2 + assert reconstructed.completion_reason == CompletionReason.ALL_COMPLETED diff --git a/tests/state_test.py b/tests/state_test.py index b4e9d9f..d997abf 100644 --- a/tests/state_test.py +++ b/tests/state_test.py @@ -16,6 +16,7 @@ from aws_durable_execution_sdk_python.exceptions import ( BackgroundThreadError, CallableRuntimeError, + OrphanedChildException, ) from aws_durable_execution_sdk_python.identifier import OperationIdentifier from aws_durable_execution_sdk_python.lambda_service import ( @@ -39,6 +40,7 @@ CheckpointedResult, ExecutionState, QueuedOperation, + ReplayStatus, ) from aws_durable_execution_sdk_python.threading import CompletionEvent @@ -1090,20 +1092,18 @@ def test_rejection_of_operations_from_completed_parents(): ) state.create_checkpoint(parent_complete, is_sync=False) - # Get initial queue size - initial_queue_size = state._checkpoint_queue.qsize() - - # Try to checkpoint child operation (should be rejected) + # Try to checkpoint child operation (should raise OrphanedChildException) child_checkpoint = OperationUpdate( operation_id="child_1", operation_type=OperationType.STEP, action=OperationAction.SUCCEED, parent_id="parent_1", ) - state.create_checkpoint(child_checkpoint, is_sync=False) + with pytest.raises(OrphanedChildException) as exc_info: + state.create_checkpoint(child_checkpoint, is_sync=False) - # 
diff --git a/tests/state_test.py b/tests/state_test.py
index b4e9d9f..d997abf 100644
--- a/tests/state_test.py
+++ b/tests/state_test.py
@@ -16,6 +16,7 @@
 from aws_durable_execution_sdk_python.exceptions import (
     BackgroundThreadError,
     CallableRuntimeError,
+    OrphanedChildException,
 )
 from aws_durable_execution_sdk_python.identifier import OperationIdentifier
 from aws_durable_execution_sdk_python.lambda_service import (
@@ -39,6 +40,7 @@
     CheckpointedResult,
     ExecutionState,
     QueuedOperation,
+    ReplayStatus,
 )
 from aws_durable_execution_sdk_python.threading import CompletionEvent
@@ -1090,20 +1092,18 @@ def test_rejection_of_operations_from_completed_parents():
     )
     state.create_checkpoint(parent_complete, is_sync=False)

-    # Get initial queue size
-    initial_queue_size = state._checkpoint_queue.qsize()
-
-    # Try to checkpoint child operation (should be rejected)
+    # Try to checkpoint child operation (should raise OrphanedChildException)
     child_checkpoint = OperationUpdate(
         operation_id="child_1",
         operation_type=OperationType.STEP,
         action=OperationAction.SUCCEED,
         parent_id="parent_1",
     )
-    state.create_checkpoint(child_checkpoint, is_sync=False)
+    with pytest.raises(OrphanedChildException) as exc_info:
+        state.create_checkpoint(child_checkpoint, is_sync=False)

-    # Verify operation was rejected (queue size unchanged)
-    assert state._checkpoint_queue.qsize() == initial_queue_size
+    # Verify exception contains operation_id
+    assert exc_info.value.operation_id == "child_1"


 def test_nested_parallel_operations_deep_hierarchy():
@@ -1473,20 +1473,18 @@ def process_sync_checkpoint():
     state.create_checkpoint(parent_complete, is_sync=True)
     processor.join(timeout=1.0)

-    # Get queue size before attempting to checkpoint orphaned child
-    initial_queue_size = state._checkpoint_queue.qsize()
-
-    # Try to checkpoint child (should be rejected)
+    # Try to checkpoint child (should raise OrphanedChildException)
     child_checkpoint = OperationUpdate(
         operation_id="child_1",
         operation_type=OperationType.STEP,
         action=OperationAction.SUCCEED,
         parent_id="parent_1",
     )
-    state.create_checkpoint(child_checkpoint, is_sync=True)
+    with pytest.raises(OrphanedChildException) as exc_info:
+        state.create_checkpoint(child_checkpoint, is_sync=True)

-    # Verify operation was rejected (queue size unchanged)
-    assert state._checkpoint_queue.qsize() == initial_queue_size
+    # Verify exception contains operation_id
+    assert exc_info.value.operation_id == "child_1"


 def test_mark_orphans_handles_cycles():
@@ -3242,3 +3240,28 @@ def test_create_checkpoint_sync_always_synchronous():
     finally:
         state.stop_checkpointing()
         executor.shutdown(wait=True)
+
+
+def test_state_replay_mode():
+    operation1 = Operation(
+        operation_id="op1",
+        operation_type=OperationType.STEP,
+        status=OperationStatus.SUCCEEDED,
+    )
+    operation2 = Operation(
+        operation_id="op2",
+        operation_type=OperationType.STEP,
+        status=OperationStatus.SUCCEEDED,
+    )
+    execution_state = ExecutionState(
+        durable_execution_arn="arn:aws:test",
+        initial_checkpoint_token="test_token",  # noqa: S106
+        operations={"op1": operation1, "op2": operation2},
+        service_client=Mock(),
+        replay_status=ReplayStatus.REPLAY,
+    )
+    assert execution_state.is_replaying() is True
+    execution_state.track_replay(operation_id="op1")
+    assert execution_state.is_replaying() is True
+    execution_state.track_replay(operation_id="op2")
+    assert execution_state.is_replaying() is False
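# Editor's note: test_state_replay_mode above implies replay status is tracked by
# counting down the operations restored from the initial checkpoint. A sketch of
# that bookkeeping, consistent with the assertions; the internal attribute names
# are hypothetical:
def _track_replay_sketch(state, operation_id):
    # Mark one restored operation as replayed; once every restored operation has
    # been seen again, the execution flips from REPLAY to live execution.
    state._pending_replay.discard(operation_id)
    if not state._pending_replay:
        state._replay_status = "EXECUTE"  # is_replaying() now returns False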
diff --git a/tests/waits_test.py b/tests/waits_test.py
index 09b7fe5..06267d8 100644
--- a/tests/waits_test.py
+++ b/tests/waits_test.py
@@ -2,7 +2,7 @@
 from unittest.mock import patch

-from aws_durable_execution_sdk_python.config import JitterStrategy
+from aws_durable_execution_sdk_python.config import Duration, JitterStrategy
 from aws_durable_execution_sdk_python.serdes import JsonSerDes
 from aws_durable_execution_sdk_python.waits import (
     WaitDecision,
@@ -18,7 +18,7 @@ class TestWaitDecision:
     def test_wait_factory(self):
         """Test wait factory method."""
-        decision = WaitDecision.wait(30)
+        decision = WaitDecision.wait(Duration.from_seconds(30))
         assert decision.should_wait is True
         assert decision.delay_seconds == 30
@@ -34,7 +34,7 @@ class TestWaitForConditionDecision:
     def test_continue_waiting_factory(self):
         """Test continue_waiting factory method."""
-        decision = WaitForConditionDecision.continue_waiting(45)
+        decision = WaitForConditionDecision.continue_waiting(Duration.from_seconds(45))
         assert decision.should_continue is True
         assert decision.delay_seconds == 45
@@ -42,7 +42,7 @@ def test_stop_polling_factory(self):
         """Test stop_polling factory method."""
         decision = WaitForConditionDecision.stop_polling()
         assert decision.should_continue is False
-        assert decision.delay_seconds == -1
+        assert decision.delay_seconds == 0

 class TestWaitStrategyConfig:
@@ -97,7 +97,7 @@ def test_exponential_backoff_calculation(self, mock_random):
         mock_random.return_value = 0.5
         config = WaitStrategyConfig(
             should_continue_polling=lambda x: True,
-            initial_delay_seconds=2,
+            initial_delay=Duration.from_seconds(2),
             backoff_rate=2.0,
             jitter_strategy=JitterStrategy.FULL,
         )
@@ -105,20 +105,20 @@ def test_exponential_backoff_calculation(self, mock_random):
         result = "pending"

-        # First attempt: 2 * (2^0) = 2, jitter adds 1, total = 3
+        # First attempt: 2 * (2^0) = 2, FULL jitter with 0.5 = 0.5 * 2 = 1
         decision = strategy(result, 1)
-        assert decision.delay_seconds == 3
+        assert decision.delay_seconds == 1

-        # Second attempt: 2 * (2^1) = 4, jitter adds 2, total = 6
+        # Second attempt: 2 * (2^1) = 4, FULL jitter with 0.5 = 0.5 * 4 = 2
         decision = strategy(result, 2)
-        assert decision.delay_seconds == 6
+        assert decision.delay_seconds == 2

     def test_max_delay_cap(self):
         """Test delay is capped at max_delay_seconds."""
         config = WaitStrategyConfig(
             should_continue_polling=lambda x: True,
-            initial_delay_seconds=100,
-            max_delay_seconds=50,
+            initial_delay=Duration.from_seconds(100),
+            max_delay=Duration.from_seconds(50),
             backoff_rate=2.0,
             jitter_strategy=JitterStrategy.NONE,
         )
@@ -132,7 +132,7 @@ def test_minimum_delay_one_second(self):
         """Test delay is at least 1 second."""
         config = WaitStrategyConfig(
             should_continue_polling=lambda x: True,
-            initial_delay_seconds=0,
+            initial_delay=Duration.from_seconds(0),
             jitter_strategy=JitterStrategy.NONE,
         )
         strategy = create_wait_strategy(config)
@@ -147,15 +147,15 @@ def test_full_jitter_integration(self, mock_random):
         mock_random.return_value = 0.8
         config = WaitStrategyConfig(
             should_continue_polling=lambda x: True,
-            initial_delay_seconds=10,
+            initial_delay=Duration.from_seconds(10),
             jitter_strategy=JitterStrategy.FULL,
         )
         strategy = create_wait_strategy(config)

         result = "pending"
         decision = strategy(result, 1)
-        # 10 + (0.8 * 10) = 18
-        assert decision.delay_seconds == 18
+        # FULL jitter: 0.8 * 10 = 8
+        assert decision.delay_seconds == 8

     @patch("random.random")
     def test_half_jitter_integration(self, mock_random):
@@ -163,21 +163,21 @@
         mock_random.return_value = 0.0  # Minimum jitter
         config = WaitStrategyConfig(
             should_continue_polling=lambda x: True,
-            initial_delay_seconds=10,
+            initial_delay=Duration.from_seconds(10),
             jitter_strategy=JitterStrategy.HALF,
         )
         strategy = create_wait_strategy(config)

         result = "pending"
         decision = strategy(result, 1)
-        # base: 10, jitter: 10 * (0.5 + 0.0 * 0.5) = 5, total: 10 + 5 = 15
-        assert decision.delay_seconds == 15
+        # HALF jitter: 10/2 + 0.0 * (10/2) = 5
+        assert decision.delay_seconds == 5

     def test_none_jitter_integration(self):
         """Test no jitter integration in wait strategy."""
         config = WaitStrategyConfig(
             should_continue_polling=lambda x: True,
-            initial_delay_seconds=10,
+            initial_delay=Duration.from_seconds(10),
             jitter_strategy=JitterStrategy.NONE,
         )
         strategy = create_wait_strategy(config)
@@ -244,7 +244,7 @@ def test_zero_backoff_rate(self):
         """Test behavior with zero backoff rate."""
         config = WaitStrategyConfig(
             should_continue_polling=lambda x: True,
-            initial_delay_seconds=5,
+            initial_delay=Duration.from_seconds(5),
             backoff_rate=0,
             jitter_strategy=JitterStrategy.NONE,
         )
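# Editor's note: Duration.from_seconds replaces the bare *_seconds integers
# throughout this patch. A minimal sketch of such a value object, assuming
# seconds-based storage (the real class may support more units and validation):
from dataclasses import dataclass

@dataclass(frozen=True)
class _DurationSketch:
    seconds: int

    @classmethod
    def from_seconds(cls, seconds: int) -> "_DurationSketch":
        return cls(seconds=seconds)

# Usage mirroring the tests: WaitStrategyConfig(initial_delay=Duration.from_seconds(2), ...)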
@@ -259,7 +259,7 @@ def test_fractional_backoff_rate(self):
         """Test behavior with fractional backoff rate."""
         config = WaitStrategyConfig(
             should_continue_polling=lambda x: True,
-            initial_delay_seconds=8,
+            initial_delay=Duration.from_seconds(8),
             backoff_rate=0.5,
             jitter_strategy=JitterStrategy.NONE,
         )
@@ -274,8 +274,8 @@ def test_large_backoff_rate(self):
         """Test behavior with large backoff rate hits max delay."""
         config = WaitStrategyConfig(
             should_continue_polling=lambda x: True,
-            initial_delay_seconds=10,
-            max_delay_seconds=100,
+            initial_delay=Duration.from_seconds(10),
+            max_delay=Duration.from_seconds(100),
             backoff_rate=10.0,
             jitter_strategy=JitterStrategy.NONE,
         )
@@ -307,7 +307,7 @@ def test_negative_delay_clamped_to_one(self):
         """Test negative delay is clamped to 1."""
         config = WaitStrategyConfig(
             should_continue_polling=lambda x: True,
-            initial_delay_seconds=0,
+            initial_delay=Duration.from_seconds(0),
             backoff_rate=0,
             jitter_strategy=JitterStrategy.NONE,
         )
@@ -323,15 +323,15 @@ def test_rounding_behavior(self, mock_random):
         mock_random.return_value = 0.3
         config = WaitStrategyConfig(
             should_continue_polling=lambda x: True,
-            initial_delay_seconds=3,
+            initial_delay=Duration.from_seconds(3),
             jitter_strategy=JitterStrategy.FULL,
         )
         strategy = create_wait_strategy(config)

         result = "pending"
         decision = strategy(result, 1)
-        # 3 + (0.3 * 3) = 3.9, round(3.9) = 4
-        assert decision.delay_seconds == 4
+        # FULL jitter: 0.3 * 3 = 0.9, ceil(0.9) = 1
+        assert decision.delay_seconds == 1

 class TestWaitForConditionConfig:
@@ -341,7 +341,7 @@ def test_config_creation(self):
         """Test creating WaitForConditionConfig."""

         def wait_strategy(state, attempt):
-            return WaitForConditionDecision.continue_waiting(10)
+            return WaitForConditionDecision.continue_waiting(Duration.from_seconds(10))

         config = WaitForConditionConfig(
             wait_strategy=wait_strategy, initial_state={"count": 0}