diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
new file mode 100644
index 0000000..3612c14
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -0,0 +1,91 @@
+name: 🐛 Bug Report
+description: Report a bug or unexpected behavior
+title: "[Bug]: "
+labels: ["bug"]
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Thanks for reporting a bug! Please fill out the information below.
+
+  - type: textarea
+    id: expected
+    attributes:
+      label: Expected Behavior
+      description: What did you expect to happen?
+      placeholder: I expected...
+    validations:
+      required: true
+
+  - type: textarea
+    id: actual
+    attributes:
+      label: Actual Behavior
+      description: What actually happened?
+      placeholder: Instead, what happened was...
+    validations:
+      required: true
+
+  - type: textarea
+    id: reproduce
+    attributes:
+      label: Steps to Reproduce
+      description: Provide steps to reproduce the issue
+      placeholder: |
+        1.
+        2.
+        3.
+    validations:
+      required: true
+
+  - type: input
+    id: sdk-version
+    attributes:
+      label: SDK Version
+      description: What version of the SDK are you using?
+      placeholder: e.g., 1.0.0
+    validations:
+      required: true
+
+  - type: dropdown
+    id: python-version
+    attributes:
+      label: Python Version
+      description: What version of Python are you using?
+      options:
+        - "3.14"
+        - "3.13"
+        - "3.12"
+        - "3.11"
+        - Other (specify in additional context)
+    validations:
+      required: true
+
+  - type: dropdown
+    id: regression
+    attributes:
+      label: Is this a regression?
+      description: Did this work in a previous version?
+      options:
+        - "No"
+        - "Yes"
+    validations:
+      required: true
+
+  - type: input
+    id: worked-version
+    attributes:
+      label: Last Working Version
+      description: If this is a regression, what version did this work in?
+      placeholder: e.g., 0.9.0
+    validations:
+      required: false
+
+  - type: textarea
+    id: context
+    attributes:
+      label: Additional Context
+      description: Add any other context, logs, or screenshots
+      placeholder: Additional information...
+    validations:
+      required: false
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 0000000..aa6cbb7
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,5 @@
+blank_issues_enabled: false
+contact_links:
+  - name: Ask a question
+    url: https://github.com/aws/aws-durable-execution-sdk-python/discussions/new
+    about: Ask a general question about the AWS Durable Execution SDK for Python
diff --git a/.github/ISSUE_TEMPLATE/documentation.yml b/.github/ISSUE_TEMPLATE/documentation.yml
new file mode 100644
index 0000000..cdd8e3e
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/documentation.yml
@@ -0,0 +1,36 @@
+name: 📚 Documentation Issue
+description: Report an issue with documentation
+title: "[Docs]: "
+labels: ["documentation"]
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Thanks for helping improve our documentation!
+
+  - type: textarea
+    id: issue
+    attributes:
+      label: Issue
+      description: Describe the documentation issue
+      placeholder: The documentation says... but it should say...
+    validations:
+      required: true
+
+  - type: input
+    id: page
+    attributes:
+      label: Page/Location
+      description: Link to the page or specify where in the docs this occurs
+      placeholder: https://... or README.md section "..."
+    validations:
+      required: true
+
+  - type: textarea
+    id: fix
+    attributes:
+      label: Suggested Fix
+      description: How should this be corrected?
+      placeholder: This could be fixed by...
+    validations:
+      required: false
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml
new file mode 100644
index 0000000..f4b648b
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.yml
@@ -0,0 +1,57 @@
+name: ✨ Feature Request
+description: Suggest a new feature or enhancement
+title: "[Feature]: "
+labels: ["enhancement"]
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Thanks for taking the time to suggest a new feature!
+
+  - type: textarea
+    id: what
+    attributes:
+      label: What would you like?
+      description: Describe the feature you'd like to see
+      placeholder: I would like to...
+    validations:
+      required: true
+
+  - type: textarea
+    id: implementation
+    attributes:
+      label: Possible Implementation
+      description: Suggest how this could be implemented
+      placeholder: This could be implemented by...
+    validations:
+      required: false
+
+  - type: dropdown
+    id: breaking-change
+    attributes:
+      label: Is this a breaking change?
+      options:
+        - "No"
+        - "Yes"
+    validations:
+      required: true
+
+  - type: dropdown
+    id: rfc
+    attributes:
+      label: Does this require an RFC?
+      description: RFC is required when changing existing behavior or for new features that require research
+      options:
+        - "No"
+        - "Yes"
+    validations:
+      required: true
+
+  - type: textarea
+    id: context
+    attributes:
+      label: Additional Context
+      description: Add any other context, examples, or screenshots
+      placeholder: Additional information...
+    validations:
+      required: false
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000..33209d2
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,14 @@
+# Reference: https://docs.github.com/en/github/administering-a-repository/configuration-options-for-dependency-updates
+version: 2
+updates:
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "weekly"
+    groups:
+      # Group updates together, so that they are all applied in a single PR.
+      # Grouped updates are currently in beta and are subject to change.
+      # xref: https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#groups
+      actions-deps:
+        patterns:
+          - "*"
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 1af4b28..376613a 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -16,9 +16,9 @@ jobs:
     # Note: To re-run `lint-commits` after fixing the PR title, close-and-reopen the PR.
runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - name: Use Node.js - uses: actions/setup-node@v4 + uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0 with: node-version: 22.x - name: Check PR title @@ -32,17 +32,17 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.13"] + python-version: ["3.11","3.12","3.13","3.14"] steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v6 + uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: ${{ matrix.python-version }} - name: Install Hatch run: | - python -m pip install --upgrade hatch + python -m pip install hatch==1.15.0 - name: static analysis run: hatch fmt --check - name: type checking diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index 8f47f48..ca66bb1 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -17,22 +17,19 @@ jobs: python-version: ["3.13"] steps: - - name: Parse testing SDK branch from PR body - id: parse - run: | - # Look for a line like: TESTING_SDK_BRANCH: feature/foo - REF=$(printf '%s\n' '${{ github.event.pull_request.body }}' | sed -n 's/^TESTING_SDK_BRANCH:[[:space:]]*//p' | head -n1) - if [ -z "$REF" ]; then REF="main"; fi - echo "testing_ref=$REF" >> "$GITHUB_OUTPUT" - echo "Using testing SDK branch: $REF" - - name: Checkout Language SDK (this PR) - uses: actions/checkout@v5 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: path: language-sdk + - name: Parse testing SDK branch from PR body + id: parse + run: python language-sdk/ops/parse_sdk_branch.py + env: + PR_BODY: ${{ github.event.pull_request.body }} + - name: Checkout Testing SDK - uses: actions/checkout@v5 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: repository: aws/aws-durable-execution-sdk-python-testing ref: ${{ steps.parse.outputs.testing_ref }} @@ -40,12 +37,12 @@ jobs: path: testing-sdk - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v6 + uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: ${{ matrix.python-version }} - name: Install Hatch - run: python -m pip install --upgrade hatch + run: python -m pip install hatch==1.15.0 - name: Setup and run Testing SDK working-directory: testing-sdk @@ -67,24 +64,21 @@ jobs: if: github.event_name == 'pull_request' env: AWS_REGION: us-west-2 - - steps: - - name: Parse testing SDK branch from PR body - id: parse - run: | - # Look for a line like: TESTING_SDK_BRANCH: feature/foo - REF=$(printf '%s\n' '${{ github.event.pull_request.body }}' | sed -n 's/^TESTING_SDK_BRANCH:[[:space:]]*//p' | head -n1) - if [ -z "$REF" ]; then REF="main"; fi - echo "testing_ref=$REF" >> "$GITHUB_OUTPUT" - echo "Using testing SDK branch: $REF" + steps: - name: Checkout Language SDK (this PR) - uses: actions/checkout@v5 + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: path: language-sdk + - name: Parse testing SDK branch from PR body + id: parse + run: python language-sdk/ops/parse_sdk_branch.py + env: + PR_BODY: ${{ github.event.pull_request.body }} + - name: Checkout Testing SDK - uses: actions/checkout@v5 + uses: 
actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: repository: aws/aws-durable-execution-sdk-python-testing ref: ${{ steps.parse.outputs.testing_ref }} @@ -92,28 +86,23 @@ jobs: path: testing-sdk - name: Set up Python 3.13 - uses: actions/setup-python@v6 + uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.13' - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v4 + uses: aws-actions/configure-aws-credentials@61815dcd50bd041e203e49132bacad1fd04d2708 # v5.1.1 with: role-to-assume: "${{ secrets.ACTIONS_INTEGRATION_ROLE_NAME }}" role-session-name: languageSDKIntegrationTest aws-region: ${{ env.AWS_REGION }} - - name: Install custom Lambda model - working-directory: testing-sdk - run: | - aws configure add-model --service-model file://.github/model/lambda.json --service-name lambda - - name: Install Hatch and setup Testing SDK working-directory: testing-sdk env: AWS_DURABLE_SDK_URL: file://${{ github.workspace }}/language-sdk run: | - pip install hatch + pip install hatch==1.15.0 python -m pip install -e . - name: Get integration examples @@ -122,6 +111,13 @@ jobs: run: | echo "examples=$(jq -c '.examples | map(select(.integration == true)) | .[0:2]' examples-catalog.json)" >> $GITHUB_OUTPUT + - name: Install AWS CLI v2 + run: | + curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "/tmp/awscliv2.zip" + unzip -q /tmp/awscliv2.zip -d /tmp + rm /tmp/awscliv2.zip + sudo /tmp/aws/install --update + rm -rf /tmp/aws/ - name: Deploy and test examples working-directory: testing-sdk env: @@ -133,20 +129,20 @@ jobs: run: | echo "Building examples..." hatch run examples:build - + # Get first integration example for testing EXAMPLE_NAME=$(echo '${{ steps.get-examples.outputs.examples }}' | jq -r '.[0].name') EXAMPLE_NAME_CLEAN=$(echo "$EXAMPLE_NAME" | sed 's/ //g') FUNCTION_NAME="${EXAMPLE_NAME_CLEAN}-LanguageSDK-PR-${{ github.event.number }}" - + echo "Deploying example: $EXAMPLE_NAME as $FUNCTION_NAME" hatch run examples:deploy "$EXAMPLE_NAME" --function-name "$FUNCTION_NAME" - + QUALIFIED_FUNCTION_NAME="$FUNCTION_NAME:\$LATEST" - + echo "Waiting for function to be ready..." aws lambda wait function-active --function-name "$FUNCTION_NAME" --endpoint-url "$LAMBDA_ENDPOINT" --region "${{ env.AWS_REGION }}" - + echo "Invoking Lambda function: $QUALIFIED_FUNCTION_NAME" aws lambda invoke \ --function-name "$QUALIFIED_FUNCTION_NAME" \ @@ -156,10 +152,10 @@ jobs: --endpoint-url "$LAMBDA_ENDPOINT" \ /tmp/response.json \ > /tmp/invoke_response.json - + echo "Response:" cat /tmp/response.json - + # Check for function errors FUNCTION_ERROR=$(jq -r '.FunctionError // empty' /tmp/invoke_response.json) if [ -n "$FUNCTION_ERROR" ]; then @@ -167,7 +163,7 @@ jobs: cat /tmp/response.json exit 1 fi - + echo "Getting durable executions..." 
aws lambda list-durable-executions-by-function \ --function-name "$QUALIFIED_FUNCTION_NAME" \ @@ -176,15 +172,13 @@ jobs: --endpoint-url "$LAMBDA_ENDPOINT" \ --cli-binary-format raw-in-base64-out \ > /tmp/executions.json - + echo "Durable Executions:" cat /tmp/executions.json - + # Cleanup echo "Cleaning up function: $FUNCTION_NAME" aws lambda delete-function \ --function-name "$FUNCTION_NAME" \ --endpoint-url "$LAMBDA_ENDPOINT" \ --region "${{ env.AWS_REGION }}" || echo "Function cleanup failed or already deleted" - - diff --git a/.github/workflows/pypi-publish.yml b/.github/workflows/pypi-publish.yml new file mode 100644 index 0000000..95278fa --- /dev/null +++ b/.github/workflows/pypi-publish.yml @@ -0,0 +1,71 @@ +# This workflow will upload a Python Package to PyPI when a release is created +# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries + +# This workflow uses actions that are not certified by GitHub. +# They are provided by a third-party and are governed by +# separate terms of service, privacy policy, and support +# documentation. + +name: Upload PyPI Package + +on: + release: + types: [published] + +permissions: + contents: read + +jobs: + release-build: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 + with: + python-version: "3.11" + - name: Install Hatch + run: | + python -m pip install --upgrade hatch==1.15.0 + - name: Build release distributions + run: | + # NOTE: put your own distribution build steps here. + hatch build + + - name: Upload distributions + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + with: + name: release-dists + path: dist/ + + pypi-publish: + runs-on: ubuntu-latest + needs: + - release-build + permissions: + # IMPORTANT: this permission is mandatory for trusted publishing + id-token: write + + # Dedicated environments with protections for publishing are strongly recommended. + # For more information, see: https://docs.github.com/en/actions/deployment/targeting-different-environments/using-environments-for-deployment#deployment-protection-rules + environment: + name: pypi + # OPTIONAL: uncomment and update to include your PyPI project URL in the deployment status: + # url: https://pypi.org/p/aws-durable-execution-sdk-python + # + # ALTERNATIVE: if your GitHub Release name is the PyPI project version string + # ALTERNATIVE: exactly, uncomment the following line instead: + url: https://pypi.org/project/aws-durable-execution-sdk-python/${{ github.event.release.name }} + + steps: + - name: Retrieve release distributions + uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0 + with: + name: release-dists + path: dist/ + + - name: Publish release distributions to PyPI + uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # v1.13.0 + with: + packages-dir: dist/ diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml new file mode 100644 index 0000000..9235ec3 --- /dev/null +++ b/.github/workflows/scorecard.yml @@ -0,0 +1,80 @@ +# This workflow uses actions that are not certified by GitHub. They are provided +# by a third-party and are governed by separate terms of service, privacy +# policy, and support documentation. + +name: Scorecard supply-chain security +on: + # For Branch-Protection check. 
Only the default branch is supported. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection + branch_protection_rule: + # To guarantee Maintained check is occasionally updated. See + # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained + schedule: + - cron: '21 16 * * 4' + push: + branches: [ "main" ] + workflow_dispatch: + +# Declare default permissions as read only. +permissions: + contents: read + +jobs: + analysis: + name: Scorecard analysis + runs-on: ubuntu-latest + # `publish_results: true` only works when run from the default branch. conditional can be removed if disabled. + if: github.event.repository.default_branch == github.ref_name || github.event_name == 'pull_request' + permissions: + # Needed to upload the results to code-scanning dashboard. + security-events: write + # Needed to publish results and get a badge (see publish_results below). + id-token: write + # Uncomment the permissions below if installing in a private repository. + # contents: read + # actions: read + + steps: + - name: "Checkout code" + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + with: + persist-credentials: false + + - name: "Run analysis" + uses: ossf/scorecard-action@4eaacf0543bb3f2c246792bd56e8cdeffafb205a # v2.4.3 + with: + results_file: results.sarif + results_format: sarif + # (Optional) "write" PAT token. Uncomment the `repo_token` line below if: + # - you want to enable the Branch-Protection check on a *public* repository, or + # - you are installing Scorecard on a *private* repository + # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action?tab=readme-ov-file#authentication-with-fine-grained-pat-optional. + # repo_token: ${{ secrets.SCORECARD_TOKEN }} + + # Public repositories: + # - Publish results to OpenSSF REST API for easy access by consumers + # - Allows the repository to include the Scorecard badge. + # - See https://github.com/ossf/scorecard-action#publishing-results. + # For private repositories: + # - `publish_results` will always be set to `false`, regardless + # of the value entered here. + publish_results: true + + # (Optional) Uncomment file_mode if you have a .gitattributes with files marked export-ignore + # file_mode: git + + # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF + # format to the repository Actions tab. + - name: "Upload artifact" + uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0 + with: + name: SARIF file + path: results.sarif + retention-days: 5 + + # Upload the results to GitHub's code scanning dashboard (optional). 
+ # Commenting out will disable upload of results to your repo's Code Scanning dashboard + - name: "Upload to code-scanning" + uses: github/codeql-action/upload-sarif@cf1bb45a277cb3c205638b2cd5c984db1c46a412 # v4.31.7 + with: + sarif_file: results.sarif diff --git a/.github/workflows/sync-package.yml b/.github/workflows/sync-package.yml index 6fd9c6b..7123109 100644 --- a/.github/workflows/sync-package.yml +++ b/.github/workflows/sync-package.yml @@ -20,18 +20,18 @@ jobs: python-version: ["3.13"] steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v6 + uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: ${{ matrix.python-version }} - name: Install Hatch run: | - python -m pip install --upgrade hatch + python -m pip install --upgrade hatch==1.15.0 - name: Build distribution run: hatch build - name: configure aws credentials - uses: aws-actions/configure-aws-credentials@v4 + uses: aws-actions/configure-aws-credentials@61815dcd50bd041e203e49132bacad1fd04d2708 # v5.1.1 with: role-to-assume: "${{ secrets.ACTIONS_SYNC_ROLE_NAME }}" role-session-name: gh-python diff --git a/.github/workflows/test-parser.yml b/.github/workflows/test-parser.yml new file mode 100644 index 0000000..da142d1 --- /dev/null +++ b/.github/workflows/test-parser.yml @@ -0,0 +1,24 @@ +name: Test Parser + +on: + pull_request: + paths: + - 'ops/parse_sdk_branch.py' + - 'ops/__tests__/**' + push: + branches: [ main ] + paths: + - 'ops/parse_sdk_branch.py' + - 'ops/__tests__/**' + +permissions: + contents: read + +jobs: + test-parser: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + + - name: Run parser tests + run: python ops/__tests__/test_parse_sdk_branch.py diff --git a/README.md b/README.md index c11017f..3ec5798 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,9 @@ -# aws-durable-functions-sdk-python +# AWS Durable Execution SDK for Python -[![PyPI - Version](https://img.shields.io/pypi/v/aws-durable-functions-sdk-python.svg)](https://pypi.org/project/aws-durable-functions-sdk-python) -[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/aws-durable-functions-sdk-python.svg)](https://pypi.org/project/aws-durable-functions-sdk-python) +[![PyPI - Version](https://img.shields.io/pypi/v/aws-durable-execution-sdk-python.svg)](https://pypi.org/project/aws-durable-execution-sdk-python) +[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/aws-durable-execution-sdk-python.svg)](https://pypi.org/project/aws-durable-execution-sdk-python) + +[![OpenSSF Scorecard](https://api.scorecard.dev/projects/github.com/aws/aws-durable-execution-sdk-python/badge)](https://scorecard.dev/viewer/?uri=github.com/aws/aws-durable-execution-sdk-python) ----- diff --git a/docs/advanced/error-handling.md b/docs/advanced/error-handling.md new file mode 100644 index 0000000..1123ad7 --- /dev/null +++ b/docs/advanced/error-handling.md @@ -0,0 +1,955 @@ +# Error Handling + +## Table of Contents + +- [Overview](#overview) +- [Terminology](#terminology) +- [Getting started](#getting-started) +- [Exception types](#exception-types) +- [Retry strategies](#retry-strategies) +- [Error response formats](#error-response-formats) +- [Common error scenarios](#common-error-scenarios) +- [Troubleshooting](#troubleshooting) +- [Best practices](#best-practices) +- [FAQ](#faq) +- [Testing](#testing) 
+- [See also](#see-also) + +[← Back to main index](../index.md) + +## Overview + +Error handling in durable functions determines how your code responds to failures. The SDK provides typed exceptions, automatic retry with exponential backoff, and AWS-compliant error responses that help you build resilient workflows. + +When errors occur, the SDK can: +- Retry transient failures automatically with configurable backoff +- Checkpoint failures with detailed error information +- Distinguish between recoverable and unrecoverable errors +- Provide clear termination reasons and stack traces for debugging + +[↑ Back to top](#table-of-contents) + +## Terminology + +**Exception** - A Python error that interrupts normal execution flow. The SDK provides specific exception types for different failure scenarios. + +**Retry strategy** - A function that determines whether to retry an operation after an exception and how long to wait before retrying. + +**Termination reason** - A code indicating why a durable execution terminated, such as `UNHANDLED_ERROR` or `INVOCATION_ERROR`. + +**Recoverable error** - An error that can be retried, such as transient network failures or rate limiting. + +**Unrecoverable error** - An error that terminates execution immediately without retry, such as validation errors or non-deterministic execution. + +**Backoff** - The delay between retry attempts, typically increasing exponentially to avoid overwhelming failing services. + +[↑ Back to top](#table-of-contents) + +## Getting started + +Here's a simple example of handling errors in a durable function: + +```python +from aws_durable_execution_sdk_python import ( + DurableContext, + durable_execution, + durable_step, + StepContext, +) + +@durable_step +def process_order(step_context: StepContext, order_id: str) -> dict: + """Process an order with validation.""" + if not order_id: + raise ValueError("Order ID is required") + + # Process the order + return {"order_id": order_id, "status": "processed"} + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + """Handle order processing with error handling.""" + try: + order_id = event.get("order_id") + result = context.step(process_order(order_id)) + return result + except ValueError as e: + # Handle validation errors from your code + return {"error": "InvalidInput", "message": str(e)} +``` + +When this function runs: +1. If `order_id` is missing, `ValueError` is raised from your code +2. The exception is caught and handled gracefully +3. A structured error response is returned to the caller + +[↑ Back to top](#table-of-contents) + +## Exception types + +The SDK provides several exception types for different failure scenarios. 
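When a handler can encounter several of these, catch the specific types before the shared base class — a minimal sketch, assuming the types below are all subclasses of `DurableExecutionsError` as described under [Base exceptions](#base-exceptions):

```python
from aws_durable_execution_sdk_python import (
    DurableExecutionsError,
    ExecutionError,
    ValidationError,
)

try:
    ...  # durable operations that may raise
except ValidationError as e:
    print(f"Invalid SDK arguments: {e}")  # fails immediately; fix the call
except ExecutionError as e:
    print(f"Permanent failure: {e}")      # returns FAILED status; don't retry
except DurableExecutionsError as e:
    print(f"Other SDK error: {e}")        # catch-all for remaining SDK exceptions
```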
### Exception summary

| Exception | Retryable | Behavior | Use case |
|-----------|-----------|----------|----------|
| `ValidationError` | No | Fails immediately | SDK detects invalid arguments |
| `ExecutionError` | No | Returns FAILED status | Permanent business logic failures |
| `InvocationError` | Yes (by Lambda) | Lambda retries invocation | Transient infrastructure issues |
| `CallbackError` | No | Returns FAILED status | Callback handling failures |
| `StepInterruptedError` | Yes (automatic) | Retries on next invocation | Step interrupted before checkpoint |
| `CheckpointError` | Depends | Retries if 4xx (except invalid token) | Failed to save execution state |
| `SerDesError` | No | Returns FAILED status | Serialization failures |

### Base exceptions

**DurableExecutionsError** - Base class for all SDK exceptions.

```python
from aws_durable_execution_sdk_python import DurableExecutionsError

try:
    # Your code here
    pass
except DurableExecutionsError as e:
    # Handle any SDK exception
    print(f"SDK error: {e}")
```

**UnrecoverableError** - Base class for errors that terminate execution. These errors include a `termination_reason` attribute.

```python
from aws_durable_execution_sdk_python import (
    ExecutionError,
    InvocationError,
)

try:
    # Your code here
    pass
except (ExecutionError, InvocationError) as e:
    # Access termination reason from unrecoverable errors
    print(f"Execution terminated: {e.termination_reason}")
```

### Validation errors

**ValidationError** - Raised by the SDK when you pass invalid arguments to SDK operations.

```python
from aws_durable_execution_sdk_python import (
    DurableContext,
    durable_execution,
    ValidationError,
)
from aws_durable_execution_sdk_python.config import CallbackConfig

@durable_execution
def handler(event: dict, context: DurableContext) -> dict:
    """Handle SDK validation errors."""
    try:
        # SDK raises ValidationError if timeout is invalid
        callback = context.create_callback(
            config=CallbackConfig(timeout_seconds=-1),  # Invalid!
            name="approval"
        )
        return {"callback_id": callback.callback_id}
    except ValidationError as e:
        # SDK caught invalid configuration
        return {"error": "InvalidConfiguration", "message": str(e)}
```

The SDK raises `ValidationError` when:
- Operation arguments are invalid (negative timeouts, empty names)
- Required parameters are missing
- Configuration values are out of range

### Execution errors

**ExecutionError** - Raised when execution fails in a way that shouldn't be retried. Returns `FAILED` status without retry.

```python
from aws_durable_execution_sdk_python import (
    ExecutionError,
    StepContext,
    durable_step,
)

@durable_step
def process_data(step_context: StepContext, data: dict) -> dict:
    """Process data with business logic validation."""
    if not data.get("required_field"):
        raise ExecutionError("Required field missing")
    return {"processed": True}
```

Use `ExecutionError` for:
- Business logic failures
- Invalid data that won't be fixed by retry
- Permanent failures that should fail fast

### Invocation errors

**InvocationError** - Raised when Lambda should retry the entire invocation. The SDK re-raises it from the handler, which triggers Lambda's invocation retry.
+ +```python +from aws_durable_execution_sdk_python import InvocationError + +@durable_step +def call_external_service(step_context: StepContext) -> dict: + """Call external service with retry.""" + try: + # Call external service + response = make_api_call() + return response + except ConnectionError: + # Trigger Lambda retry + raise InvocationError("Service unavailable") +``` + +Use `InvocationError` for: +- Service unavailability +- Network failures +- Transient infrastructure issues + +### Callback errors + +**CallbackError** - Raised when callback handling fails. + +```python +from aws_durable_execution_sdk_python import CallbackError +from aws_durable_execution_sdk_python.config import CallbackConfig + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + """Handle callback with error handling.""" + try: + callback = context.create_callback( + config=CallbackConfig(timeout_seconds=3600), + name="approval" + ) + context.wait_for_callback(callback) + return {"status": "approved"} + except CallbackError as e: + return {"error": "CallbackError", "callback_id": e.callback_id} +``` + +### Step interrupted errors + +**StepInterruptedError** - Raised when a step is interrupted before checkpointing. + +```python +from aws_durable_execution_sdk_python import StepInterruptedError + +# This can happen if Lambda times out during step execution +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + try: + result = context.step(long_running_operation()) + return result + except StepInterruptedError as e: + # Step was interrupted, will retry on next invocation + context.logger.warning(f"Step interrupted: {e.step_id}") + raise +``` + +### Serialization errors + +**SerDesError** - Raised when serialization or deserialization fails. + +```python +from aws_durable_execution_sdk_python import SerDesError + +@durable_step +def process_complex_data(step_context: StepContext, data: object) -> dict: + """Process data that might not be serializable.""" + try: + # Process data + return {"result": data} + except SerDesError as e: + # Handle serialization failure + return {"error": "Cannot serialize result"} +``` + +[↑ Back to top](#table-of-contents) + +## Retry strategies + +Configure retry behavior for steps using retry strategies. + +### Creating retry strategies + +Use `RetryStrategyConfig` to define retry behavior: + +```python +from aws_durable_execution_sdk_python import ( + DurableContext, + durable_execution, + durable_step, + StepContext, +) +from aws_durable_execution_sdk_python.config import StepConfig +from aws_durable_execution_sdk_python.retries import ( + RetryStrategyConfig, + create_retry_strategy, +) + +@durable_step +def unreliable_operation(step_context: StepContext) -> str: + """Operation that might fail.""" + # Your code here + return "success" + +@durable_execution +def handler(event: dict, context: DurableContext) -> str: + # Configure retry strategy + retry_config = RetryStrategyConfig( + max_attempts=3, + initial_delay_seconds=1, + max_delay_seconds=10, + backoff_rate=2.0, + retryable_error_types=[RuntimeError, ConnectionError], + ) + + # Create step config with retry + step_config = StepConfig( + retry_strategy=create_retry_strategy(retry_config) + ) + + # Execute with retry + result = context.step(unreliable_operation(), config=step_config) + return result +``` + +### RetryStrategyConfig parameters + +**max_attempts** - Maximum number of attempts (including the initial attempt). Default: 3. 
+ +**initial_delay_seconds** - Initial delay before first retry in seconds. Default: 5. + +**max_delay_seconds** - Maximum delay between retries in seconds. Default: 300 (5 minutes). + +**backoff_rate** - Multiplier for exponential backoff. Default: 2.0. + +**jitter_strategy** - Jitter strategy to add randomness to delays. Default: `JitterStrategy.FULL`. + +**retryable_errors** - List of error message patterns to retry (strings or regex patterns). Default: matches all errors. + +**retryable_error_types** - List of exception types to retry. Default: empty (retry all). + +### Retry presets + +The SDK provides preset retry strategies for common scenarios: + +```python +from aws_durable_execution_sdk_python.retries import RetryPresets +from aws_durable_execution_sdk_python.config import StepConfig + +# No retries +step_config = StepConfig(retry_strategy=RetryPresets.none()) + +# Default retries (6 attempts, 5s initial delay) +step_config = StepConfig(retry_strategy=RetryPresets.default()) + +# Quick retries for transient errors (3 attempts) +step_config = StepConfig(retry_strategy=RetryPresets.transient()) + +# Longer retries for resource availability (5 attempts, up to 5 minutes) +step_config = StepConfig(retry_strategy=RetryPresets.resource_availability()) + +# Aggressive retries for critical operations (10 attempts) +step_config = StepConfig(retry_strategy=RetryPresets.critical()) +``` + +### Retrying specific exceptions + +Only retry certain exception types: + +```python +from random import random +from aws_durable_execution_sdk_python import ( + DurableContext, + durable_execution, + durable_step, + StepContext, +) +from aws_durable_execution_sdk_python.config import StepConfig +from aws_durable_execution_sdk_python.retries import ( + RetryStrategyConfig, + create_retry_strategy, +) + +@durable_step +def call_api(step_context: StepContext) -> dict: + """Call external API that might fail.""" + if random() > 0.5: + raise ConnectionError("Network timeout") + return {"status": "success"} + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + # Only retry ConnectionError, not other exceptions + retry_config = RetryStrategyConfig( + max_attempts=3, + retryable_error_types=[ConnectionError], + ) + + result = context.step( + call_api(), + config=StepConfig(create_retry_strategy(retry_config)), + ) + + return result +``` + +### Exponential backoff + +Configure exponential backoff to avoid overwhelming failing services: + +```python +retry_config = RetryStrategyConfig( + max_attempts=5, + initial_delay_seconds=1, # First retry after 1 second + max_delay_seconds=60, # Cap at 60 seconds + backoff_rate=2.0, # Double delay each time: 1s, 2s, 4s, 8s, 16s... +) +``` + +With this configuration: +- Attempt 1: Immediate +- Attempt 2: After 1 second +- Attempt 3: After 2 seconds +- Attempt 4: After 4 seconds +- Attempt 5: After 8 seconds + +[↑ Back to top](#table-of-contents) + +## Error response formats + +The SDK follows AWS service conventions for error responses. + +### Error response structure + +When a durable function fails, the response includes: + +```json +{ + "errorType": "ExecutionError", + "errorMessage": "Order validation failed", + "termination_reason": "EXECUTION_ERROR", + "stackTrace": [ + " File \"/var/task/handler.py\", line 42, in process_order", + " raise ExecutionError(\"Order validation failed\")" + ] +} +``` + +### Termination reasons + +**UNHANDLED_ERROR** - An unhandled exception occurred in user code. 
+ +**INVOCATION_ERROR** - Lambda should retry the invocation. + +**EXECUTION_ERROR** - Execution failed and shouldn't be retried. + +**CHECKPOINT_FAILED** - Failed to checkpoint execution state. + +**NON_DETERMINISTIC_EXECUTION** - Execution produced different results on replay. + +**STEP_INTERRUPTED** - A step was interrupted before completing. + +**CALLBACK_ERROR** - Callback handling failed. + +**SERIALIZATION_ERROR** - Failed to serialize or deserialize data. + +### HTTP status codes + +When calling durable functions via API Gateway or Lambda URLs: + +- **200 OK** - Execution succeeded +- **400 Bad Request** - Validation error or invalid input +- **500 Internal Server Error** - Execution error or unhandled exception +- **503 Service Unavailable** - Invocation error (Lambda will retry) + +[↑ Back to top](#table-of-contents) + +## Common error scenarios + +### Handling input validation + +Validate input early and return clear error messages: + +```python +from aws_durable_execution_sdk_python import ( + DurableContext, + durable_execution, +) + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + """Validate input and handle errors.""" + # Validate required fields + if not event.get("user_id"): + return {"error": "InvalidInput", "message": "user_id is required"} + + if not event.get("action"): + return {"error": "InvalidInput", "message": "action is required"} + + # Process valid input + user_id = event["user_id"] + action = event["action"] + + result = context.step( + lambda _: {"user_id": user_id, "action": action, "status": "completed"}, + name="process_action" + ) + + return result +``` + +### Handling transient failures + +Retry transient failures automatically: + +```python +from aws_durable_execution_sdk_python import ( + DurableContext, + durable_execution, + durable_step, + StepContext, +) +from aws_durable_execution_sdk_python.config import StepConfig +from aws_durable_execution_sdk_python.retries import RetryPresets + +@durable_step +def call_external_api(step_context: StepContext, endpoint: str) -> dict: + """Call external API with retry.""" + # API call that might fail transiently + response = make_http_request(endpoint) + return response + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + """Handle API calls with automatic retry.""" + # Use transient preset for quick retries + step_config = StepConfig(retry_strategy=RetryPresets.transient()) + + try: + result = context.step( + call_external_api(event["endpoint"]), + config=step_config, + ) + return {"status": "success", "data": result} + except Exception as e: + # All retries exhausted + return {"status": "failed", "error": str(e)} +``` + +### Handling permanent failures + +Fail fast for permanent errors: + +```python +from aws_durable_execution_sdk_python import ( + DurableContext, + durable_execution, + durable_step, + ExecutionError, + StepContext, +) + +@durable_step +def process_payment(step_context: StepContext, amount: float, card: str) -> dict: + """Process payment with validation.""" + # Validate card + if not is_valid_card(card): + # Don't retry invalid cards + raise ExecutionError("Invalid card number") + + # Process payment + return {"transaction_id": "txn_123", "amount": amount} + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + """Handle payment with error handling.""" + try: + result = context.step( + process_payment(event["amount"], event["card"]) + ) + return {"status": "success", "transaction": result} + except 
ExecutionError as e: + # Permanent failure, don't retry + return {"status": "failed", "error": str(e)} +``` + +### Handling multiple error types + +Handle different error types appropriately: + +```python +from aws_durable_execution_sdk_python import ( + DurableContext, + durable_execution, + durable_step, + ExecutionError, + InvocationError, + ValidationError, + StepContext, +) + +@durable_step +def complex_operation(step_context: StepContext, data: dict) -> dict: + """Operation with multiple failure modes.""" + # Validate input + if not data: + raise ValueError("Data is required") + + # Check business rules + if data.get("amount", 0) < 0: + raise ExecutionError("Amount must be positive") + + # Call external service + try: + result = call_external_service(data) + return result + except ConnectionError: + # Transient failure + raise InvocationError("Service unavailable") + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + """Handle multiple error types.""" + try: + result = context.step(complex_operation(event)) + return {"status": "success", "result": result} + except ValueError as e: + return {"status": "invalid", "error": str(e)} + except ExecutionError as e: + return {"status": "failed", "error": str(e)} + except InvocationError as e: + # Let Lambda retry + raise +``` + +[↑ Back to top](#table-of-contents) + +## Troubleshooting + +### Step retries exhausted + +**Problem:** Your step fails after exhausting all retry attempts. + +**Cause:** The operation continues to fail, or the error isn't retryable. + +**Solution:** Check your retry configuration and error types: + +```python +# Ensure you're retrying the right errors +retry_config = RetryStrategyConfig( + max_attempts=5, # Increase attempts + retryable_error_types=[ConnectionError, TimeoutError], # Add error types +) +``` + +### Checkpoint failed errors + +**Problem:** Execution fails with `CheckpointError`. + +**Cause:** Failed to save execution state, possibly due to payload size limits or service issues. + +**Solution:** Reduce checkpoint payload size or check service health: + +```python +# Reduce payload size by returning only necessary data +@durable_step +def large_operation(step_context: StepContext) -> dict: + # Process large data + large_result = process_data() + + # Return only summary, not full data + return {"summary": large_result["summary"], "count": len(large_result["items"])} +``` + +### Callback timeout + +**Problem:** Callback times out before receiving a response. + +**Cause:** External system didn't respond within the timeout period. + +**Solution:** Increase callback timeout or implement retry logic: + +```python +from aws_durable_execution_sdk_python.config import CallbackConfig + +# Increase timeout +callback = context.create_callback( + config=CallbackConfig( + timeout_seconds=7200, # 2 hours + heartbeat_timeout_seconds=300, # 5 minutes + ), + name="long_running_approval" +) +``` + +### Step interrupted errors + +**Problem:** Steps are interrupted before completing. + +**Cause:** Lambda timeout or memory limit reached during step execution. 
+ +**Solution:** Increase Lambda timeout or break large steps into smaller ones: + +```python +# Break large operation into smaller steps +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + # Process in chunks instead of all at once + items = event["items"] + chunk_size = 100 + + results = [] + for i in range(0, len(items), chunk_size): + chunk = items[i:i + chunk_size] + result = context.step( + lambda _, c=chunk: process_chunk(c), + name=f"process_chunk_{i}" + ) + results.extend(result) + + return {"processed": len(results)} +``` + +[↑ Back to top](#table-of-contents) + +## Best practices + +**Validate input early** - Check for invalid input at the start of your function and return clear error responses or raise appropriate exceptions like `ValueError`. + +**Use appropriate exception types** - Choose the right exception type for each failure scenario. Use `ExecutionError` for permanent failures and `InvocationError` for transient issues. + +**Configure retry for transient failures** - Use retry strategies for operations that might fail temporarily, such as network calls or rate limits. + +**Fail fast for permanent errors** - Don't retry errors that won't be fixed by retrying, such as validation failures or business logic errors. + +**Wrap non-deterministic code in steps** - All code that produces different results on replay must be wrapped in steps, including random values, timestamps, and external API calls. + +**Handle errors explicitly** - Catch and handle exceptions in your code. Provide meaningful error messages to callers. + +**Log errors with context** - Use `context.logger` to log errors with execution context for debugging. + +**Keep error messages clear** - Write error messages that help users understand what went wrong and how to fix it. + +**Test error scenarios** - Write tests for both success and failure cases to ensure your error handling works correctly. + +**Monitor error rates** - Track error rates and termination reasons to identify issues in production. + +[↑ Back to top](#table-of-contents) + +## FAQ + +**Q: What's the difference between ExecutionError and InvocationError?** + +A: `ExecutionError` fails the execution without retry (returns FAILED status). `InvocationError` triggers Lambda to retry the entire invocation. Use `ExecutionError` for permanent failures and `InvocationError` for transient issues. + +**Q: How do I retry only specific exceptions?** + +A: Use `retryable_error_types` in `RetryStrategyConfig`: + +```python +retry_config = RetryStrategyConfig( + max_attempts=3, + retryable_error_types=[ConnectionError, TimeoutError], +) +``` + +**Q: Can I customize the backoff strategy?** + +A: Yes, configure `initial_delay_seconds`, `max_delay_seconds`, `backoff_rate`, and `jitter_strategy` in `RetryStrategyConfig`. + +**Q: What happens when retries are exhausted?** + +A: The step checkpoints the error and the exception propagates to your handler. You can catch and handle it there. 
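For example — a minimal sketch reusing `unreliable_operation` and `step_config` from the retry strategy example above, where the step's final error is a `RuntimeError`:

```python
try:
    result = context.step(unreliable_operation(), config=step_config)
except RuntimeError as e:
    # All attempts exhausted: the last error is checkpointed, then re-raised here
    result = {"status": "failed", "error": str(e)}
```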
**Q: How do I prevent duplicate operations on retry?**

A: Use at-most-once semantics for operations with side effects:

```python
from aws_durable_execution_sdk_python.config import StepConfig, StepSemantics

step_config = StepConfig(
    step_semantics=StepSemantics.AT_MOST_ONCE_PER_RETRY
)
```

**Q: Can I access error details in my code?**

A: Yes, catch the exception and access its attributes:

```python
try:
    result = context.step(operation())
except CallbackError as e:
    print(f"Callback failed: {e.callback_id}")
except NonDeterministicExecutionError as e:
    print(f"Non-deterministic step: {e.step_id}")
```

**Q: How do I handle errors in parallel operations?**

A: Wrap each parallel operation in a try-except block or let errors propagate to fail the entire execution:

```python
results = []
for item in items:
    try:
        result = context.step(lambda _, i=item: process(i), name=f"process_{item}")
        results.append(result)
    except Exception as e:
        results.append({"error": str(e)})
```

**Q: What's the maximum number of retry attempts?**

A: You can configure any number of attempts, but consider Lambda timeout limits. `RetryStrategyConfig` defaults to 3 attempts, and the default preset (`RetryPresets.default()`) uses 6.

[↑ Back to top](#table-of-contents)

## Testing

You can test error handling using the testing SDK. The test runner executes your function and lets you inspect errors.

### Testing successful execution

```python
import pytest
from aws_durable_execution_sdk_python_testing import InvocationStatus
from my_function import handler

@pytest.mark.durable_execution(
    handler=handler,
    lambda_function_name="my_function",
)
def test_success(durable_runner):
    """Test successful execution."""
    with durable_runner:
        result = durable_runner.run(input={"data": "test"}, timeout=10)

    assert result.status is InvocationStatus.SUCCEEDED
```

### Testing error conditions

Test that your function handles errors correctly:

```python
@pytest.mark.durable_execution(
    handler=handler_with_validation,
    lambda_function_name="validation_function",
)
def test_input_validation(durable_runner):
    """Test input validation handling."""
    with durable_runner:
        result = durable_runner.run(input={}, timeout=10)

    # Function should return error response for invalid input
    assert result.status is InvocationStatus.SUCCEEDED
    assert "error" in result.result
    assert result.result["error"] == "InvalidInput"
```

### Testing SDK validation errors

Test that the SDK catches invalid configuration:

```python
@pytest.mark.durable_execution(
    handler=handler_with_invalid_config,
    lambda_function_name="sdk_validation_function",
)
def test_sdk_validation_error(durable_runner):
    """Test SDK validation error handling."""
    with durable_runner:
        result = durable_runner.run(input={}, timeout=10)

    # SDK should catch invalid configuration
    assert result.status is InvocationStatus.FAILED
    assert "ValidationError" in str(result.error)
```

### Testing retry behavior

Test that steps retry correctly:

```python
@pytest.mark.durable_execution(
    handler=handler_with_retry,
    lambda_function_name="retry_function",
)
def test_retry_success(durable_runner):
    """Test that retries eventually succeed."""
    with durable_runner:
        result = durable_runner.run(input={}, timeout=30)

    # Should succeed after retries
    assert result.status is InvocationStatus.SUCCEEDED
```

### Testing retry exhaustion

Test that execution fails when retries are exhausted:

```python
@pytest.mark.durable_execution(
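    # assumes handler_always_fails raises RuntimeError on every attempt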
handler=handler_always_fails, + lambda_function_name="failing_function", +) +def test_retry_exhausted(durable_runner): + """Test that execution fails after exhausting retries.""" + with durable_runner: + result = durable_runner.run(input={}, timeout=30) + + # Should fail after all retries + assert result.status is InvocationStatus.FAILED + assert "RuntimeError" in str(result.error) +``` + +### Inspecting error details + +Inspect error details in test results: + +```python +@pytest.mark.durable_execution( + handler=handler_with_error, + lambda_function_name="error_function", +) +def test_error_details(durable_runner): + """Test error details are captured.""" + with durable_runner: + result = durable_runner.run(input={}, timeout=10) + + # Check error details + assert result.status is InvocationStatus.FAILED + assert result.error is not None + assert "error_type" in result.error + assert "message" in result.error +``` + +For more testing patterns, see: +- [Basic tests](../testing-patterns/basic-tests.md) - Simple test examples +- [Complex workflows](../testing-patterns/complex-workflows.md) - Multi-step workflow testing +- [Best practices](../testing-patterns/best-practices.md) - Testing recommendations + +[↑ Back to top](#table-of-contents) + +## See also + +- [Steps](../core/steps.md) - Configure retry for steps +- [Callbacks](../core/callbacks.md) - Handle callback errors +- [Child contexts](../core/child-contexts.md) - Error handling in nested contexts +- [Retry strategies](../api-reference/config.md) - Retry configuration reference +- [Examples](https://github.com/awslabs/aws-durable-execution-sdk-python/tree/main/examples/src/step) - Error handling examples + +[↑ Back to top](#table-of-contents) + +## License + +See the [LICENSE](../../LICENSE) file for our project's licensing. + +[↑ Back to top](#table-of-contents) diff --git a/docs/advanced/serialization.md b/docs/advanced/serialization.md new file mode 100644 index 0000000..112131a --- /dev/null +++ b/docs/advanced/serialization.md @@ -0,0 +1,771 @@ +# Serialization + +Learn how the SDK serializes and deserializes data for durable execution checkpoints. + +## Table of Contents + +- [Terminology](#terminology) +- [What is serialization?](#what-is-serialization) +- [Key features](#key-features) +- [Default serialization behavior](#default-serialization-behavior) +- [Supported types](#supported-types) +- [Converting non-serializable types](#converting-non-serializable-types) +- [Custom serialization](#custom-serialization) +- [Serialization in configurations](#serialization-in-configurations) +- [Best practices](#best-practices) +- [Troubleshooting](#troubleshooting) +- [FAQ](#faq) + +[← Back to main index](../index.md) + +## Terminology + +**Serialization** - Converting Python objects to strings for storage in checkpoints. + +**Deserialization** - Converting checkpoint strings back to Python objects. + +**SerDes** - Short for Serializer/Deserializer, a custom class that handles both serialization and deserialization. + +**Checkpoint** - A saved state of execution that includes serialized operation results. + +**Extended types** - Types beyond basic JSON (datetime, Decimal, UUID, bytes) that the SDK serializes automatically. + +**Envelope format** - The SDK's internal format that wraps complex types with type tags for accurate deserialization. + +[↑ Back to top](#table-of-contents) + +## What is serialization? + +Serialization converts Python objects into strings that can be stored in checkpoints. 
When your durable function resumes, deserialization converts those strings back into Python objects. The SDK handles this automatically for most types. + +[↑ Back to top](#table-of-contents) + +## Key features + +- Automatic serialization for common Python types +- Extended type support (datetime, Decimal, UUID, bytes) +- Custom serialization for complex objects +- Type preservation during round-trip serialization +- Efficient plain JSON for primitives + +[↑ Back to top](#table-of-contents) + +## Default serialization behavior + +The SDK handles most Python types automatically: + +```python +from aws_durable_execution_sdk_python import DurableContext, durable_execution +from datetime import datetime +from decimal import Decimal +from uuid import uuid4 + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + # All these types serialize automatically + result = context.step( + process_order, + order_id=uuid4(), + amount=Decimal("99.99"), + timestamp=datetime.now() + ) + return result +``` + +The SDK serializes data automatically when: +- Checkpointing step results +- Storing callback payloads +- Passing data to child contexts +- Returning results from your handler + +[↑ Back to top](#table-of-contents) + +## Supported types + +### Primitive types + +These types serialize as plain JSON for performance: + +```python +from aws_durable_execution_sdk_python import DurableContext, durable_execution + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + # Primitives - plain JSON + none_value = None + text = "hello" + number = 42 + decimal_num = 3.14 + flag = True + + # Simple lists of primitives - plain JSON + numbers = [1, 2, 3, 4, 5] + + return { + "none": none_value, + "text": text, + "number": number, + "decimal": decimal_num, + "flag": flag, + "numbers": numbers + } +``` + +**Supported primitive types:** +- `None` +- `str` +- `int` +- `float` +- `bool` +- Lists containing only primitives + +[↑ Back to top](#table-of-contents) + +### Extended types + +The SDK automatically handles these types using envelope format: + +```python +from aws_durable_execution_sdk_python import DurableContext, durable_execution +from datetime import datetime, date +from decimal import Decimal +from uuid import UUID, uuid4 + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + # Extended types - automatic serialization + order_data = { + "order_id": uuid4(), # UUID + "amount": Decimal("99.99"), # Decimal + "created_at": datetime.now(), # datetime + "delivery_date": date.today(), # date + "signature": b"binary_signature_data", # bytes + "coordinates": (40.7128, -74.0060), # tuple + } + + result = context.step(process_order, order_data) + return result +``` + +**Supported extended types:** +- `datetime` - ISO format with timezone +- `date` - ISO date format +- `Decimal` - Precise decimal numbers +- `UUID` - Universally unique identifiers +- `bytes`, `bytearray`, `memoryview` - Binary data (base64 encoded) +- `tuple` - Immutable sequences +- `list` - Mutable sequences (including nested) +- `dict` - Dictionaries (including nested) + +[↑ Back to top](#table-of-contents) + +### Container types + +Containers can hold any supported type, including nested containers: + +```python +from aws_durable_execution_sdk_python import DurableContext, durable_execution +from datetime import datetime +from decimal import Decimal +from uuid import uuid4 + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + # Nested structures 
serialize automatically + complex_data = { + "user": { + "id": uuid4(), + "created": datetime.now(), + "balance": Decimal("1234.56"), + "metadata": b"binary_data", + "coordinates": (40.7128, -74.0060), + "tags": ["premium", "verified"], + "settings": { + "notifications": True, + "theme": "dark", + "limits": { + "daily": Decimal("500.00"), + "monthly": Decimal("10000.00"), + }, + }, + } + } + + result = context.step(process_user, complex_data) + return result +``` + +[↑ Back to top](#table-of-contents) + +## Converting non-serializable types + +Some Python types aren't serializable by default. Convert them before passing to durable operations. + +### Dataclasses + +Convert dataclasses to dictionaries: + +```python +from dataclasses import dataclass, asdict +from aws_durable_execution_sdk_python import DurableContext, durable_execution + +@dataclass +class Order: + order_id: str + amount: float + customer: str + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + order = Order( + order_id="ORD-123", + amount=99.99, + customer="Jane Doe" + ) + + # Convert to dict before passing to step + result = context.step(process_order, asdict(order)) + return result +``` + +### Pydantic models + +Use Pydantic's built-in serialization: + +```python +from pydantic import BaseModel +from aws_durable_execution_sdk_python import DurableContext, durable_execution + +class Order(BaseModel): + order_id: str + amount: float + customer: str + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + order = Order( + order_id="ORD-123", + amount=99.99, + customer="Jane Doe" + ) + + # Use model_dump() to convert to dict + result = context.step(process_order, order.model_dump()) + return result +``` + +### Custom objects + +Implement `to_dict()` and `from_dict()` methods: + +```python +from aws_durable_execution_sdk_python import DurableContext, durable_execution + +class Order: + def __init__(self, order_id: str, amount: float, customer: str): + self.order_id = order_id + self.amount = amount + self.customer = customer + + def to_dict(self) -> dict: + return { + "order_id": self.order_id, + "amount": self.amount, + "customer": self.customer + } + + @classmethod + def from_dict(cls, data: dict) -> "Order": + return cls( + order_id=data["order_id"], + amount=data["amount"], + customer=data["customer"] + ) + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + order = Order("ORD-123", 99.99, "Jane Doe") + + # Convert to dict before passing to step + result = context.step(process_order, order.to_dict()) + return result +``` + +[↑ Back to top](#table-of-contents) + +## Custom serialization + +Implement custom serialization for specialized needs like encryption or compression. 
+ +### Creating a custom SerDes + +Extend the `SerDes` base class: + +```python +from aws_durable_execution_sdk_python.serdes import SerDes, SerDesContext +import json + +class UpperCaseSerDes(SerDes[str]): + """Example: Convert strings to uppercase during serialization.""" + + def serialize(self, value: str, serdes_context: SerDesContext) -> str: + return value.upper() + + def deserialize(self, data: str, serdes_context: SerDesContext) -> str: + return data.lower() +``` + +### Using custom SerDes with steps + +Pass your custom SerDes in `StepConfig`: + +```python +from aws_durable_execution_sdk_python import DurableContext, durable_execution, durable_step, StepContext +from aws_durable_execution_sdk_python.config import StepConfig +from aws_durable_execution_sdk_python.serdes import SerDes, SerDesContext +import json + +class CompressedSerDes(SerDes[dict]): + """Example: Compress large dictionaries.""" + + def serialize(self, value: dict, serdes_context: SerDesContext) -> str: + # In production, use actual compression like gzip + return json.dumps(value, separators=(',', ':')) + + def deserialize(self, data: str, serdes_context: SerDesContext) -> dict: + return json.loads(data) + +@durable_step +def process_large_data(step_context: StepContext, data: dict) -> dict: + # Process the data + return {"processed": True, "items": len(data)} + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + large_data = {"items": [f"item_{i}" for i in range(1000)]} + + # Use custom SerDes for this step + config = StepConfig(serdes=CompressedSerDes()) + result = context.step(process_large_data(large_data), config=config) + + return result +``` + +### Encryption example + +Encrypt sensitive data in checkpoints: + +```python +from aws_durable_execution_sdk_python import DurableContext, durable_execution, durable_step, StepContext +from aws_durable_execution_sdk_python.config import StepConfig +from aws_durable_execution_sdk_python.serdes import SerDes, SerDesContext +import json +import base64 + +class EncryptedSerDes(SerDes[dict]): + """Example: Encrypt sensitive data (simplified for demonstration).""" + + def __init__(self, encryption_key: str): + self.encryption_key = encryption_key + + def serialize(self, value: dict, serdes_context: SerDesContext) -> str: + json_str = json.dumps(value) + # In production, use proper encryption like AWS KMS + encrypted = base64.b64encode(json_str.encode()).decode() + return encrypted + + def deserialize(self, data: str, serdes_context: SerDesContext) -> dict: + # In production, use proper decryption + decrypted = base64.b64decode(data.encode()).decode() + return json.loads(decrypted) + +@durable_step +def process_sensitive_data(step_context: StepContext, data: dict) -> dict: + return {"processed": True} + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + sensitive_data = { + "ssn": "123-45-6789", + "credit_card": "4111-1111-1111-1111" + } + + # Encrypt data in checkpoints + config = StepConfig(serdes=EncryptedSerDes("my-key")) + result = context.step(process_sensitive_data(sensitive_data), config=config) + + return result +``` + +[↑ Back to top](#table-of-contents) + +## Serialization in configurations + +Different operations support custom serialization through their configuration objects. 
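+
+The configuration snippets below reference `CustomSerDes` (and, for map operations, `ItemSerDes`) as stand-ins for your own implementation; neither class is part of the SDK. As a minimal sketch, assuming only the `SerDes` interface shown above, such a placeholder could be a plain JSON pass-through:
+
+```python
+import json
+
+from aws_durable_execution_sdk_python.serdes import SerDes, SerDesContext
+
+
+class CustomSerDes(SerDes[dict]):
+    """Placeholder SerDes for the configuration examples below."""
+
+    def serialize(self, value: dict, serdes_context: SerDesContext) -> str:
+        # Plain JSON encoding; swap in your own encoding logic
+        return json.dumps(value)
+
+    def deserialize(self, data: str, serdes_context: SerDesContext) -> dict:
+        return json.loads(data)
+```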
+ +### StepConfig + +Control serialization for step results: + +```python +from aws_durable_execution_sdk_python import DurableContext, durable_execution +from aws_durable_execution_sdk_python.config import StepConfig + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + config = StepConfig(serdes=CustomSerDes()) + result = context.step(my_function(), config=config) + return result +``` + +### CallbackConfig + +Control serialization for callback payloads: + +```python +from aws_durable_execution_sdk_python import DurableContext, durable_execution +from aws_durable_execution_sdk_python.config import CallbackConfig, Duration + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + config = CallbackConfig( + timeout=Duration.from_hours(2), + serdes=CustomSerDes() + ) + callback = context.create_callback(config=config) + + # Send callback.callback_id to external system + return {"callback_id": callback.callback_id} +``` + +### MapConfig and ParallelConfig + +Control serialization for batch results: + +```python +from aws_durable_execution_sdk_python import DurableContext, durable_execution +from aws_durable_execution_sdk_python.config import MapConfig + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + items = [1, 2, 3, 4, 5] + + # Custom serialization for BatchResult + config = MapConfig( + serdes=CustomSerDes(), # For the entire BatchResult + item_serdes=ItemSerDes() # For individual item results + ) + + result = context.map(process_item, items, config=config) + return {"processed": len(result.succeeded)} +``` + +**Note:** When both `serdes` and `item_serdes` are provided: +- `item_serdes` serializes individual item results in child contexts +- `serdes` serializes the entire `BatchResult` at the handler level + +For backward compatibility, if only `serdes` is provided, it's used for both individual items and the `BatchResult`. + +[↑ Back to top](#table-of-contents) + +## Best practices + +### Use default serialization when possible + +The SDK handles most cases efficiently without custom serialization: + +```python +# Good - uses default serialization +from datetime import datetime +from decimal import Decimal + +result = context.step( + process_order, + order_id="ORD-123", + amount=Decimal("99.99"), + timestamp=datetime.now() +) +``` + +### Convert complex objects to dicts + +Convert custom objects to dictionaries before passing to durable operations: + +```python +# Good - convert to dict first +order_dict = order.to_dict() +result = context.step(process_order, order_dict) + +# Avoid - custom objects aren't serializable +result = context.step(process_order, order) # Will fail +``` + +### Keep serialized data small + +Large checkpoints might slow down execution. 
Keep data compact: + +```python +# Good - only checkpoint what you need +result = context.step( + process_data, + {"id": order.id, "amount": order.amount} +) + +# Avoid - large objects in checkpoints +result = context.step( + process_data, + entire_database_dump # Too large +) +``` + +### Use appropriate types + +Choose types that serialize efficiently: + +```python +# Good - Decimal for precise amounts +amount = Decimal("99.99") + +# Avoid - float for money (precision issues) +amount = 99.99 +``` + +### Test serialization round-trips + +Verify your data survives serialization: + +```python +from aws_durable_execution_sdk_python.serdes import serialize, deserialize + +def test_serialization(): + original = {"amount": Decimal("99.99")} + serialized = serialize(None, original, "test-op", "test-arn") + deserialized = deserialize(None, serialized, "test-op", "test-arn") + + assert deserialized == original +``` + +### Handle serialization errors gracefully + +Catch and handle serialization errors: + +```python +from aws_durable_execution_sdk_python.exceptions import ExecutionError + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + try: + result = context.step(process_data, complex_object) + except ExecutionError as e: + if "Serialization failed" in str(e): + # Convert to serializable format + simple_data = convert_to_dict(complex_object) + result = context.step(process_data, simple_data) + else: + raise + + return result +``` + +[↑ Back to top](#table-of-contents) + +## Troubleshooting + +### Unsupported type error + +**Problem:** `SerDesError: Unsupported type: ` + +**Solution:** Convert custom objects to supported types: + +```python +# Before - fails +result = context.step(process_order, order_object) + +# After - works +result = context.step(process_order, order_object.to_dict()) +``` + +### Serialization failed error + +**Problem:** `ExecutionError: Serialization failed for id: step-123` + +**Cause:** The data contains types that can't be serialized. + +**Solution:** Check for circular references or unsupported types: + +```python +# Circular reference - fails +data = {"self": None} +data["self"] = data + +# Fix - remove circular reference +data = {"id": 123, "name": "test"} +``` + +### Type not preserved after deserialization + +**Problem:** `tuple` becomes `list` or `Decimal` becomes `float` + +**Cause:** Using a custom SerDes that doesn't preserve types. 
+ +**Solution:** Use default serialization which preserves types: + +```python +# Default serialization preserves tuple +result = context.step(process_data, (1, 2, 3)) # Stays as tuple + +# If using custom SerDes, ensure it preserves types +class TypePreservingSerDes(SerDes[Any]): + def serialize(self, value: Any, context: SerDesContext) -> str: + # Implement type preservation logic + pass +``` + +### Large payload errors + +**Problem:** Checkpoint size exceeds limits + +**Solution:** Reduce data size or use summary generators: + +```python +# Option 1: Reduce data +small_data = {"id": order.id, "status": order.status} +result = context.step(process_order, small_data) + +# Option 2: Use summary generator (for map/parallel) +def generate_summary(result): + return json.dumps({"count": len(result.all)}) + +config = MapConfig(summary_generator=generate_summary) +result = context.map(process_item, items, config=config) +``` + +### Datetime timezone issues + +**Problem:** Datetime loses timezone information + +**Solution:** Always use timezone-aware datetime objects: + +```python +from datetime import datetime, UTC + +# Good - timezone aware +timestamp = datetime.now(UTC) + +# Avoid - naive datetime +timestamp = datetime.now() # No timezone +``` + +[↑ Back to top](#table-of-contents) + +## FAQ + +### What types can I serialize? + +The SDK supports: +- Primitives: `None`, `str`, `int`, `float`, `bool` +- Extended: `datetime`, `date`, `Decimal`, `UUID`, `bytes`, `tuple` +- Containers: `list`, `dict` (including nested) + +For other types, convert to dictionaries first. + +### Do I need custom serialization? + +Most applications don't need custom serialization. Use it for: +- Encryption of sensitive data +- Compression of large payloads +- Special encoding requirements +- Legacy format compatibility + +### How does serialization affect performance? + +The SDK optimizes for performance: +- Primitives use plain JSON (fast) +- Extended types use envelope format (slightly slower but preserves types) +- Custom SerDes adds overhead based on your implementation + +### Can I serialize Pydantic models? + +Yes, convert them to dictionaries: + +```python +order = Order(order_id="ORD-123", amount=99.99) +result = context.step(process_order, order.model_dump()) +``` + +### What's the difference between serdes and item_serdes? + +In `MapConfig` and `ParallelConfig`: +- `item_serdes`: Serializes individual item results in child contexts +- `serdes`: Serializes the entire `BatchResult` at handler level + +If only `serdes` is provided, it's used for both (backward compatibility). + +### How do I handle binary data? + +Use `bytes` type - it's automatically base64 encoded: + +```python +binary_data = b"binary content" +result = context.step(process_binary, binary_data) +``` + +### Can I use JSON strings directly? + +Yes, use `PassThroughSerDes` or `JsonSerDes`: + +```python +from aws_durable_execution_sdk_python.serdes import JsonSerDes +from aws_durable_execution_sdk_python.config import StepConfig + +config = StepConfig(serdes=JsonSerDes()) +result = context.step(process_json, json_string, config=config) +``` + +### What happens if serialization fails? + +The SDK raises `ExecutionError` with details. 
Handle it in your code: + +```python +from aws_durable_execution_sdk_python.exceptions import ExecutionError + +try: + result = context.step(process_data, data) +except ExecutionError as e: + context.logger.error(f"Serialization failed: {e}") + # Handle error or convert data +``` + +### How do I debug serialization issues? + +Test serialization independently: + +```python +from aws_durable_execution_sdk_python.serdes import serialize, deserialize + +try: + serialized = serialize(None, my_data, "test-op", "test-arn") + deserialized = deserialize(None, serialized, "test-op", "test-arn") + print("Serialization successful") +except Exception as e: + print(f"Serialization failed: {e}") +``` + +### Are there size limits for serialized data? + +Yes, checkpoints have size limits (typically 256KB). Keep data compact: +- Only checkpoint necessary data +- Use summary generators for large results +- Store large data externally (S3) and checkpoint references + +[↑ Back to top](#table-of-contents) + +## See also + +- [Steps](../core/steps.md) - Using steps with custom serialization +- [Callbacks](../core/callbacks.md) - Serializing callback payloads +- [Map Operations](../core/map.md) - Serialization in map operations +- [Error Handling](error-handling.md) - Handling serialization errors +- [Best Practices](../best-practices.md) - General best practices + +[↑ Back to index](#table-of-contents) diff --git a/docs/advanced/testing-modes.md b/docs/advanced/testing-modes.md new file mode 100644 index 0000000..5d05d35 --- /dev/null +++ b/docs/advanced/testing-modes.md @@ -0,0 +1,496 @@ +# Testing Modes: Local vs Cloud + +## Table of Contents + +- [Overview](#overview) +- [Terminology](#terminology) +- [Key features](#key-features) +- [Getting started](#getting-started) +- [Configuration](#configuration) +- [Deployment workflow](#deployment-workflow) +- [Running tests in different modes](#running-tests-in-different-modes) +- [Local vs cloud modes](#local-vs-cloud-modes) +- [Best practices](#best-practices) +- [Troubleshooting](#troubleshooting) +- [See also](#see-also) + +[← Back to main index](../index.md) + +## Overview + +The [AWS Durable Execution SDK Testing Framework](https://github.com/aws/aws-durable-execution-sdk-python-testing) (`aws-durable-execution-sdk-python-testing`) supports two execution modes: local and cloud. Local mode runs tests in-memory for fast development, while cloud mode runs tests against actual AWS Lambda functions for integration validation. + +**Local mode** uses `DurableFunctionTestRunner` to execute your function in-memory without AWS deployment. It's fast, requires no credentials, and perfect for development. + +**Cloud mode** uses `DurableFunctionCloudTestRunner` to invoke deployed Lambda functions and poll for completion. It validates your function's behavior in a real AWS environment, including Lambda runtime behavior, IAM permissions, and service integrations. + +[↑ Back to top](#table-of-contents) + +## Terminology + +**Cloud mode** - Test execution mode that runs tests against deployed Lambda functions in AWS. + +**Local mode** - Test execution mode that runs tests in-memory without AWS deployment (default). + +**DurableFunctionCloudTestRunner** - Test runner class that executes durable functions against AWS Lambda backend. + +**Qualified function name** - Lambda function identifier including version or alias (e.g., `MyFunction:$LATEST`). + +**Polling** - The process of repeatedly checking execution status until completion. 
+
+[↑ Back to top](#table-of-contents)
+
+## Key features
+
+- **Real AWS environment** - Tests run against actual Lambda functions
+- **End-to-end validation** - Verifies deployment, IAM permissions, and service integrations
+- **Same test interface** - Tests work in both local and cloud modes without changes
+- **Automatic polling** - Waits for execution completion automatically
+- **Execution history** - Retrieves full execution history for assertions
+
+[↑ Back to top](#table-of-contents)
+
+## Getting started
+
+Here's a simple example of running tests in cloud mode:
+
+```python
+import pytest
+from aws_durable_execution_sdk_python.execution import InvocationStatus
+from examples.src import hello_world
+
+
+@pytest.mark.durable_execution(
+    handler=hello_world.handler,
+    lambda_function_name="hello world",
+)
+def test_hello_world(durable_runner):
+    """Test hello world in both local and cloud modes."""
+    with durable_runner:
+        result = durable_runner.run(input="test", timeout=10)
+
+    assert result.status == InvocationStatus.SUCCEEDED
+    assert result.result == "Hello World!"
+```
+
+Run the test in cloud mode:
+
+```console
+# Set environment variables (single quotes keep $LATEST literal)
+export AWS_REGION=us-west-2
+export QUALIFIED_FUNCTION_NAME='HelloWorld:$LATEST'
+export LAMBDA_FUNCTION_TEST_NAME="hello world"
+
+# Run test
+pytest --runner-mode=cloud -k test_hello_world
+```
+
+[↑ Back to top](#table-of-contents)
+
+## Configuration
+
+### Environment variables
+
+Cloud mode requires these environment variables:
+
+**Required:**
+
+- `QUALIFIED_FUNCTION_NAME` - The deployed Lambda function ARN or qualified name
+  - Example: `MyFunction:$LATEST`
+  - Example: `arn:aws:lambda:us-west-2:123456789012:function:MyFunction:$LATEST`
+
+- `LAMBDA_FUNCTION_TEST_NAME` - The function name to match against test markers
+  - Example: `hello world`
+  - Must match the `lambda_function_name` parameter in `@pytest.mark.durable_execution`
+
+**Optional:**
+
+- `AWS_REGION` - AWS region for Lambda invocation (default: `us-west-2`)
+  - Example: `us-east-1`
+
+- `LAMBDA_ENDPOINT` - Custom Lambda endpoint URL for testing
+  - Example: `https://lambda.us-west-2.amazonaws.com`
+  - Useful for testing against local Lambda emulators
+
+### CLI options
+
+- `--runner-mode` - Test execution mode
+  - `local` (default) - Run tests in-memory
+  - `cloud` - Run tests against deployed Lambda functions
+
+### Test markers
+
+Use the `@pytest.mark.durable_execution` marker to configure tests:
+
+```python
+@pytest.mark.durable_execution(
+    handler=my_function.handler,  # Required for local mode
+    lambda_function_name="my function",  # Required for cloud mode
+)
+def test_my_function(durable_runner):
+    # Test code here
+    pass
+```
+
+**Parameters:**
+
+- `handler` - The durable function handler (required for local mode)
+- `lambda_function_name` - The function name for cloud mode matching (required for cloud mode)
+
+[↑ Back to top](#table-of-contents)
+
+## Deployment workflow
+
+Follow these steps to deploy and test your durable functions in the cloud:
+
+### 1. Deploy your function
+
+Deploy your Lambda function to AWS using your preferred deployment tool (SAM, CDK, Terraform, etc.):
+
+```console
+# Example using SAM
+sam build
+sam deploy --stack-name my-durable-function
+```
+
+### 2. Get the function ARN
+
+After deployment, get the qualified function name or ARN:
+
+```console
+# Get function ARN
+aws lambda get-function --function-name MyFunction --query 'Configuration.FunctionArn'
+```
+
+### 3. Set environment variables
+
+Configure the environment for cloud testing:
+
+```console
+# Single quotes keep $LATEST literal
+export AWS_REGION=us-west-2
+export QUALIFIED_FUNCTION_NAME='MyFunction:$LATEST'
+export LAMBDA_FUNCTION_TEST_NAME="my function"
+```
+
+### 4. Run tests
+
+Execute your tests in cloud mode:
+
+```console
+pytest --runner-mode=cloud -k test_my_function
+```
+
+[↑ Back to top](#table-of-contents)
+
+## Running tests in different modes
+
+### Run all tests in local mode (default)
+
+```console
+pytest examples/test/
+```
+
+### Run all tests in cloud mode
+
+```console
+pytest --runner-mode=cloud examples/test/
+```
+
+### Run specific test in cloud mode
+
+```console
+pytest --runner-mode=cloud -k test_hello_world examples/test/
+```
+
+### Run with custom timeout
+
+Increase the timeout for long-running functions:
+
+```python
+def test_long_running(durable_runner):
+    with durable_runner:
+        result = durable_runner.run(input="test", timeout=300)  # 5 minutes
+
+    assert result.status == InvocationStatus.SUCCEEDED
+```
+
+### Mode-specific assertions
+
+Check the runner mode in your tests:
+
+```python
+def test_with_mode_check(durable_runner):
+    with durable_runner:
+        result = durable_runner.run(input="test", timeout=10)
+
+    assert result.status == InvocationStatus.SUCCEEDED
+
+    # Cloud-specific validation
+    if durable_runner.mode == "cloud":
+        # Additional assertions for cloud environment
+        pass
+```
+
+[↑ Back to top](#table-of-contents)
+
+## Local vs cloud modes
+
+### Comparison
+
+| Feature | Local Mode | Cloud Mode |
+|---------|-----------|------------|
+| **Execution** | In-memory | AWS Lambda |
+| **Speed** | Fast (seconds) | Slower (network latency) |
+| **AWS credentials** | Not required | Required |
+| **Deployment** | Not required | Required |
+| **IAM permissions** | Not validated | Validated |
+| **Service integrations** | Mocked | Real |
+| **Cost** | Free | Lambda invocation costs |
+| **Use case** | Development, unit tests | Integration tests, validation |
+
+### When to use local mode
+
+Use local mode for:
+- **Development** - Fast iteration during development
+- **Unit tests** - Testing function logic without AWS dependencies
+- **CI/CD** - Fast feedback in pull request checks
+- **Debugging** - Easy debugging with local tools
+
+### When to use cloud mode
+
+Use cloud mode for:
+- **Integration testing** - Validate real AWS service integrations
+- **Deployment validation** - Verify deployed functions work correctly
+- **IAM testing** - Ensure permissions are configured correctly
+- **End-to-end testing** - Test complete workflows in a production-like environment
+
+### Writing mode-agnostic tests
+
+Write tests that work in both modes:
+
+```python
+@pytest.mark.durable_execution(
+    handler=my_function.handler,
+    lambda_function_name="my function",
+)
+def test_my_function(durable_runner):
+    """Test works in both local and cloud modes."""
+    with durable_runner:
+        result = durable_runner.run(input={"value": 42}, timeout=10)
+
+    # These assertions work in both modes
+    assert result.status == InvocationStatus.SUCCEEDED
+    assert result.result == "expected output"
+```
+
+[↑ Back to top](#table-of-contents)
+
+## Best practices
+
+### Use local mode for development
+
+Run tests locally during development for fast feedback:
+
+```console
+# Fast local testing
+pytest examples/test/
+```
+
+### Use cloud mode for validation
+
+Run cloud tests before merging or deploying:
+
+```console
+# Validate deployment
+pytest --runner-mode=cloud examples/test/
+```
+
+### Set appropriate timeouts
+
+Cloud tests need longer timeouts due to network latency:
+
+```python
+# Local mode: short timeout
+result = runner.run(input="test", timeout=10)
+
+# Cloud mode: longer timeout
+result = runner.run(input="test", timeout=60)
+```
+
+### Use environment-specific configuration
+
+Configure different settings for different environments:
+
+```console
+# Development (single quotes keep $LATEST literal)
+export AWS_REGION=us-west-2
+export QUALIFIED_FUNCTION_NAME='MyFunction-Dev:$LATEST'
+
+# Production
+export AWS_REGION=us-east-1
+export QUALIFIED_FUNCTION_NAME='MyFunction-Prod:$LATEST'
+```
+
+### Test one function at a time
+
+When running cloud tests, test one function at a time to avoid confusion:
+
+```console
+# Test specific function
+export LAMBDA_FUNCTION_TEST_NAME="hello world"
+pytest --runner-mode=cloud -k test_hello_world
+```
+
+### Use CI/CD for automated cloud testing
+
+Integrate cloud testing into your CI/CD pipeline:
+
+```yaml
+# Example GitHub Actions workflow
+- name: Deploy function
+  id: deploy  # this step must write function_arn to $GITHUB_OUTPUT for the reference below
+  run: sam deploy --stack-name test-stack
+
+- name: Run cloud tests
+  env:
+    AWS_REGION: us-west-2
+    QUALIFIED_FUNCTION_NAME: ${{ steps.deploy.outputs.function_arn }}
+    LAMBDA_FUNCTION_TEST_NAME: "hello world"
+  run: pytest --runner-mode=cloud -k test_hello_world
+```
+
+[↑ Back to top](#table-of-contents)
+
+## Troubleshooting
+
+### TimeoutError: Execution did not complete
+
+**Problem:** Test times out waiting for execution to complete.
+
+**Cause:** The function takes longer than the timeout value, or the function is stuck.
+
+**Solution:** Increase the timeout parameter:
+
+```python
+# Increase timeout to 120 seconds
+result = runner.run(input="test", timeout=120)
+```
+
+Check the Lambda function logs to see if it's actually running:
+
+```console
+aws logs tail /aws/lambda/MyFunction --follow
+```
+
+### Environment variables not set
+
+**Problem:** `Cloud mode requires both QUALIFIED_FUNCTION_NAME and LAMBDA_FUNCTION_TEST_NAME environment variables`
+
+**Cause:** Required environment variables are missing.
+
+**Solution:** Set both required environment variables:
+
+```console
+export QUALIFIED_FUNCTION_NAME='MyFunction:$LATEST'
+export LAMBDA_FUNCTION_TEST_NAME="hello world"
+```
+
+### Test skipped: doesn't match LAMBDA_FUNCTION_TEST_NAME
+
+**Problem:** Test is skipped with a message about a function name mismatch.
+
+**Cause:** The test's `lambda_function_name` doesn't match `LAMBDA_FUNCTION_TEST_NAME`.
+
+**Solution:** Do one of the following:
+1. Update `LAMBDA_FUNCTION_TEST_NAME` to match the test:
+   ```console
+   export LAMBDA_FUNCTION_TEST_NAME="my function"
+   ```
+
+2. Run only the matching test:
+   ```console
+   pytest --runner-mode=cloud -k test_hello_world
+   ```
+
+### Failed to invoke Lambda function
+
+**Problem:** `Failed to invoke Lambda function MyFunction: ...`
+
+**Cause:** AWS credentials are invalid, the function doesn't exist, or IAM permissions are missing.
+
+**Solution:**
+
+1. Verify AWS credentials:
+   ```console
+   aws sts get-caller-identity
+   ```
+
+2. Verify the function exists:
+   ```console
+   aws lambda get-function --function-name MyFunction
+   ```
+
+3. Check IAM permissions - you need `lambda:InvokeFunction` permission:
+   ```json
+   {
+     "Effect": "Allow",
+     "Action": "lambda:InvokeFunction",
+     "Resource": "arn:aws:lambda:*:*:function:*"
+   }
+   ```
+
+### No DurableExecutionArn in response
+
+**Problem:** `No DurableExecutionArn in response for function MyFunction`
+
+**Cause:** The Lambda function is not a durable function or doesn't have durable execution enabled.
+ +**Solution:** Ensure your function is decorated with `@durable_execution`: + +```python +from aws_durable_execution_sdk_python import durable_execution, DurableContext + +@durable_execution +def handler(event: dict, context: DurableContext): + # Your durable function code + pass +``` + +### Lambda function failed + +**Problem:** `Lambda function failed: ...` + +**Cause:** The function threw an unhandled exception. + +**Solution:** Check the Lambda function logs: + +```console +aws logs tail /aws/lambda/MyFunction --follow +``` + +Fix the error in your function code and redeploy. + +### Failed to get execution status + +**Problem:** `Failed to get execution status: ...` + +**Cause:** The Lambda service API call failed. + +**Solution:** + +1. Check AWS service health +2. Verify your AWS credentials have the required permissions +3. Check if you're using the correct region: + ```console + export AWS_REGION=us-west-2 + ``` + +[↑ Back to top](#table-of-contents) + +## See also + +- [Test Runner](../core/test-runner.md) - Learn about the test runner interface +- [Getting Started](../getting-started.md) - Set up your development environment +- [Pytest Integration](pytest-integration.md) - Advanced pytest configuration +- [Examples README](../../examples/test/README.md) - More examples and configuration details + +[↑ Back to top](#table-of-contents) diff --git a/docs/api-reference/.gitkeep b/docs/api-reference/.gitkeep new file mode 100644 index 0000000..9748135 --- /dev/null +++ b/docs/api-reference/.gitkeep @@ -0,0 +1 @@ +# This file will be removed once the directory has content diff --git a/docs/best-practices.md b/docs/best-practices.md new file mode 100644 index 0000000..da60095 --- /dev/null +++ b/docs/best-practices.md @@ -0,0 +1,850 @@ +# Best Practices + +## Table of Contents + +- [Overview](#overview) +- [Function design](#function-design) +- [Timeout configuration](#timeout-configuration) +- [Naming conventions](#naming-conventions) +- [Performance optimization](#performance-optimization) +- [Serialization](#serialization) +- [Common mistakes](#common-mistakes) +- [Code organization](#code-organization) +- [FAQ](#faq) +- [See also](#see-also) + +[← Back to main index](index.md) + +## Overview + +This guide covers best practices for building reliable, maintainable durable functions. You'll learn how to design functions that are easy to test, debug, and maintain in production. + +[↑ Back to top](#table-of-contents) + +## Function design + +### Keep functions focused + +Each durable function should have a single, clear purpose. Focused functions are easier to test, debug, and maintain. They also make it simpler to understand execution flow and identify failures. 
+ +**Good:** + +```python +@durable_execution +def process_order(event: dict, context: DurableContext) -> dict: + """Process a single order through validation, payment, and fulfillment.""" + order_id = event["order_id"] + + validation = context.step(validate_order(order_id)) + payment = context.step(process_payment(order_id, event["amount"])) + fulfillment = context.step(fulfill_order(order_id)) + + return {"order_id": order_id, "status": "completed"} +``` + +**Avoid:** + +```python +@durable_execution +def process_everything(event: dict, context: DurableContext) -> dict: + """Process orders, update inventory, send emails, generate reports...""" + # Too many responsibilities - hard to test and maintain + # If one part fails, the entire function needs to retry + pass +``` + +### Wrap non-deterministic code in steps + +All non-deterministic operations must be wrapped in steps: + +```python +@durable_step +def get_timestamp(step_context: StepContext) -> int: + return int(time.time()) + +@durable_step +def generate_id(step_context: StepContext) -> str: + return str(uuid.uuid4()) + +@durable_execution +def lambda_handler(event: dict, context: DurableContext) -> dict: + timestamp = context.step(get_timestamp()) + request_id = context.step(generate_id()) + return {"timestamp": timestamp, "request_id": request_id} +``` + +**Why:** Non-deterministic code produces different values on replay, breaking state consistency. + + +### Use @durable_step for reusable functions + +Decorate functions with `@durable_step` to get automatic naming, better code organization, and cleaner syntax. This makes your code more maintainable and easier to test. + +**Good:** + +```python +@durable_step +def validate_input(step_context: StepContext, data: dict) -> bool: + return all(key in data for key in ["name", "email"]) + +@durable_execution +def lambda_handler(event: dict, context: DurableContext) -> dict: + is_valid = context.step(validate_input(event)) + return {"valid": is_valid} +``` + +**Avoid:** + +```python +@durable_execution +def lambda_handler(event: dict, context: DurableContext) -> dict: + # Lambda functions require explicit names and are harder to test + is_valid = context.step( + lambda _: all(key in event for key in ["name", "email"]), + name="validate_input" + ) + return {"valid": is_valid} +``` + +### Don't share state between steps + +Pass data through return values, not global variables or class attributes. Global state breaks on replay because steps return cached results, but global variables reset to their initial values. + +**Good:** + +```python +@durable_step +def fetch_user(step_context: StepContext, user_id: str) -> dict: + return {"user_id": user_id, "name": "Jane Doe"} + +@durable_step +def send_email(step_context: StepContext, user: dict) -> bool: + send_to_address(user["name"], user.get("email")) + return True + +@durable_execution +def lambda_handler(event: dict, context: DurableContext) -> dict: + user = context.step(fetch_user(event["user_id"])) + sent = context.step(send_email(user)) + return {"sent": sent} +``` + +**Avoid:** + +```python +# DON'T: Global state +current_user = None + +@durable_step +def fetch_user(step_context: StepContext, user_id: str) -> dict: + global current_user + current_user = {"user_id": user_id, "name": "Jane Doe"} + return current_user + +@durable_step +def send_email(step_context: StepContext) -> bool: + # On replay, current_user might be None! 
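+    # (fetch_user's step result is replayed from its checkpoint, so this global is never repopulated)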
+ send_to_address(current_user["name"], current_user.get("email")) + return True + +@durable_execution +def lambda_handler(event: dict, context: DurableContext) -> dict: + # First execution: works fine + # On replay: fetch_user returns cached result but doesn't set global variable + # send_email crashes because current_user is None + user = context.step(fetch_user(event["user_id"])) + sent = context.step(send_email()) + return {"sent": sent} +``` + +### Choose the right execution semantics + +Use at-most-once semantics for operations with side effects (payments, emails, database writes) to prevent duplicate execution. Use at-least-once (default) for idempotent operations that are safe to retry. + +**At-most-once for side effects:** + +```python +from aws_durable_execution_sdk_python.config import StepConfig, StepSemantics + +@durable_step +def charge_credit_card(step_context: StepContext, amount: float) -> dict: + return {"transaction_id": "txn_123", "status": "completed"} + +@durable_execution +def lambda_handler(event: dict, context: DurableContext) -> dict: + # Prevent duplicate charges on retry + payment = context.step( + charge_credit_card(event["amount"]), + config=StepConfig(step_semantics=StepSemantics.AT_MOST_ONCE_PER_RETRY), + ) + return payment +``` + +**At-least-once for idempotent operations:** + +```python +@durable_step +def calculate_total(step_context: StepContext, items: list) -> float: + return sum(item["price"] for item in items) + +@durable_execution +def lambda_handler(event: dict, context: DurableContext) -> float: + # Safe to run multiple times - same input produces same output + total = context.step(calculate_total(event["items"])) + return total +``` + +### Handle errors explicitly + +Catch and handle exceptions in your step functions. Distinguish between transient failures (network issues, rate limits) that should retry, and permanent failures (invalid input, not found) that shouldn't. + +**Good:** + +```python +@durable_step +def call_external_api(step_context: StepContext, url: str) -> dict: + try: + response = requests.get(url, timeout=10) + response.raise_for_status() + return response.json() + except requests.Timeout: + raise # Let retry handle timeouts + except requests.HTTPError as e: + if e.response.status_code >= 500: + raise # Retry server errors + # Don't retry client errors (400-499) + return {"error": "client_error", "status": e.response.status_code} +``` + +**Avoid:** + +```python +@durable_step +def call_external_api(step_context: StepContext, url: str) -> dict: + # No error handling - all errors cause retry, even permanent ones + response = requests.get(url) + return response.json() +``` + +[↑ Back to top](#table-of-contents) + +## Timeout configuration + +### Set realistic timeouts + +Choose timeout values based on expected execution time plus buffer for retries and network delays. Too short causes unnecessary failures; too long wastes resources waiting for operations that won't complete. 
+ +**Good:** + +```python +from aws_durable_execution_sdk_python.config import CallbackConfig, Duration + +@durable_execution +def lambda_handler(event: dict, context: DurableContext) -> dict: + # Expected 2 minutes + 1 minute buffer = 3 minutes + callback = context.create_callback( + name="approval", + config=CallbackConfig(timeout=Duration.from_minutes(3)), + ) + return {"callback_id": callback.callback_id} +``` + +**Avoid:** + +```python +@durable_execution +def lambda_handler(event: dict, context: DurableContext) -> dict: + # Too short - will timeout before external system responds + callback = context.create_callback( + name="approval", + config=CallbackConfig(timeout=Duration.from_seconds(5)), + ) + return {"callback_id": callback.callback_id} +``` + +### Use heartbeat timeouts for long operations + +Enable heartbeat monitoring for callbacks that take more than a few minutes. Heartbeats detect when external systems stop responding, preventing you from waiting the full timeout period. + +```python +callback = context.create_callback( + name="approval", + config=CallbackConfig( + timeout=Duration.from_hours(24), # Maximum wait time + heartbeat_timeout=Duration.from_hours(2), # Fail if no heartbeat for 2 hours + ), +) +``` + +Without heartbeat monitoring, you'd wait the full 24 hours even if the external system crashes after 10 minutes. + +### Configure retry delays appropriately + +```python +from aws_durable_execution_sdk_python.retries import RetryStrategyConfig + +# Fast retry for transient network issues +fast_retry = RetryStrategyConfig( + max_attempts=3, + initial_delay_seconds=1, + max_delay_seconds=5, + backoff_rate=2.0, +) + +# Slow retry for rate limiting +slow_retry = RetryStrategyConfig( + max_attempts=5, + initial_delay_seconds=10, + max_delay_seconds=60, + backoff_rate=2.0, +) +``` + +[↑ Back to top](#table-of-contents) + +## Naming conventions + +### Use descriptive operation names + +Choose names that explain what the operation does, not how it does it. Good names make logs easier to read and help you identify which operation failed. 
+ +**Good:** + +```python +@durable_execution +def lambda_handler(event: dict, context: DurableContext) -> dict: + user = context.step(fetch_user(event["user_id"]), name="fetch_user") + validated = context.step(validate_user(user), name="validate_user") + notification = context.step(send_notification(user), name="send_notification") + return {"status": "completed"} +``` + +**Avoid:** + +```python +@durable_execution +def lambda_handler(event: dict, context: DurableContext) -> dict: + # Generic names don't help with debugging + user = context.step(fetch_user(event["user_id"]), name="step1") + validated = context.step(validate_user(user), name="step2") + notification = context.step(send_notification(user), name="step3") + return {"status": "completed"} +``` + +### Use consistent naming patterns + +```python +# Pattern: verb_noun for operations +context.step(validate_order(order_id), name="validate_order") +context.step(process_payment(amount), name="process_payment") + +# Pattern: noun_action for callbacks +context.create_callback(name="payment_callback") +context.create_callback(name="approval_callback") + +# Pattern: descriptive_wait for waits +context.wait(seconds=30, name="payment_confirmation_wait") +``` + +### Name dynamic operations with context + +Include context when creating operations in loops: + +```python +@durable_execution +def lambda_handler(event: dict, context: DurableContext) -> list: + results = [] + for i, item in enumerate(event["items"]): + result = context.step( + process_item(item), + name=f"process_item_{i}_{item['id']}" + ) + results.append(result) + return results +``` + +[↑ Back to top](#table-of-contents) + +## Performance optimization + +### Minimize checkpoint size + +Keep operation inputs and results small. Large payloads increase checkpoint overhead, slow down execution, and can hit size limits. Store large data in S3 and pass references instead. + +**Good:** + +```python +@durable_step +def process_large_dataset(step_context: StepContext, s3_key: str) -> str: + data = download_from_s3(s3_key) + result = process_data(data) + result_key = upload_to_s3(result) + return result_key # Small checkpoint - just the S3 key + +@durable_execution +def lambda_handler(event: dict, context: DurableContext) -> dict: + result_key = context.step(process_large_dataset(event["s3_key"])) + return {"result_key": result_key} +``` + +**Avoid:** + +```python +@durable_step +def process_large_dataset(step_context: StepContext, data: list) -> list: + return process_data(data) # Large checkpoint! + +@durable_execution +def lambda_handler(event: dict, context: DurableContext) -> dict: + # Passing megabytes of data through checkpoints + large_data = download_from_s3(event["s3_key"]) + result = context.step(process_large_dataset(large_data)) + return {"result": result} # Another large checkpoint! +``` + +### Batch operations when possible + +Group related operations to reduce checkpoint overhead. Each step creates a checkpoint, so batching reduces API calls and speeds up execution. 
+ +**Good:** + +```python +@durable_step +def process_batch(step_context: StepContext, items: list) -> list: + return [process_item(item) for item in items] + +@durable_execution +def lambda_handler(event: dict, context: DurableContext) -> list: + items = event["items"] + results = [] + + # Process 10 items per step instead of 1 + for i in range(0, len(items), 10): + batch = items[i:i+10] + batch_results = context.step( + process_batch(batch), + name=f"process_batch_{i//10}" + ) + results.extend(batch_results) + + return results +``` + +**Avoid:** + +```python +@durable_execution +def lambda_handler(event: dict, context: DurableContext) -> list: + results = [] + # Creating a step for each item - too many checkpoints! + for i, item in enumerate(event["items"]): + result = context.step( + lambda _, item=item: process_item(item), + name=f"process_item_{i}" + ) + results.append(result) + return results +``` + +### Use parallel operations for independent work + +Execute independent operations concurrently to reduce total execution time. Use `context.parallel()` to run multiple operations at the same time. + +**Good:** + +```python +@durable_execution +def lambda_handler(event: dict, context: DurableContext) -> dict: + # Execute all three operations concurrently + results = context.parallel( + fetch_user_data(event["user_id"]), + fetch_order_history(event["user_id"]), + fetch_preferences(event["user_id"]), + ) + + return { + "user": results[0], + "orders": results[1], + "preferences": results[2], + } +``` + +**Avoid:** + +```python +@durable_execution +def lambda_handler(event: dict, context: DurableContext) -> dict: + # Sequential execution - each step waits for the previous one + user_data = context.step(fetch_user_data(event["user_id"])) + order_history = context.step(fetch_order_history(event["user_id"])) + preferences = context.step(fetch_preferences(event["user_id"])) + + return { + "user": user_data, + "orders": order_history, + "preferences": preferences, + } +``` + +### Avoid unnecessary waits + +Only use waits when you need to delay execution: + +```python +@durable_execution +def lambda_handler(event: dict, context: DurableContext) -> dict: + job_id = context.step(start_job(event["data"])) + context.wait(seconds=30, name="job_processing_wait") # Necessary + result = context.step(check_job_status(job_id)) + return result +``` + +[↑ Back to top](#table-of-contents) + +## Serialization + +### Use JSON-serializable types + +The SDK uses JSON serialization by default for checkpoints. Stick to JSON-compatible types (dict, list, str, int, float, bool, None) for operation inputs and results. + +**Good:** + +```python +@durable_step +def process_order(step_context: StepContext, order: dict) -> dict: + return { + "order_id": order["id"], + "total": 99.99, + "items": ["item1", "item2"], + "processed": True, + } +``` + +**Avoid:** + +```python +from datetime import datetime +from decimal import Decimal + +@durable_step +def process_order(step_context: StepContext, order: dict) -> dict: + # datetime and Decimal aren't JSON-serializable by default + return { + "order_id": order["id"], + "total": Decimal("99.99"), # Won't serialize! + "timestamp": datetime.now(), # Won't serialize! 
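+        # Convert these to float / an ISO string first (see the next section)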
+ } +``` + +### Convert non-serializable types + +Convert complex types to JSON-compatible formats before returning from steps: + +```python +from datetime import datetime +from decimal import Decimal + +@durable_step +def process_order(step_context: StepContext, order: dict) -> dict: + return { + "order_id": order["id"], + "total": float(Decimal("99.99")), # Convert to float + "timestamp": datetime.now().isoformat(), # Convert to string + } +``` + +### Use custom serialization for complex types + +For complex objects, implement custom serialization or use the SDK's SerDes system: + +```python +from dataclasses import dataclass, asdict + +@dataclass +class Order: + order_id: str + total: float + items: list + +@durable_step +def process_order(step_context: StepContext, order_data: dict) -> dict: + order = Order(**order_data) + # Process order... + return asdict(order) # Convert dataclass to dict +``` + +[↑ Back to top](#table-of-contents) + +## Common mistakes + +### ⚠️ Modifying mutable objects between steps + +**Wrong:** + +```python +@durable_execution +def lambda_handler(event: dict, context: DurableContext) -> dict: + data = {"count": 0} + context.step(increment_count(data)) + data["count"] += 1 # DON'T: Mutation outside step + return data +``` + +**Right:** + +```python +@durable_execution +def lambda_handler(event: dict, context: DurableContext) -> dict: + data = {"count": 0} + data = context.step(increment_count(data)) + data = context.step(increment_count(data)) + return data +``` + +### ⚠️ Using context inside its own operations + +**Wrong:** + +```python +@durable_step +def process_with_wait(step_context: StepContext, context: DurableContext) -> str: + # DON'T: Can't use context inside its own step operation + context.wait(seconds=1) # Error: using context inside step! + result = context.step(nested_step(), name="step2") # Error: nested context.step! + return result + +@durable_execution +def lambda_handler(event: dict, context: DurableContext) -> dict: + # This will fail - context is being used inside its own step + result = context.step(process_with_wait(context), name="step1") + return {"result": result} +``` + +**Right:** + +```python +@durable_step +def nested_step(step_context: StepContext) -> str: + return "nested step" + +@durable_with_child_context +def process_with_wait(child_ctx: DurableContext) -> str: + # Use child context for nested operations + child_ctx.wait(seconds=1) + result = child_ctx.step(nested_step(), name="step2") + return result + +@durable_execution +def lambda_handler(event: dict, context: DurableContext) -> dict: + # Use run_in_child_context for nested operations + result = context.run_in_child_context( + process_with_wait(), + name="block1" + ) + return {"result": result} +``` + +**Why:** You can't use a context object inside its own operations (like calling `context.step()` inside another `context.step()`). Use child contexts to create isolated execution scopes for nested operations. + +### ⚠️ Forgetting to handle callback timeouts + +**Wrong:** + +```python +@durable_execution +def lambda_handler(event: dict, context: DurableContext) -> dict: + callback = context.create_callback(name="approval") + result = callback.result() + return {"approved": result["approved"]} # Crashes if timeout! 
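+    # (On timeout, result is None, so result["approved"] raises a TypeError)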
+``` + +**Right:** + +```python +@durable_execution +def lambda_handler(event: dict, context: DurableContext) -> dict: + callback = context.create_callback(name="approval") + result = callback.result() + + if result is None: + return {"status": "timeout", "approved": False} + + return {"status": "completed", "approved": result.get("approved", False)} +``` + +### ⚠️ Creating too many small steps + +**Wrong:** + +```python +@durable_execution +def lambda_handler(event: dict, context: DurableContext) -> dict: + a = context.step(lambda _: event["a"]) + b = context.step(lambda _: event["b"]) + sum_val = context.step(lambda _: a + b) + return {"result": sum_val} +``` + +**Right:** + +```python +@durable_step +def calculate_result(step_context: StepContext, a: int, b: int) -> int: + return a + b + +@durable_execution +def lambda_handler(event: dict, context: DurableContext) -> dict: + result = context.step(calculate_result(event["a"], event["b"])) + return {"result": result} +``` + +### ⚠️ Not using retry for transient failures + +**Right:** + +```python +from aws_durable_execution_sdk_python.config import StepConfig +from aws_durable_execution_sdk_python.retries import ( + RetryStrategyConfig, + create_retry_strategy, +) + +@durable_step +def call_api(step_context: StepContext, url: str) -> dict: + response = requests.get(url, timeout=10) + response.raise_for_status() + return response.json() + +@durable_execution +def lambda_handler(event: dict, context: DurableContext) -> dict: + retry_config = RetryStrategyConfig( + max_attempts=3, + retryable_error_types=[requests.Timeout, requests.ConnectionError], + ) + + result = context.step( + call_api(event["url"]), + config=StepConfig(retry_strategy=create_retry_strategy(retry_config)), + ) + return result +``` + +[↑ Back to top](#table-of-contents) + +## Code organization + +### Separate business logic from orchestration + +```python +# business_logic.py +@durable_step +def validate_order(step_context: StepContext, order: dict) -> dict: + if not order.get("items"): + raise ValueError("Order must have items") + return {**order, "validated": True} + +# handler.py +@durable_execution +def lambda_handler(event: dict, context: DurableContext) -> dict: + order = event["order"] + validated_order = context.step(validate_order(order)) + return {"status": "completed", "order_id": validated_order["order_id"]} +``` + +### Use child contexts for complex workflows + +```python +@durable_with_child_context +def validate_and_enrich(ctx: DurableContext, data: dict) -> dict: + validated = ctx.step(validate_data(data)) + enriched = ctx.step(enrich_data(validated)) + return enriched + +@durable_execution +def lambda_handler(event: dict, context: DurableContext) -> dict: + enriched = context.run_in_child_context( + validate_and_enrich(event["data"]), + name="validation_phase", + ) + return enriched +``` + +### Group related configuration + +```python +# config.py +from aws_durable_execution_sdk_python.config import StepConfig +from aws_durable_execution_sdk_python.retries import ( + RetryStrategyConfig, + create_retry_strategy, +) + +FAST_RETRY = StepConfig( + retry_strategy=create_retry_strategy( + RetryStrategyConfig( + max_attempts=3, + initial_delay_seconds=1, + max_delay_seconds=5, + backoff_rate=2.0, + ) + ) +) + +# handler.py +from config import FAST_RETRY + +@durable_execution +def lambda_handler(event: dict, context: DurableContext) -> dict: + data = context.step(fetch_data(event["id"]), config=FAST_RETRY) + return data +``` + +[↑ Back to 
top](#table-of-contents) + +## FAQ + +**Q: How many steps should a durable function have?** + +A: There's a limit of 3,000 operations per execution. Keep in mind that more steps mean more API operations and longer execution time. Balance granularity with performance - group related operations when it makes sense, but don't hesitate to break down complex logic into steps. + +**Q: Should I create a step for every function call?** + +A: No. Only create steps for operations that need checkpointing, retry logic, or isolation. + +**Q: Can I use async/await in durable functions?** + +A: Functions decorated with `@durable_step` must be synchronous. If you need to call async code, use `asyncio.run()` inside your step to execute it synchronously. + +**Q: How do I handle secrets and credentials?** + +A: Use AWS Secrets Manager or Parameter Store. Fetch secrets in a step at the beginning of your workflow. + +**Q: What's the maximum execution time for a durable function?** + +A: Durable functions can run for days or weeks using waits and callbacks. Each individual Lambda invocation is still subject to the 15-minute Lambda timeout. + +**Q: How do I test durable functions locally?** + +A: Use the testing SDK (`aws-durable-execution-sdk-python-testing`) to run functions locally without AWS credentials. See [Testing patterns](testing-patterns/basic-tests.md) for examples. + +**Q: How do I monitor durable functions in production?** + +A: Use CloudWatch Logs for execution logs, CloudWatch Metrics for performance metrics, and X-Ray for distributed tracing. + +[↑ Back to top](#table-of-contents) + +## See also + +- [Getting started](getting-started.md) - Build your first durable function +- [Steps](core/steps.md) - Step operations +- [Error handling](advanced/error-handling.md) - Handle failures +- [Configuration](api-reference/config.md) - Configuration options +- [Testing patterns](testing-patterns/basic-tests.md) - How to test your functions + +[↑ Back to top](#table-of-contents) + +## License + +See the [LICENSE](../LICENSE) file for our project's licensing. + +[↑ Back to top](#table-of-contents) diff --git a/docs/core/callbacks.md b/docs/core/callbacks.md new file mode 100644 index 0000000..b200177 --- /dev/null +++ b/docs/core/callbacks.md @@ -0,0 +1,877 @@ +# Callbacks + +## Table of Contents + +- [Terminology](#terminology) +- [What are callbacks?](#what-are-callbacks) +- [Key features](#key-features) +- [Getting started](#getting-started) +- [Method signatures](#method-signatures) +- [Configuration](#configuration) +- [Waiting for callbacks](#waiting-for-callbacks) +- [Integration patterns](#integration-patterns) +- [Advanced patterns](#advanced-patterns) +- [Best practices](#best-practices) +- [FAQ](#faq) +- [Testing](#testing) +- [See also](#see-also) + +[← Back to main index](../index.md) + +## Terminology + +**Callback** - A mechanism that pauses execution and waits for an external system to provide a result. Created using `context.create_callback()`. + +**Callback ID** - A unique identifier for a callback that you send to external systems. The external system uses this ID to send the result back. + +**Callback timeout** - The maximum time to wait for a callback response. If the timeout expires without a response, the callback fails. + +**Heartbeat timeout** - The maximum time between heartbeat signals from the external system. Use this to detect when external systems stop responding. + +**Wait for callback** - The operation that pauses execution until the callback receives a result. 
Created using `context.wait_for_callback()`. + +[↑ Back to top](#table-of-contents) + +## What are callbacks? + +Callbacks let your durable function pause and wait for external systems to respond. When you create a callback, you get a unique callback ID that you can send to external systems like approval workflows, payment processors, or third-party APIs. Your function pauses until the external system calls back with a result. + +Use callbacks to: +- Wait for human approvals in workflows +- Integrate with external payment systems +- Coordinate with third-party APIs +- Handle long-running external processes +- Implement request-response patterns with external systems + +[↑ Back to top](#table-of-contents) + +## Key features + +- **External system integration** - Pause execution and wait for external responses +- **Unique callback IDs** - Each callback gets a unique identifier for routing +- **Configurable timeouts** - Set maximum wait times and heartbeat intervals +- **Type-safe results** - Callbacks are generic and preserve result types +- **Automatic checkpointing** - Callback results are saved automatically +- **Heartbeat monitoring** - Detect when external systems stop responding + +[↑ Back to top](#table-of-contents) + +## Getting started + +Callbacks let you pause your durable function while waiting for an external system to respond. Think of it like this: + +**Your durable function:** +1. Creates a callback and gets a unique `callback_id` +2. Sends the `callback_id` to an external system (payment processor, approval system, etc.) +3. Calls `callback.result()` - execution pauses here ⏸️ +4. When the callback is notified, execution resumes ▶️ + +**Your notification handler** (separate Lambda or service): +1. Receives the result from the external system (via webhook, queue, etc.) +2. Calls AWS Lambda API `SendDurableExecutionCallbackSuccess` with the `callback_id` +3. This wakes up your durable function + +The key insight: callbacks need two pieces working together - one that waits, and one that notifies. + +### Basic example + +Here's a simple example showing the durable function side: + +```python +from typing import Any +from aws_durable_execution_sdk_python import DurableContext, durable_execution +from aws_durable_execution_sdk_python.config import CallbackConfig, Duration + +@durable_execution +def handler(event: Any, context: DurableContext) -> dict: + """Create a callback and wait for external system response.""" + # Step 1: Create the callback + callback_config = CallbackConfig( + timeout=Duration.from_minutes(2), + heartbeat_timeout=Duration.from_seconds(60), + ) + + callback = context.create_callback( + name="example_callback", + config=callback_config, + ) + + # Step 2: Send callback ID to external system + # In a real scenario, you'd send this to a third-party API, + # message queue, or webhook endpoint + send_to_external_system({ + "callback_id": callback.callback_id, + "data": event.get("data"), + }) + + # Step 3: Wait for the result - execution suspends here + result = callback.result() + + # Step 4: Execution resumes when result is received + return { + "status": "completed", + "result": result, + } +``` + +### Notifying the callback + +When your external system finishes processing, you need to notify the callback using AWS Lambda APIs. 
You have three options: + +**send_durable_execution_callback_success** - Notify success with a result: + +```python +import boto3 +import json + +lambda_client = boto3.client('lambda') + +# When external system succeeds +callback_id = "abc123-callback-id-from-durable-function" +result_data = json.dumps({'status': 'approved', 'amount': 1000}).encode('utf-8') + +lambda_client.send_durable_execution_callback_success( + CallbackId=callback_id, + Result=result_data +) +``` + +**send_durable_execution_callback_failure** - Notify failure with an error: + +```python +# When external system fails +callback_id = "abc123-callback-id-from-durable-function" + +lambda_client.send_durable_execution_callback_failure( + CallbackId=callback_id, + Error={ + 'ErrorType': 'PaymentDeclined', + 'ErrorMessage': 'Insufficient funds' + } +) +``` + +**send_durable_execution_callback_heartbeat** - Send heartbeat to keep callback alive: + +```python +# Send heartbeat for long-running operations +callback_id = "abc123-callback-id-from-durable-function" + +lambda_client.send_durable_execution_callback_heartbeat( + CallbackId=callback_id +) +``` + +### Complete example with message broker + +Here's a complete example showing both sides of the callback flow: + +```python +# Durable function side +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + """Process payment with external payment processor.""" + # Create callback + callback = context.create_callback( + name="payment_callback", + config=CallbackConfig(timeout=Duration.from_minutes(5)), + ) + + # Send to message broker (SQS, SNS, EventBridge, etc.) + send_to_payment_queue({ + "callback_id": callback.callback_id, + "amount": event["amount"], + "customer_id": event["customer_id"], + }) + + # Wait for result - execution suspends here + payment_result = callback.result() + + # Execution resumes here when callback is notified + return { + "payment_status": payment_result.get("status"), + "transaction_id": payment_result.get("transaction_id"), + } +``` + +```python +# Message processor side (separate Lambda or service) +import boto3 +import json + +lambda_client = boto3.client('lambda') + +def process_payment_message(event: dict): + """Process payment and notify callback.""" + callback_id = event["callback_id"] + amount = event["amount"] + customer_id = event["customer_id"] + + try: + # Process payment with external system + result = payment_processor.charge(customer_id, amount) + + # Notify success + result_data = json.dumps({ + 'status': 'completed', + 'transaction_id': result.transaction_id, + }).encode('utf-8') + + lambda_client.send_durable_execution_callback_success( + CallbackId=callback_id, + Result=result_data + ) + except PaymentError as e: + # Notify failure + lambda_client.send_durable_execution_callback_failure( + CallbackId=callback_id, + Error={ + 'ErrorType': 'PaymentError', + 'ErrorMessage': f'{e.error_code}: {str(e)}' + } + ) +``` + +### Key points + +- **Callbacks require two parts**: Your durable function creates the callback, and a separate process notifies the result +- **Use Lambda APIs to notify**: `SendDurableExecutionCallbackSuccess`, `SendDurableExecutionCallbackFailure`, or `SendDurableExecutionCallbackHeartbeat` +- **Execution suspends at `callback.result()`**: Your function stops running and doesn't consume resources while waiting +- **Execution resumes when notified**: When you call the Lambda API with the callback ID, your function resumes from where it suspended +- **Heartbeats keep callbacks alive**: For long 
operations, send heartbeats to prevent timeout + +[↑ Back to top](#table-of-contents) + +## Method signatures + +### context.create_callback() + +```python +def create_callback( + name: str | None = None, + config: CallbackConfig | None = None, +) -> Callback[T] +``` + +**Parameters:** + +- `name` (optional) - A name for the callback, useful for debugging and testing +- `config` (optional) - A `CallbackConfig` object to configure timeout behavior + +**Returns:** A `Callback` object with a `callback_id` property + +**Type parameter:** `T` - The type of result the callback will receive + +### callback.callback_id + +```python +callback_id: str +``` + +A unique identifier for this callback. Send this ID to external systems so they can return results. + +### callback.result() + +```python +def result() -> T | None +``` + +Returns the callback result. Blocks until the result is available or the callback times out. + +[↑ Back to top](#table-of-contents) + +## Configuration + +Configure callback behavior using `CallbackConfig`: + +```python +from aws_durable_execution_sdk_python import DurableContext, durable_execution +from aws_durable_execution_sdk_python.config import CallbackConfig, Duration + +@durable_execution +def handler(event: dict, context: DurableContext) -> str: + # Configure callback with custom timeouts + config = CallbackConfig( + timeout=Duration.from_seconds(60), + heartbeat_timeout=Duration.from_seconds(30), + ) + + callback = context.create_callback( + name="timeout_callback", + config=config, + ) + + return f"Callback created with 60s timeout: {callback.callback_id}" +``` + +### CallbackConfig parameters + +**timeout** - Maximum time to wait for the callback response. Use `Duration` helpers to specify: +- `Duration.from_seconds(60)` - 60 seconds +- `Duration.from_minutes(5)` - 5 minutes +- `Duration.from_hours(2)` - 2 hours +- `Duration.from_days(1)` - 1 day + +**heartbeat_timeout** - Maximum time between heartbeat signals from the external system. If the external system doesn't send a heartbeat within this interval, the callback fails. Set to 0 or omit to disable heartbeat monitoring. + +**serdes** (optional) - Custom serialization/deserialization for the callback result. If not provided, uses JSON serialization. + +### Duration helpers + +The `Duration` class provides convenient methods for specifying timeouts: + +```python +from aws_durable_execution_sdk_python.config import Duration + +# Various ways to specify duration +timeout_60s = Duration.from_seconds(60) +timeout_5m = Duration.from_minutes(5) +timeout_2h = Duration.from_hours(2) +timeout_1d = Duration.from_days(1) + +# Use in CallbackConfig +config = CallbackConfig( + timeout=Duration.from_hours(2), + heartbeat_timeout=Duration.from_minutes(15), +) +``` + +[↑ Back to top](#table-of-contents) + +## Waiting for callbacks + +After creating a callback, you typically wait for its result. 
There are two ways to do this: + +### Using callback.result() + +Call `result()` on the callback object to wait for the response: + +```python +from aws_durable_execution_sdk_python import DurableContext, durable_execution +from aws_durable_execution_sdk_python.config import CallbackConfig, Duration + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + # Create callback + callback = context.create_callback( + name="approval_callback", + config=CallbackConfig(timeout=Duration.from_hours(24)), + ) + + # Send callback ID to approval system + send_approval_request(callback.callback_id, event["request_details"]) + + # Wait for approval response + approval_result = callback.result() + + if approval_result and approval_result.get("approved"): + return {"status": "approved", "details": approval_result} + else: + return {"status": "rejected"} +``` + +### Using context.wait_for_callback() + +Alternatively, use `wait_for_callback()` to wait for a callback by its ID: + +```python +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + # Create callback + callback = context.create_callback(name="payment_callback") + + # Send to payment processor + initiate_payment(callback.callback_id, event["amount"]) + + # Wait for payment result + payment_result = context.wait_for_callback( + callback.callback_id, + config=CallbackConfig(timeout=Duration.from_minutes(5)), + ) + + return {"payment_status": payment_result} +``` + +[↑ Back to top](#table-of-contents) + +## Integration patterns + +### Human approval workflow + +Use callbacks to pause execution while waiting for human approval: + +```python +from aws_durable_execution_sdk_python import DurableContext, durable_execution +from aws_durable_execution_sdk_python.config import CallbackConfig, Duration + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + """Process an order that requires approval.""" + order_id = event["order_id"] + + # Create callback for approval + approval_callback = context.create_callback( + name="order_approval", + config=CallbackConfig( + timeout=Duration.from_hours(48), # 48 hours to approve + heartbeat_timeout=Duration.from_hours(12), # Check every 12 hours + ), + ) + + # Send approval request to approval system + # The approval system will use callback.callback_id to respond + send_to_approval_system({ + "callback_id": approval_callback.callback_id, + "order_id": order_id, + "details": event["order_details"], + }) + + # Wait for approval + approval = approval_callback.result() + + if approval and approval.get("approved"): + # Process approved order + return process_order(order_id) + else: + # Handle rejection + return {"status": "rejected", "reason": approval.get("reason")} +``` + +### Payment processing + +Integrate with external payment processors: + +```python +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + """Process a payment with external processor.""" + amount = event["amount"] + customer_id = event["customer_id"] + + # Create callback for payment result + payment_callback = context.create_callback( + name="payment_processing", + config=CallbackConfig( + timeout=Duration.from_minutes(5), + heartbeat_timeout=Duration.from_seconds(30), + ), + ) + + # Initiate payment with external processor + initiate_payment_with_processor({ + "callback_id": payment_callback.callback_id, + "amount": amount, + "customer_id": customer_id, + "callback_url": 
f"https://api.example.com/callbacks/%7Bpayment_callback.callback_id%7D", + }) + + # Wait for payment result + payment_result = payment_callback.result() + + return { + "transaction_id": payment_result.get("transaction_id"), + "status": payment_result.get("status"), + "amount": amount, + } +``` + +### Third-party API integration + +Wait for responses from third-party APIs: + +```python +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + """Integrate with third-party data enrichment API.""" + user_data = event["user_data"] + + # Create callback for enrichment result + enrichment_callback = context.create_callback( + name="data_enrichment", + config=CallbackConfig(timeout=Duration.from_minutes(10)), + ) + + # Request data enrichment from third-party + request_data_enrichment({ + "callback_id": enrichment_callback.callback_id, + "user_data": user_data, + "webhook_url": f"https://api.example.com/webhooks/%7Benrichment_callback.callback_id%7D", + }) + + # Wait for enriched data + enriched_data = enrichment_callback.result() + + # Combine original and enriched data + return { + "original": user_data, + "enriched": enriched_data, + "timestamp": enriched_data.get("processed_at"), + } +``` + +### Multiple callbacks + +Handle multiple external systems in parallel: + +```python +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + """Wait for multiple external systems.""" + # Create callbacks for different systems + credit_check = context.create_callback( + name="credit_check", + config=CallbackConfig(timeout=Duration.from_minutes(5)), + ) + + fraud_check = context.create_callback( + name="fraud_check", + config=CallbackConfig(timeout=Duration.from_minutes(3)), + ) + + # Send requests to external systems + request_credit_check(credit_check.callback_id, event["customer_id"]) + request_fraud_check(fraud_check.callback_id, event["transaction_data"]) + + # Wait for both results + credit_result = credit_check.result() + fraud_result = fraud_check.result() + + # Make decision based on both checks + approved = ( + credit_result.get("score", 0) > 650 and + fraud_result.get("risk_level") == "low" + ) + + return { + "approved": approved, + "credit_score": credit_result.get("score"), + "fraud_risk": fraud_result.get("risk_level"), + } +``` + +[↑ Back to top](#table-of-contents) + +## Advanced patterns + +### Callback with retry + +Combine callbacks with retry logic for resilient integrations: + +```python +from aws_durable_execution_sdk_python import ( + DurableContext, + durable_execution, + durable_step, + StepContext, +) +from aws_durable_execution_sdk_python.config import ( + CallbackConfig, + Duration, + StepConfig, +) +from aws_durable_execution_sdk_python.retries import ( + RetryStrategyConfig, + create_retry_strategy, +) + +@durable_step +def wait_for_external_system( + step_context: StepContext, + callback_id: str, +) -> dict: + """Wait for external system with retry on timeout.""" + # This will retry if the callback times out + result = context.wait_for_callback( + callback_id, + config=CallbackConfig(timeout=Duration.from_minutes(2)), + ) + return result + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + # Create callback + callback = context.create_callback(name="external_api") + + # Send request + send_external_request(callback.callback_id) + + # Wait with retry + retry_config = RetryStrategyConfig( + max_attempts=3, + initial_delay_seconds=5, + ) + + result = context.step( + 
wait_for_external_system(callback.callback_id), + config=StepConfig(retry_strategy=create_retry_strategy(retry_config)), + ) + + return result +``` + +### Conditional callback handling + +Handle different callback results based on conditions: + +```python +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + """Handle callback results conditionally.""" + callback = context.create_callback( + name="conditional_callback", + config=CallbackConfig(timeout=Duration.from_minutes(10)), + ) + + # Send request + send_request(callback.callback_id, event["request_type"]) + + # Wait for result + result = callback.result() + + # Handle different result types + if result is None: + return {"status": "timeout", "message": "No response received"} + + result_type = result.get("type") + + if result_type == "success": + return process_success(result) + elif result_type == "partial": + return process_partial(result) + else: + return process_failure(result) +``` + +### Callback with fallback + +Implement fallback logic when callbacks timeout: + +```python +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + """Use fallback when callback times out.""" + callback = context.create_callback( + name="primary_service", + config=CallbackConfig(timeout=Duration.from_seconds(30)), + ) + + # Try primary service + send_to_primary_service(callback.callback_id, event["data"]) + + result = callback.result() + + if result is None: + # Primary service timed out, use fallback + fallback_callback = context.create_callback( + name="fallback_service", + config=CallbackConfig(timeout=Duration.from_minutes(2)), + ) + + send_to_fallback_service(fallback_callback.callback_id, event["data"]) + result = fallback_callback.result() + + return {"result": result, "source": "primary" if result else "fallback"} +``` + +[↑ Back to top](#table-of-contents) + +## Best practices + +**Set appropriate timeouts** - Choose timeout values based on your external system's expected response time. Add buffer for network delays and processing time. + +**Use heartbeat timeouts for long operations** - Enable heartbeat monitoring for callbacks that take more than a few minutes. This helps detect when external systems stop responding. + +**Send callback IDs securely** - Treat callback IDs as sensitive data. Use HTTPS when sending them to external systems. + +**Handle timeout scenarios** - Always handle the case where `callback.result()` returns `None` due to timeout. Implement fallback logic or error handling. + +**Name callbacks for debugging** - Use descriptive names to identify callbacks in logs and tests. + +**Don't reuse callback IDs** - Each callback gets a unique ID. Don't try to reuse IDs across different operations. + +**Validate callback results** - Always validate the structure and content of callback results before using them. + +**Use type hints** - Specify the expected result type when creating callbacks: `Callback[dict]`, `Callback[str]`, etc. + +**Monitor callback metrics** - Track callback success rates, timeout rates, and response times to identify integration issues. + +**Document callback contracts** - Clearly document what data external systems should send back and in what format. + +[↑ Back to top](#table-of-contents) + +## FAQ + +**Q: What happens if a callback times out?** + +A: If the timeout expires before receiving a result, `callback.result()` returns `None`. You should handle this case in your code. 
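+
+For example, a minimal guard - `load_cached_result()` here is a hypothetical fallback helper, not part of the SDK:
+
+```python
+result = callback.result()
+
+if result is None:
+    # Callback timed out - fall back instead of failing the execution
+    result = load_cached_result()  # hypothetical fallback helper
+```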
+ +**Q: Can I cancel a callback?** + +A: No, callbacks can't be cancelled once created. They either receive a result or timeout. + +**Q: How do external systems send results back?** + +A: External systems use the callback ID to send results through your application's callback endpoint. You need to implement an endpoint that receives the callback ID and result, then forwards it to the durable execution service. + +**Q: Can I create multiple callbacks in one function?** + +A: Yes, you can create as many callbacks as needed. Each gets a unique callback ID. + +**Q: What's the maximum timeout for a callback?** + +A: You can set any timeout value using `Duration` helpers. For long-running operations (hours or days), use longer timeouts and enable heartbeat monitoring to detect if external systems stop responding. + +**Q: Do I need to wait for a callback immediately after creating it?** + +A: No, you can create a callback, send its ID to an external system, perform other operations, and wait for the result later in your function. + +**Q: Can callbacks be used with steps?** + +A: Yes, you can create and wait for callbacks inside step functions. However, `context.wait_for_callback()` is a convenience method that already wraps the callback in a step with retry logic for you. + +**Q: What happens if the external system sends a result after the timeout?** + +A: Late results are ignored. The callback has already failed due to timeout. + +**Q: How do I test functions with callbacks?** + +A: Use the testing SDK to simulate callback responses. See the Testing section below for examples. + +**Q: Can I use callbacks in child contexts?** + +A: Yes, callbacks work in child contexts just like in the main context. + +**Q: What's the difference between timeout and heartbeat_timeout?** + +A: `timeout` is the maximum total wait time. `heartbeat_timeout` is the maximum time between heartbeat signals. Use heartbeat timeout to detect when external systems stop responding before the main timeout expires. + +[↑ Back to top](#table-of-contents) + +## Testing + +You can test callbacks using the testing SDK. The test runner lets you simulate callback responses and verify callback behavior. 
+ +### Basic callback testing + +```python +import pytest +from aws_durable_execution_sdk_python_testing import InvocationStatus +from examples.src.callback import callback + +@pytest.mark.durable_execution( + handler=callback.handler, + lambda_function_name="callback", +) +def test_callback(durable_runner): + """Test callback creation.""" + with durable_runner: + result = durable_runner.run(input="test", timeout=10) + + # Check overall status + assert result.status is InvocationStatus.SUCCEEDED + + # Verify callback was created + assert "Callback created with ID:" in result.result +``` + +### Inspecting callback operations + +Use `result.operations` to inspect callback details: + +```python +@pytest.mark.durable_execution( + handler=callback.handler, + lambda_function_name="callback", +) +def test_callback_operation(durable_runner): + """Test and inspect callback operation.""" + with durable_runner: + result = durable_runner.run(input="test", timeout=10) + + # Find callback operations + callback_ops = [ + op for op in result.operations + if op.operation_type.value == "CALLBACK" + ] + + assert len(callback_ops) == 1 + callback_op = callback_ops[0] + + # Verify callback properties + assert callback_op.name == "example_callback" + assert callback_op.callback_id is not None +``` + +### Testing callback timeouts + +Test that callbacks handle timeouts correctly: + +```python +from examples.src.callback import callback_with_timeout + +@pytest.mark.durable_execution( + handler=callback_with_timeout.handler, + lambda_function_name="callback_timeout", +) +def test_callback_timeout(durable_runner): + """Test callback with custom timeout.""" + with durable_runner: + result = durable_runner.run(input={}, timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + assert "60s timeout" in result.result +``` + +### Testing callback integration patterns + +Test complete integration workflows: + +```python +@pytest.mark.durable_execution( + handler=approval_workflow_handler, + lambda_function_name="approval_workflow", +) +def test_approval_workflow(durable_runner): + """Test approval workflow with callback.""" + with durable_runner: + result = durable_runner.run( + input={"order_id": "order-123", "amount": 1000}, + timeout=30, + ) + + # Verify workflow completed + assert result.status is InvocationStatus.SUCCEEDED + + # Check callback was created + callback_ops = [ + op for op in result.operations + if op.operation_type.value == "CALLBACK" + ] + assert len(callback_ops) == 1 + assert callback_ops[0].name == "order_approval" +``` + +For more testing patterns, see: +- [Basic tests](../testing-patterns/basic-tests.md) - Simple test examples +- [Complex workflows](../testing-patterns/complex-workflows.md) - Multi-step workflow testing +- [Best practices](../testing-patterns/best-practices.md) - Testing recommendations + +[↑ Back to top](#table-of-contents) + +## See also + +- [DurableContext API](../api-reference/context.md) - Complete context reference +- [CallbackConfig](../api-reference/config.md) - Configuration options +- [Duration helpers](../api-reference/config.md#duration) - Time duration utilities +- [Steps](steps.md) - Combine callbacks with steps for retry logic +- [Child contexts](child-contexts.md) - Use callbacks in nested contexts +- [Error handling](../advanced/error-handling.md) - Handle callback failures +- [Examples](https://github.com/awslabs/aws-durable-execution-sdk-python/tree/main/examples/src/callback) - More callback examples + +[↑ Back to top](#table-of-contents) + +## 
License
+
+See the [LICENSE](../../LICENSE) file for our project's licensing.
+
+[↑ Back to top](#table-of-contents)
diff --git a/docs/core/child-contexts.md b/docs/core/child-contexts.md
new file mode 100644
index 0000000..bcc26dd
--- /dev/null
+++ b/docs/core/child-contexts.md
@@ -0,0 +1,701 @@
+# Child Contexts
+
+## Table of Contents
+
+- [Terminology](#terminology)
+- [What are child contexts?](#what-are-child-contexts)
+- [Key features](#key-features)
+- [Getting started](#getting-started)
+- [Method signatures](#method-signatures)
+- [Using the @durable_with_child_context decorator](#using-the-durable_with_child_context-decorator)
+- [Naming child contexts](#naming-child-contexts)
+- [Use cases for isolation](#use-cases-for-isolation)
+- [Advanced patterns](#advanced-patterns)
+- [Best practices](#best-practices)
+- [FAQ](#faq)
+- [Testing](#testing)
+- [See also](#see-also)
+
+[← Back to main index](../index.md)
+
+## Terminology
+
+**Child context** - An isolated execution scope within a durable function. Created using `context.run_in_child_context()`.
+
+**Parent context** - The main durable function context that creates child contexts.
+
+**Context function** - A function decorated with `@durable_with_child_context` that receives a `DurableContext` and can execute operations.
+
+**Context isolation** - Child contexts have their own operation namespace, preventing naming conflicts with the parent context.
+
+**Context result** - The return value from a child context function, which is checkpointed as a single unit in the parent context.
+
+[↑ Back to top](#table-of-contents)
+
+## What are child contexts?
+
+A child context is an isolated execution scope in which you can nest durable operations - it has its own set of operations, checkpoints, and state. Child contexts often serve as a unit of concurrency, letting you run concurrent operations within your durable function. You can also use a child context to wrap a large chunk of durable logic into a single unit: once completed, that logic won't run or replay again.
+ +Use child contexts to: +- Run concurrent operations (steps, waits, callbacks) in parallel +- Wrap large blocks of logic that should execute as a single unit +- Handle large data that exceeds individual step limits +- Isolate groups of related operations +- Create reusable components +- Improve code organization and maintainability + +[↑ Back to top](#table-of-contents) + +## Key features + +- **Concurrency unit** - Run multiple operations concurrently within your function +- **Execution isolation** - Child contexts have their own operation namespace +- **Single-unit checkpointing** - Completed child contexts never replay +- **Large data handling** - Process data that exceeds individual step limits +- **Named contexts** - Identify contexts by name for debugging and testing + +[↑ Back to top](#table-of-contents) + +## Getting started + +Here's an example showing why child contexts are useful - they let you group multiple operations that execute as a single unit: + +```python +from aws_durable_execution_sdk_python import ( + DurableContext, + durable_execution, + durable_step, + durable_with_child_context, + StepContext, +) + +@durable_step +def validate_order(step_context: StepContext, order_id: str) -> dict: + """Validate order details.""" + # Validation logic here + return {"valid": True, "order_id": order_id} + +@durable_step +def reserve_inventory(step_context: StepContext, order_id: str) -> dict: + """Reserve inventory for order.""" + # Inventory logic here + return {"reserved": True, "order_id": order_id} + +@durable_step +def charge_payment(step_context: StepContext, order_id: str) -> dict: + """Charge payment for order.""" + # Payment logic here + return {"charged": True, "order_id": order_id} + +@durable_step +def send_confirmation(step_context: StepContext, result: dict) -> dict: + """Send order confirmation.""" + # Notification logic here + return {"sent": True, "order_id": result["order_id"]} + +@durable_with_child_context +def process_order(ctx: DurableContext, order_id: str) -> dict: + """Process an order with multiple steps.""" + # These three steps execute as a single unit + validation = ctx.step(validate_order(order_id)) + inventory = ctx.step(reserve_inventory(order_id)) + payment = ctx.step(charge_payment(order_id)) + + return {"order_id": order_id, "status": "completed"} + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + """Process order using a child context.""" + # Once this completes, it never replays - even if the function continues + result = context.run_in_child_context( + process_order(event["order_id"]), + name="order_processing" + ) + + # Additional operations here won't cause process_order to replay + context.step(send_confirmation(result)) + + return result +``` + +**Why use a child context here?** + +Child contexts let you group related operations into a logical unit. Once `process_order` completes, its result is saved just like a step - everything inside won't replay even if the function continues or restarts. This provides organizational benefits and a small optimization by avoiding unnecessary replays. 
+ +**Key benefits:** + +- **Organization**: Group related operations together for better code structure and readability +- **Reusability**: Call `process_order` multiple times in the same function, and each execution is tracked independently +- **Isolation**: Child contexts act like checkpointed functions - once done, they're done + +[↑ Back to top](#table-of-contents) + +## Method signatures + +### context.run_in_child_context() + +```python +def run_in_child_context( + func: Callable[[DurableContext], T], + name: str | None = None, +) -> T +``` + +**Parameters:** + +- `func` - A callable that receives a `DurableContext` and returns a result. Use the `@durable_with_child_context` decorator to create context functions. +- `name` (optional) - A name for the child context, useful for debugging and testing + +**Returns:** The result of executing the context function. + +**Raises:** Any exception raised by the context function. + +### @durable_with_child_context decorator + +```python +@durable_with_child_context +def my_context_function(ctx: DurableContext, arg1: str, arg2: int) -> dict: + # Your operations here + return result +``` + +The decorator wraps your function so it can be called with arguments and passed to `context.run_in_child_context()`. + +[↑ Back to top](#table-of-contents) + +## Using the @durable_with_child_context decorator + +The `@durable_with_child_context` decorator marks a function as a context function. Context functions receive a `DurableContext` as their first parameter and can execute any durable operations: + +```python +from aws_durable_execution_sdk_python import ( + DurableContext, + durable_execution, + durable_with_child_context, +) + +@durable_with_child_context +def process_order(ctx: DurableContext, order_id: str, items: list) -> dict: + """Process an order in a child context.""" + # Validate items + validation = ctx.step( + lambda _: validate_items(items), + name="validate_items" + ) + + if not validation["valid"]: + return {"status": "invalid", "errors": validation["errors"]} + + # Calculate total + total = ctx.step( + lambda _: calculate_total(items), + name="calculate_total" + ) + + # Process payment + payment = ctx.step( + lambda _: process_payment(order_id, total), + name="process_payment" + ) + + return { + "order_id": order_id, + "total": total, + "payment_status": payment["status"], + } + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + """Process an order using a child context.""" + order_id = event["order_id"] + items = event["items"] + + # Execute order processing in child context + result = context.run_in_child_context( + process_order(order_id, items) + ) + + return result +``` + +**Why use @durable_with_child_context?** + +The decorator wraps your function so it can be called with arguments and passed to `context.run_in_child_context()`. It provides a convenient way to define reusable workflow components. + +[↑ Back to top](#table-of-contents) + +## Naming child contexts + +You can name child contexts explicitly using the `name` parameter. 
Named contexts are easier to identify in logs and tests: + +```python +@durable_with_child_context +def data_processing(ctx: DurableContext, data: dict) -> dict: + """Process data in a child context.""" + result = ctx.step(lambda _: transform_data(data), name="transform") + return result + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + # Named child context + result = context.run_in_child_context( + data_processing(event["data"]), + name="data_processor" + ) + return result +``` + +**Naming best practices:** + +- Use descriptive names that explain what the context does +- Keep names consistent across your codebase +- Use names when you need to inspect specific contexts in tests +- Names help with debugging and monitoring + +[↑ Back to top](#table-of-contents) + +## Use cases for isolation + +### Organizing complex workflows + +Use child contexts to organize complex workflows into logical units: + +```python +@durable_with_child_context +def inventory_check(ctx: DurableContext, items: list) -> dict: + """Check inventory for all items.""" + results = [] + for item in items: + available = ctx.step( + lambda _: check_item_availability(item), + name=f"check_{item['id']}" + ) + results.append({"item_id": item["id"], "available": available}) + + return {"all_available": all(r["available"] for r in results)} + +@durable_with_child_context +def payment_processing(ctx: DurableContext, order_total: float) -> dict: + """Process payment in isolated context.""" + auth = ctx.step( + lambda _: authorize_payment(order_total), + name="authorize" + ) + + if auth["approved"]: + capture = ctx.step( + lambda _: capture_payment(auth["transaction_id"]), + name="capture" + ) + return {"status": "completed", "transaction_id": capture["id"]} + + return {"status": "declined"} + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + """Process order with organized child contexts.""" + # Check inventory + inventory = context.run_in_child_context( + inventory_check(event["items"]), + name="inventory_check" + ) + + if not inventory["all_available"]: + return {"status": "failed", "reason": "items_unavailable"} + + # Process payment + payment = context.run_in_child_context( + payment_processing(event["total"]), + name="payment_processing" + ) + + if payment["status"] != "completed": + return {"status": "failed", "reason": "payment_declined"} + + return { + "status": "success", + "transaction_id": payment["transaction_id"], + } +``` + +### Creating reusable components + +Child contexts make it easy to create reusable workflow components: + +```python +@durable_with_child_context +def send_notifications(ctx: DurableContext, user_id: str, message: str) -> dict: + """Send notifications through multiple channels.""" + email_sent = ctx.step( + lambda _: send_email(user_id, message), + name="send_email" + ) + + sms_sent = ctx.step( + lambda _: send_sms(user_id, message), + name="send_sms" + ) + + push_sent = ctx.step( + lambda _: send_push_notification(user_id, message), + name="send_push" + ) + + return { + "email": email_sent, + "sms": sms_sent, + "push": push_sent, + } + +@durable_execution +def order_confirmation_handler(event: dict, context: DurableContext) -> dict: + """Send order confirmation notifications.""" + notifications = context.run_in_child_context( + send_notifications( + event["user_id"], + f"Order {event['order_id']} confirmed" + ), + name="order_notifications" + ) + + return {"notifications_sent": notifications} + +@durable_execution +def 
shipment_handler(event: dict, context: DurableContext) -> dict: + """Send shipment notifications.""" + notifications = context.run_in_child_context( + send_notifications( + event["user_id"], + f"Order {event['order_id']} shipped" + ), + name="shipment_notifications" + ) + + return {"notifications_sent": notifications} +``` + +[↑ Back to top](#table-of-contents) + +## Advanced patterns + +### Conditional child contexts + +Execute child contexts based on conditions: + +```python +@durable_with_child_context +def standard_processing(ctx: DurableContext, data: dict) -> dict: + """Standard data processing.""" + result = ctx.step(lambda _: process_standard(data), name="process") + return {"type": "standard", "result": result} + +@durable_with_child_context +def premium_processing(ctx: DurableContext, data: dict) -> dict: + """Premium data processing with extra steps.""" + enhanced = ctx.step(lambda _: enhance_data(data), name="enhance") + validated = ctx.step(lambda _: validate_premium(enhanced), name="validate") + result = ctx.step(lambda _: process_premium(validated), name="process") + return {"type": "premium", "result": result} + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + """Process data based on customer tier.""" + customer_tier = event.get("tier", "standard") + + if customer_tier == "premium": + result = context.run_in_child_context( + premium_processing(event["data"]), + name="premium_processing" + ) + else: + result = context.run_in_child_context( + standard_processing(event["data"]), + name="standard_processing" + ) + + return result +``` + +### Error handling in child contexts + +Handle errors within child contexts: + +```python +@durable_with_child_context +def risky_operation(ctx: DurableContext, data: dict) -> dict: + """Operation that might fail.""" + try: + result = ctx.step( + lambda _: potentially_failing_operation(data), + name="risky_step" + ) + return {"status": "success", "result": result} + except Exception as e: + # Handle error within child context + fallback = ctx.step( + lambda _: fallback_operation(data), + name="fallback" + ) + return {"status": "fallback", "result": fallback, "error": str(e)} + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + """Handle errors in child context.""" + result = context.run_in_child_context( + risky_operation(event["data"]), + name="risky_operation" + ) + + if result["status"] == "fallback": + # Log or handle fallback scenario + return {"warning": "Used fallback", "result": result["result"]} + + return result +``` + +### Parallel child contexts + +Execute multiple child contexts in parallel: + +```python +@durable_with_child_context +def process_region_a(ctx: DurableContext, data: dict) -> dict: + """Process data for region A.""" + result = ctx.step(lambda _: process_for_region("A", data), name="process_a") + return {"region": "A", "result": result} + +@durable_with_child_context +def process_region_b(ctx: DurableContext, data: dict) -> dict: + """Process data for region B.""" + result = ctx.step(lambda _: process_for_region("B", data), name="process_b") + return {"region": "B", "result": result} + +@durable_with_child_context +def process_region_c(ctx: DurableContext, data: dict) -> dict: + """Process data for region C.""" + result = ctx.step(lambda _: process_for_region("C", data), name="process_c") + return {"region": "C", "result": result} + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + """Process data for multiple regions in 
parallel.""" + data = event["data"] + + # Execute child contexts in parallel + result_a = context.run_in_child_context( + process_region_a(data), + name="region_a" + ) + + result_b = context.run_in_child_context( + process_region_b(data), + name="region_b" + ) + + result_c = context.run_in_child_context( + process_region_c(data), + name="region_c" + ) + + return { + "regions_processed": 3, + "results": [result_a, result_b, result_c], + } +``` + +[↑ Back to top](#table-of-contents) + +## Best practices + +**Use child contexts for logical grouping** - Group related operations together in a child context to improve code organization and readability. + +**Name contexts descriptively** - Use clear names that explain what the context does. This helps with debugging and testing. + +**Keep context functions focused** - Each context function should have a single, well-defined purpose. Don't create overly complex context functions. + +**Use child contexts for large data** - When processing data that exceeds step size limits, break it into multiple steps within a child context. + +**Create reusable components** - Design context functions that can be reused across different workflows. + +**Handle errors appropriately** - Decide whether to handle errors within the child context or let them propagate to the parent. + +**Pass data through parameters** - Pass data to child contexts through function parameters, not global variables. + +**Document context functions** - Add docstrings explaining what the context does and what it returns. + +**Test context functions independently** - Write tests for individual context functions to ensure they work correctly in isolation. + +[↑ Back to top](#table-of-contents) + +## FAQ + +**Q: What's the difference between a child context and a step?** + +A: A step is a single operation that checkpoints its result. A child context is a collection of operations (steps, waits, callbacks, etc.) that execute in an isolated scope. The entire child context result is checkpointed as a single unit in the parent context. + +**Q: Can I use steps inside child contexts?** + +A: Yes, child contexts can contain any durable operations: steps, waits, and callbacks. + +**Q: When should I use a child context vs multiple steps?** + +A: Use child contexts when you want to: +- Group related operations logically +- Create reusable workflow components +- Handle data larger than step size limits +- Isolate operations from the parent context + +Use multiple steps when operations are independent and don't need isolation. + +**Q: Can child contexts access the parent context?** + +A: No, child contexts receive their own `DurableContext` instance. They can't access the parent context directly. Pass data through function parameters. + +**Q: What happens if a child context fails?** + +A: If an operation within a child context raises an exception, the exception propagates to the parent context unless you handle it within the child context. + +**Q: Can I create multiple child contexts in one function?** + +A: Yes, you can create as many child contexts as needed. They execute sequentially unless you use parallel patterns. + +**Q: Can I use callbacks in child contexts?** + +A: Yes, child contexts support all durable operations including callbacks, waits, and steps. + +**Q: Can I pass large data to child contexts?** + +A: Yes, but be mindful of Lambda payload limits. If data is very large, consider storing it externally (S3, DynamoDB) and passing references. 
+ +**Q: Do child contexts share the same logger?** + +A: Yes, the logger is inherited from the parent context, but you can access it through the child context's `ctx.logger`. + +[↑ Back to top](#table-of-contents) + +## Testing + +You can test child contexts using the testing SDK. The test runner executes your function and lets you inspect child context results. + +### Basic child context testing + +```python +import pytest +from aws_durable_execution_sdk_python_testing import InvocationStatus +from examples.src.run_in_child_context import run_in_child_context + +@pytest.mark.durable_execution( + handler=run_in_child_context.handler, + lambda_function_name="run in child context", +) +def test_run_in_child_context(durable_runner): + """Test basic child context execution.""" + with durable_runner: + result = durable_runner.run(input="test", timeout=10) + + # Check overall status + assert result.status is InvocationStatus.SUCCEEDED + assert result.result == "Child context result: 10" +``` + +### Inspecting child context operations + +Use `result.get_context()` to inspect child context results: + +```python +@pytest.mark.durable_execution( + handler=run_in_child_context.handler, + lambda_function_name="run in child context", +) +def test_child_context_operations(durable_runner): + """Test and inspect child context operations.""" + with durable_runner: + result = durable_runner.run(input="test", timeout=10) + + # Verify child context operation exists + context_ops = [ + op for op in result.operations + if op.operation_type.value == "CONTEXT" + ] + assert len(context_ops) >= 1 + + # Get child context by name (if named) + child_result = result.get_context("child_operation") + assert child_result is not None +``` + +### Testing large data handling + +Test that child contexts handle large data correctly: + +```python +from examples.src.run_in_child_context import run_in_child_context_large_data + +@pytest.mark.durable_execution( + handler=run_in_child_context_large_data.handler, + lambda_function_name="run in child context large data", +) +def test_large_data_processing(durable_runner): + """Test large data handling with child context.""" + with durable_runner: + result = durable_runner.run(input=None, timeout=30) + + result_data = result.result + + # Verify execution succeeded + assert result.status is InvocationStatus.SUCCEEDED + assert result_data["success"] is True + + # Verify large data was processed + assert result_data["summary"]["totalDataSize"] > 240 # ~250KB + assert result_data["summary"]["stepsExecuted"] == 5 + + # Verify data integrity across wait + assert result_data["dataIntegrityCheck"] is True +``` + + + +### Testing error handling + +Test that child contexts handle errors correctly: + +```python +@pytest.mark.durable_execution( + handler=error_handling_handler, + lambda_function_name="error_handling", +) +def test_child_context_error_handling(durable_runner): + """Test error handling in child context.""" + with durable_runner: + result = durable_runner.run(input={"data": "invalid"}, timeout=10) + + # Function should handle error gracefully + assert result.status is InvocationStatus.SUCCEEDED + assert result.result["status"] == "fallback" + assert "error" in result.result +``` + +For more testing patterns, see: +- [Basic tests](../testing-patterns/basic-tests.md) - Simple test examples +- [Complex workflows](../testing-patterns/complex-workflows.md) - Multi-step workflow testing +- [Best practices](../testing-patterns/best-practices.md) - Testing recommendations + +[↑ Back to 
top](#table-of-contents)
+
+## See also
+
+- [DurableContext API](../api-reference/context.md) - Complete context reference
+- [Steps](steps.md) - Use steps within child contexts
+- [Wait operations](wait.md) - Use waits within child contexts
+- [Callbacks](callbacks.md) - Use callbacks within child contexts
+- [Parallel operations](parallel.md) - Execute child contexts in parallel
+- [Examples](https://github.com/awslabs/aws-durable-execution-sdk-python/tree/main/examples/src/run_in_child_context) - More child context examples
+
+[↑ Back to top](#table-of-contents)
+
+## License
+
+See the [LICENSE](../../LICENSE) file for our project's licensing.
+
+[↑ Back to top](#table-of-contents)
diff --git a/docs/core/invoke.md b/docs/core/invoke.md
new file mode 100644
index 0000000..a6bac65
--- /dev/null
+++ b/docs/core/invoke.md
@@ -0,0 +1,774 @@
+# Invoke Operations
+
+## Table of Contents
+
+- [Terminology](#terminology)
+- [What are invoke operations?](#what-are-invoke-operations)
+- [Key features](#key-features)
+- [Getting started](#getting-started)
+- [Method signature](#method-signature)
+- [Function composition patterns](#function-composition-patterns)
+- [Configuration](#configuration)
+- [Error handling](#error-handling)
+- [Advanced patterns](#advanced-patterns)
+- [Best practices](#best-practices)
+- [FAQ](#faq)
+- [Testing](#testing)
+- [See also](#see-also)
+
+[← Back to main index](../index.md)
+
+## Terminology
+
+**Invoke operation** - A durable operation that calls another Lambda function (durable or on-demand) and waits for its result. Created using `context.invoke()`.
+
+**Chained invocation** - The process of one durable function calling another durable function. The calling function suspends while the invoked function executes.
+
+**Function composition** - Building complex workflows by combining multiple durable functions, where each function handles a specific part of the overall process.
+
+**Payload** - The input data sent to the invoked function. Can be any JSON-serializable value or use custom serialization.
+
+**Timeout** - The maximum time to wait for an invoked function to complete. If exceeded, the invoke operation fails with a timeout error.
+
+[↑ Back to top](#table-of-contents)
+
+## What are invoke operations?
+
+Invoke operations let you call other Lambda functions from within your durable function. You can invoke both durable functions and regular on-demand Lambda functions. This enables function composition, where you break complex workflows into smaller, reusable functions. The calling function suspends while the invoked function executes, and resumes when the result is available.
+
+Use invoke operations to:
+- Modularize complex workflows into manageable functions
+- Call existing Lambda functions (durable or on-demand) from your workflow
+- Isolate different parts of your business logic
+- Build hierarchical execution patterns
+- Coordinate multiple Lambda functions durably
+- Integrate with existing Lambda-based services
+
+When you invoke a function, the SDK:
+1. Checkpoints the invoke operation
+2. Triggers the target function asynchronously
+3. Suspends the calling function
+4. Resumes the calling function when the result is ready
+5.
Returns the result or propagates any errors + +[↑ Back to top](#table-of-contents) + +## Key features + +- **Automatic checkpointing** - Invoke operations are checkpointed before execution +- **Asynchronous execution** - Invoked functions run independently without blocking resources +- **Result handling** - Results are automatically deserialized and returned +- **Error propagation** - Errors from invoked functions propagate to the caller +- **Timeout support** - Configure maximum wait time for invoked functions +- **Custom serialization** - Control how payloads and results are serialized +- **Named operations** - Identify invoke operations by name for debugging + +[↑ Back to top](#table-of-contents) + +## Getting started + +Here's a simple example of invoking another durable function: + +```python +from aws_durable_execution_sdk_python import ( + DurableContext, + durable_execution, +) + +@durable_execution +def process_order(event: dict, context: DurableContext) -> dict: + """Process an order by validating and charging.""" + order_id = event["order_id"] + amount = event["amount"] + + # Invoke validation function + validation_result = context.invoke( + function_name="validate-order", + payload={"order_id": order_id}, + name="validate_order", + ) + + if not validation_result["valid"]: + return {"status": "rejected", "reason": validation_result["reason"]} + + # Invoke payment function + payment_result = context.invoke( + function_name="process-payment", + payload={"order_id": order_id, "amount": amount}, + name="process_payment", + ) + + return { + "status": "completed", + "order_id": order_id, + "transaction_id": payment_result["transaction_id"], + } +``` + +When this function runs: +1. It invokes the `validate-order` function and waits for the result +2. If validation succeeds, it invokes the `process-payment` function +3. Each invoke operation is checkpointed automatically +4. If the function is interrupted, it resumes from the last completed invoke + +[↑ Back to top](#table-of-contents) + +## Method signature + +### context.invoke() + +```python +def invoke( + function_name: str, + payload: P, + name: str | None = None, + config: InvokeConfig[P, R] | None = None, +) -> R +``` + +**Parameters:** + +- `function_name` - The name of the Lambda function to invoke. This should be the function name, not the ARN. +- `payload` - The input data to send to the invoked function. Can be any JSON-serializable value. +- `name` (optional) - A name for the invoke operation, useful for debugging and testing. +- `config` (optional) - An `InvokeConfig` object to configure timeout and serialization. + +**Returns:** The result returned by the invoked function. 
+ +**Raises:** +- `CallableRuntimeError` - If the invoked function fails or times out + +[↑ Back to top](#table-of-contents) + +## Function composition patterns + +### Sequential invocations + +Call multiple functions in sequence, where each depends on the previous result: + +```python +@durable_execution +def orchestrate_workflow(event: dict, context: DurableContext) -> dict: + """Orchestrate a multi-step workflow.""" + user_id = event["user_id"] + + # Step 1: Fetch user data + user = context.invoke( + function_name="fetch-user", + payload={"user_id": user_id}, + name="fetch_user", + ) + + # Step 2: Enrich user data + enriched_user = context.invoke( + function_name="enrich-user-data", + payload=user, + name="enrich_user", + ) + + # Step 3: Generate report + report = context.invoke( + function_name="generate-report", + payload=enriched_user, + name="generate_report", + ) + + return report +``` + +### Conditional invocations + +Invoke different functions based on conditions: + +```python +@durable_execution +def process_document(event: dict, context: DurableContext) -> dict: + """Process a document based on its type.""" + document_type = event["document_type"] + document_data = event["data"] + + if document_type == "pdf": + result = context.invoke( + function_name="process-pdf", + payload=document_data, + name="process_pdf", + ) + elif document_type == "image": + result = context.invoke( + function_name="process-image", + payload=document_data, + name="process_image", + ) + else: + result = context.invoke( + function_name="process-generic", + payload=document_data, + name="process_generic", + ) + + return result +``` + +### Hierarchical workflows + +Build hierarchical workflows where parent functions coordinate child functions: + +```python +@durable_execution +def parent_workflow(event: dict, context: DurableContext) -> dict: + """Parent workflow that coordinates sub-workflows.""" + project_id = event["project_id"] + + # Invoke sub-workflow for data collection + data = context.invoke( + function_name="collect-data-workflow", + payload={"project_id": project_id}, + name="collect_data", + ) + + # Invoke sub-workflow for data processing + processed = context.invoke( + function_name="process-data-workflow", + payload=data, + name="process_data", + ) + + # Invoke sub-workflow for reporting + report = context.invoke( + function_name="generate-report-workflow", + payload=processed, + name="generate_report", + ) + + return report +``` + +### Invoking on-demand functions + +You can invoke regular Lambda functions (non-durable) from your durable workflow: + +```python +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + """Invoke a mix of durable and on-demand functions.""" + user_id = event["user_id"] + + # Invoke a regular Lambda function for data fetching + user_data = context.invoke( + function_name="fetch-user-data", # Regular Lambda function + payload={"user_id": user_id}, + name="fetch_user", + ) + + # Invoke a durable function for complex processing + processed = context.invoke( + function_name="process-user-workflow", # Durable function + payload=user_data, + name="process_user", + ) + + # Invoke another regular Lambda for notifications + notification = context.invoke( + function_name="send-notification", # Regular Lambda function + payload={"user_id": user_id, "data": processed}, + name="send_notification", + ) + + return { + "status": "completed", + "notification_sent": notification["sent"], + } +``` + +[↑ Back to top](#table-of-contents) + +## Configuration + 
+Configure invoke behavior using `InvokeConfig`: + +```python +from aws_durable_execution_sdk_python import ( + DurableContext, + durable_execution, +) +from aws_durable_execution_sdk_python.config import Duration, InvokeConfig + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + # Configure invoke with timeout + invoke_config = InvokeConfig( + timeout=Duration.from_minutes(5), + ) + + result = context.invoke( + function_name="long-running-function", + payload=event, + name="long_running", + config=invoke_config, + ) + + return result +``` + +### InvokeConfig parameters + +**timeout** - Maximum duration to wait for the invoked function to complete. Default is no timeout. Use this to prevent long-running invocations from blocking execution indefinitely. + +**serdes_payload** - Custom serialization/deserialization for the payload sent to the invoked function. If None, uses default JSON serialization. + +**serdes_result** - Custom serialization/deserialization for the result returned from the invoked function. If None, uses default JSON serialization. + +**tenant_id** - Optional tenant identifier for multi-tenant isolation. If provided, the invocation will be scoped to this tenant. + +### Setting timeouts + +Use the `Duration` class to set timeouts: + +```python +from aws_durable_execution_sdk_python.config import Duration, InvokeConfig + +# Timeout after 30 seconds +config = InvokeConfig(timeout=Duration.from_seconds(30)) + +# Timeout after 5 minutes +config = InvokeConfig(timeout=Duration.from_minutes(5)) + +# Timeout after 2 hours +config = InvokeConfig(timeout=Duration.from_hours(2)) +``` + +[↑ Back to top](#table-of-contents) + +## Error handling + +### Handling invocation errors + +Errors from invoked functions propagate to the calling function. 
Catch and handle them as needed:
+
+```python
+from aws_durable_execution_sdk_python import (
+    DurableContext,
+    durable_execution,
+    CallableRuntimeError,
+)
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+    """Handle errors from invoked functions."""
+    try:
+        result = context.invoke(
+            function_name="risky-function",
+            payload=event,
+            name="risky_operation",
+        )
+        return {"status": "success", "result": result}
+
+    except CallableRuntimeError as e:
+        # Handle the error from the invoked function
+        context.logger.error(f"Invoked function failed: {e}")
+        return {
+            "status": "failed",
+            "error": str(e),
+        }
+```
+
+### Timeout handling
+
+Handle timeout errors specifically:
+
+```python
+from aws_durable_execution_sdk_python.config import Duration, InvokeConfig
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+    """Handle timeout errors."""
+    config = InvokeConfig(timeout=Duration.from_seconds(30))
+
+    try:
+        result = context.invoke(
+            function_name="slow-function",
+            payload=event,
+            config=config,
+        )
+        return {"status": "success", "result": result}
+
+    except CallableRuntimeError as e:
+        if "timed out" in str(e).lower():
+            context.logger.warning("Function timed out, using fallback")
+            return {"status": "timeout", "fallback": True}
+        raise
+```
+
+### Retry patterns
+
+Implement retry logic for failed invocations:
+
+```python
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+    """Retry failed invocations."""
+    max_retries = 3
+
+    for attempt in range(max_retries):
+        try:
+            result = context.invoke(
+                function_name="unreliable-function",
+                payload=event,
+                name=f"attempt_{attempt + 1}",
+            )
+            return {"status": "success", "result": result, "attempts": attempt + 1}
+
+        except CallableRuntimeError as e:
+            if attempt == max_retries - 1:
+                # Last attempt failed
+                return {
+                    "status": "failed",
+                    "error": str(e),
+                    "attempts": max_retries,
+                }
+            # Wait before retrying
+            context.wait(Duration.from_seconds(2 ** attempt))
+
+    return {"status": "failed", "reason": "max_retries_exceeded"}
+```
+
+[↑ Back to top](#table-of-contents)
+
+## Advanced patterns
+
+### Custom serialization
+
+Use custom serialization for complex data types:
+
+```python
+import json
+
+from aws_durable_execution_sdk_python.config import InvokeConfig
+from aws_durable_execution_sdk_python.serdes import SerDes
+
+class CustomSerDes(SerDes):
+    """Custom serialization for complex objects."""
+
+    def serialize(self, value):
+        # Custom serialization logic
+        return json.dumps({"custom": value})
+
+    def deserialize(self, data: str):
+        # Custom deserialization logic
+        obj = json.loads(data)
+        return obj["custom"]
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+    """Use custom serialization."""
+    config = InvokeConfig(
+        serdes_payload=CustomSerDes(),
+        serdes_result=CustomSerDes(),
+    )
+
+    result = context.invoke(
+        function_name="custom-function",
+        payload={"complex": "data"},
+        config=config,
+    )
+
+    return result
+```
+
+### Fan-out pattern
+
+Fan out the same payload to multiple functions. The invocations below run sequentially - see the FAQ for parallel options:
+
+```python
+from aws_durable_execution_sdk_python import durable_step, StepContext
+
+@durable_step
+def invoke_service(step_context: StepContext, service_name: str, data: dict) -> dict:
+    """Invoke a service and return its result."""
+    # Note: This is a simplified example. In practice, you'd need access to context
+    # which isn't directly available in step functions.
+from aws_durable_execution_sdk_python import DurableContext, durable_execution
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+    """Fan out to multiple services."""
+    services = ["service-a", "service-b", "service-c"]
+
+    # Invoke each service; each result is checkpointed as it completes
+    results = []
+    for service in services:
+        result = context.invoke(
+            function_name=service,
+            payload=event,
+            name=f"invoke_{service}",
+        )
+        results.append(result)
+
+    return {"results": results}
+```
+
+### Passing context between invocations
+
+Pass data between invoked functions:
+
+```python
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+    """Pass context between invocations."""
+    # First invocation creates context
+    initial_context = context.invoke(
+        function_name="initialize-context",
+        payload=event,
+        name="initialize",
+    )
+
+    # Second invocation uses the context
+    processed = context.invoke(
+        function_name="process-with-context",
+        payload={
+            "data": event["data"],
+            "context": initial_context,
+        },
+        name="process",
+    )
+
+    # Third invocation finalizes
+    final_result = context.invoke(
+        function_name="finalize",
+        payload={
+            "processed": processed,
+            "context": initial_context,
+        },
+        name="finalize",
+    )
+
+    return final_result
+```
+
+[↑ Back to top](#table-of-contents)
+
+## Best practices
+
+**Use descriptive function names** - Choose clear, descriptive names for the functions you invoke to make workflows easier to understand.
+
+**Name invoke operations** - Use the `name` parameter to identify invoke operations in logs and tests.
+
+**Set appropriate timeouts** - Configure timeouts based on expected execution time: slightly above the worst case you expect, so healthy-but-slow invocations aren't cut off and hung invocations don't block indefinitely.
+
+**Handle errors explicitly** - Catch and handle errors from invoked functions. Don't let them propagate unexpectedly.
+
+**Keep payloads small** - Large payloads increase serialization overhead. Consider passing references instead of large data.
+
+**Design for idempotency** - Invoked functions should be idempotent since they might be retried.
+
+**Use hierarchical composition** - Break complex workflows into layers of functions, where each layer handles a specific level of abstraction.
+
+**Avoid deep nesting** - Don't create deeply nested invocation chains. Keep hierarchies shallow for better observability.
+
+**Log invocation boundaries** - Log when invoking functions and when receiving results for better debugging.
+
+**Consider cost implications** - Each invoke operation triggers a separate Lambda invocation, which has cost implications.
+
+**Mix durable and on-demand functions** - You can invoke both durable and regular Lambda functions: a durable orchestrator checkpoints the results of the on-demand functions it invokes, so those functions don't need to be durable themselves. Use durable functions for complex workflows and on-demand functions for simple operations.
+
+[↑ Back to top](#table-of-contents)
+
+## FAQ
+
+**Q: What's the difference between invoke and step?**
+
+A: `invoke()` calls another durable function (Lambda), while `step()` executes code within the current function. Use invoke for function composition, use step for checkpointing operations within a function.
+
+**Q: Can I invoke non-durable functions?**
+
+A: Yes, `context.invoke()` can call both durable functions and regular on-demand Lambda functions. The invoke operation works with any Lambda function that accepts and returns JSON-serializable data.
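+
+For example, invoking a regular on-demand function looks the same as invoking a durable one (the function name here is illustrative):
+
+```python
+# "plain-on-demand-function" is a hypothetical regular Lambda function
+result = context.invoke(
+    function_name="plain-on-demand-function",
+    payload={"order_id": "order-123"},
+    name="call_plain_function",
+)
+```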
+ + +**Q: How do I pass the result from one invoke to another?** + +A: Simply use the return value. The type of the return value is governed by the `serdes_result` configuration: + +```python +result1 = context.invoke("function-1", payload1) +result2 = context.invoke("function-2", result1) +``` + +**Q: What happens if an invoked function fails?** + +A: The error propagates to the calling function as a `CallableRuntimeError`. You can catch and handle it. + +**Q: Can I invoke the same function multiple times?** + +A: Yes, you can invoke the same function multiple times with different payloads or names. + +**Q: How do I invoke a function in a different AWS account?** + +A: The `function_name` parameter accepts function names in the same account. For cross-account invocations, you need appropriate IAM permissions and may need to use function ARNs (check AWS documentation for cross-account Lambda invocations). + +**Q: What's the maximum timeout I can set?** + +A: The timeout is limited by Lambda's maximum execution time (15 minutes). However, durable functions can run longer by suspending and resuming. + +**Q: Can I invoke functions in parallel?** + +A: Not directly with `context.invoke()`. For parallel execution, consider using `context.parallel()` with steps that perform invocations, or invoke multiple functions sequentially. + +**Q: How do I debug invoke operations?** + +A: Use the `name` parameter to identify operations in logs. Check CloudWatch logs for both the calling and invoked functions. + +**Q: What happens if I don't set a timeout?** + +A: The invoke operation waits indefinitely for the invoked function to complete. It's recommended to set timeouts for better error handling. + +**Q: What's the difference between context.invoke() and using boto3's Lambda client to invoke functions?** + +A: When you use `context.invoke()`, the SDK suspends your durable function's execution while waiting for the result. This means you don't pay for Lambda compute time while waiting. With boto3's Lambda client, your function stays active and consumes billable compute time while waiting for the response. Additionally, `context.invoke()` automatically checkpoints the operation, handles errors durably, and integrates with the durable execution lifecycle. + +[↑ Back to top](#table-of-contents) + +## Testing + +You can test invoke operations using the testing SDK. The test runner executes your function and lets you inspect invoke operations. 
+
+### Basic invoke testing
+
+```python
+import pytest
+from aws_durable_execution_sdk_python_testing import InvocationStatus
+from my_function import handler
+
+@pytest.mark.durable_execution(
+    handler=handler,
+    lambda_function_name="my_function",
+)
+def test_invoke(durable_runner):
+    """Test a function with invoke operations."""
+    with durable_runner:
+        result = durable_runner.run(
+            input={"order_id": "order-123", "amount": 100.0},
+            timeout=30,
+        )
+
+    # Check overall status
+    assert result.status is InvocationStatus.SUCCEEDED
+
+    # Check final result
+    assert result.result["status"] == "completed"
+```
+
+### Inspecting invoke operations
+
+Use the result object to inspect invoke operations:
+
+```python
+@pytest.mark.durable_execution(
+    handler=handler,
+    lambda_function_name="my_function",
+)
+def test_invoke_operations(durable_runner):
+    """Test and inspect invoke operations."""
+    with durable_runner:
+        result = durable_runner.run(input={"user_id": "user-123"}, timeout=30)
+
+    # Get all operations
+    operations = result.operations
+
+    # Find invoke operations
+    invoke_ops = [op for op in operations if op.operation_type == "CHAINED_INVOKE"]
+
+    # Verify invoke operations were created
+    assert len(invoke_ops) == 2
+
+    # Check specific invoke operation
+    validate_op = next(op for op in invoke_ops if op.name == "validate_order")
+    assert validate_op.status is InvocationStatus.SUCCEEDED
+```
+
+### Testing error handling
+
+Test that invoke errors are handled correctly:
+
+```python
+@pytest.mark.durable_execution(
+    handler=handler_with_error_handling,
+    lambda_function_name="error_handler_function",
+)
+def test_invoke_error_handling(durable_runner):
+    """Test invoke error handling."""
+    with durable_runner:
+        result = durable_runner.run(input={}, timeout=30)
+
+    # Function should handle the error gracefully
+    assert result.status is InvocationStatus.SUCCEEDED
+    assert result.result["status"] == "failed"
+    assert "error" in result.result
+```
+
+### Testing timeouts
+
+Test that timeouts are handled correctly:
+
+```python
+@pytest.mark.durable_execution(
+    handler=handler_with_timeout,
+    lambda_function_name="timeout_function",
+)
+def test_invoke_timeout(durable_runner):
+    """Test invoke timeout handling."""
+    # handler_with_timeout configures its own InvokeConfig timeout
+    with durable_runner:
+        result = durable_runner.run(input={}, timeout=60)
+
+    # Check that timeout was handled
+    assert result.status is InvocationStatus.SUCCEEDED
+    assert result.result["status"] == "timeout"
+```
+
+### Mocking invoked functions
+
+The testing framework handles invocations internally, so you can exercise your orchestration logic without deploying every invoked function:
+
+```python
+@pytest.mark.durable_execution(
+    handler=handler,
+    lambda_function_name="my_function",
+)
+def test_invoke_with_mock(durable_runner):
+    """Test invoke orchestration without deploying invoked functions."""
+    with durable_runner:
+        result = durable_runner.run(
+            input={"order_id": "order-123"},
+            timeout=30,
+        )
+
+    # Verify the orchestration logic
+    assert result.status is InvocationStatus.SUCCEEDED
+```
+
+For more testing patterns, see:
+- [Basic tests](../testing-patterns/basic-tests.md) - Simple test examples
+- [Complex workflows](../testing-patterns/complex-workflows.md) - Multi-step workflow testing
+- [Best practices](../testing-patterns/best-practices.md) - Testing recommendations
+
+[↑ Back to
top](#table-of-contents) + +## See also + +- [Steps](steps.md) - Execute code with checkpointing +- [Child contexts](child-contexts.md) - Organize operations hierarchically +- [Parallel operations](parallel.md) - Execute multiple operations concurrently +- [Error handling](../advanced/error-handling.md) - Handle errors in durable functions +- [DurableContext API](../api-reference/context.md) - Complete context reference + +[↑ Back to top](#table-of-contents) + +## License + +See the [LICENSE](../../LICENSE) file for our project's licensing. + +[↑ Back to top](#table-of-contents) diff --git a/docs/core/logger.md b/docs/core/logger.md new file mode 100644 index 0000000..71f3d02 --- /dev/null +++ b/docs/core/logger.md @@ -0,0 +1,663 @@ +# Logger integration + +The Durable Execution SDK automatically enriches your logs with execution context, making it easy to trace operations across checkpoints and replays. You can use the built-in logger or integrate with Powertools for AWS Lambda (Python) for advanced structured logging. + +## Table of contents + +- [Key features](#key-features) +- [Terminology](#terminology) +- [Getting started](#getting-started) +- [Method signature](#method-signature) +- [Automatic context enrichment](#automatic-context-enrichment) +- [Adding custom metadata](#adding-custom-metadata) +- [Logger inheritance in child contexts](#logger-inheritance-in-child-contexts) +- [Integration with Powertools for AWS Lambda (Python)](#integration-with-powertools-for-aws-lambda-python) +- [Replay behavior and log deduplication](#replay-behavior-and-log-deduplication) +- [Best practices](#best-practices) +- [FAQ](#faq) +- [Testing logger integration](#testing-logger-integration) +- [See also](#see-also) + +[← Back to main index](../index.md) + +## Key features + +- Automatic log deduplication during replays - logs from completed operations don't repeat +- Automatic enrichment with execution context (execution ARN, parent ID, operation name, attempt number) +- Logger inheritance in child contexts for hierarchical tracing +- Compatible with Python's standard logging and Powertools for AWS Lambda (Python) +- Support for custom metadata through the `extra` parameter +- All standard log levels: debug, info, warning, error, exception + +[↑ Back to top](#table-of-contents) + +## Terminology + +**Log deduplication** - The SDK prevents duplicate logs during replays by tracking completed operations. When your function is checkpointed and resumed, logs from already-completed operations aren't emitted again, keeping your CloudWatch logs clean. + +**Context enrichment** - The automatic addition of execution metadata (execution ARN, parent ID, operation name, attempt number) to log entries. The SDK handles this for you, so every log includes tracing information. + +**Logger inheritance** - When you create a child context, it inherits the parent's logger and adds its own context information. This creates a hierarchical logging structure that mirrors your execution flow. + +**Extra metadata** - Additional key-value pairs you can add to log entries using the `extra` parameter. These merge with the automatic context enrichment. 
+ +[↑ Back to top](#table-of-contents) + +## Getting started + +Access the logger through `context.logger` in your durable functions: + +```python +from aws_durable_execution_sdk_python import DurableContext, durable_execution + +@durable_execution +def handler(event: dict, context: DurableContext) -> str: + # Log at the top level + context.logger.info("Starting workflow", extra={"event_id": event.get("id")}) + + # Execute a step + result: str = context.step( + lambda _: "processed", + name="process_data", + ) + + context.logger.info("Workflow completed", extra={"result": result}) + return result +``` + +The logger automatically includes execution context in every log entry. + +### Integration with Lambda Advanced Log Controls + +Durable functions work with Lambda's Advanced Log Controls. You can configure your Lambda function to filter logs by level, which helps reduce CloudWatch Logs costs and noise. When you set a log level filter (like INFO or ERROR), logs below that level are automatically ignored. + +For example, if you set your Lambda function's log level to INFO, debug logs won't appear in CloudWatch Logs: + +```python +@durable_execution +def handler(event: dict, context: DurableContext) -> str: + context.logger.debug("This won't appear if log level is INFO or higher") + context.logger.info("This will appear") + + result: str = context.step( + lambda _: "processed", + name="process_data", + ) + + return result +``` + +Learn more about configuring log levels in the [Lambda Advanced Log Controls documentation](https://docs.aws.amazon.com/lambda/latest/dg/monitoring-cloudwatchlogs.html#monitoring-cloudwatchlogs-advanced). + +[↑ Back to top](#table-of-contents) + +## Method signature + +The logger provides standard logging methods: + +```python +context.logger.debug(msg, *args, extra=None) +context.logger.info(msg, *args, extra=None) +context.logger.warning(msg, *args, extra=None) +context.logger.error(msg, *args, extra=None) +context.logger.exception(msg, *args, extra=None) +``` + +**Parameters:** +- `msg` (object) - The log message. Can include format placeholders. +- `*args` (object) - Arguments for message formatting. +- `extra` (dict[str, object] | None) - Optional dictionary of additional fields to include in the log entry. 
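+
+For example, message placeholders and `extra` fields can be combined. A minimal sketch (the field names are illustrative), using the `%s`-style formatting of Python's standard logging:
+
+```python
+# %s placeholders are filled from *args; extra fields merge into the log entry
+context.logger.info(
+    "Processed %s of %s items",
+    5,
+    10,
+    extra={"batch_id": "batch-42"},
+)
+```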
+ +[↑ Back to top](#table-of-contents) + +## Automatic context enrichment + +The SDK automatically enriches logs with execution metadata: + +```python +@durable_execution +def handler(event: dict, context: DurableContext) -> str: + # This log includes: execution_arn + context.logger.info("Top-level log") + + result: str = context.step( + lambda _: "processed", + name="process_data", + ) + + # This log includes: execution_arn, parent_id, name, attempt + context.logger.info("Step completed") + + return result +``` + +**Enriched fields:** +- `execution_arn` - Always present, identifies the durable execution +- `parent_id` - Present in child contexts, identifies the parent operation +- `name` - Present when the operation has a name +- `attempt` - Present in steps, shows the retry attempt number + +[↑ Back to top](#table-of-contents) + +## Adding custom metadata + +Use the `extra` parameter to add custom fields to your logs: + +```python +@durable_execution +def handler(event: dict, context: DurableContext) -> str: + order_id = event.get("order_id") + + context.logger.info( + "Processing order", + extra={ + "order_id": order_id, + "customer_id": event.get("customer_id"), + "priority": "high" + } + ) + + result: str = context.step( + lambda _: f"order-{order_id}-processed", + name="process_order", + ) + + context.logger.info( + "Order completed", + extra={"order_id": order_id, "result": result} + ) + + return result +``` + +Custom fields merge with the automatic context enrichment, so your logs include both execution metadata and your custom data. + +[↑ Back to top](#table-of-contents) + +## Logger inheritance in child contexts + +Child contexts inherit the parent's logger and add their own context: + +```python +from aws_durable_execution_sdk_python import ( + DurableContext, + durable_execution, + durable_with_child_context, +) + +@durable_with_child_context +def child_workflow(ctx: DurableContext) -> str: + # Logger includes parent_id for the child context + ctx.logger.info("Running in child context") + + # Step in child context has nested parent_id + child_result: str = ctx.step( + lambda _: "child-processed", + name="child_step", + ) + + ctx.logger.info("Child workflow completed", extra={"result": child_result}) + return child_result + +@durable_execution +def handler(event: dict, context: DurableContext) -> str: + # Top-level logger: only execution_arn + context.logger.info("Starting workflow", extra={"event_id": event.get("id")}) + + # Child context inherits logger and adds its own parent_id + result: str = context.run_in_child_context( + child_workflow(), + name="child_workflow" + ) + + context.logger.info("Workflow completed", extra={"result": result}) + return result +``` + +This creates a hierarchical logging structure where you can trace operations from parent to child contexts. + +[↑ Back to top](#table-of-contents) + +## Integration with Powertools for AWS Lambda (Python) + +The SDK is compatible with Powertools for AWS Lambda (Python), giving you structured logging with JSON output and additional features. + +**Powertools for AWS Lambda (Python) benefits:** +- JSON structured logging for CloudWatch Logs Insights +- Automatic Lambda context injection (request ID, function name, etc.) 
+- Correlation IDs for distributed tracing +- Log sampling for cost optimization +- Integration with X-Ray tracing + +### Using Powertools for AWS Lambda (Python) directly + +You can use Powertools for AWS Lambda (Python) directly in your durable functions: + +```python +from aws_lambda_powertools import Logger +from aws_durable_execution_sdk_python import DurableContext, durable_execution + +logger = Logger(service="order-processing") + +@durable_execution +def handler(event: dict, context: DurableContext) -> str: + logger.info("Starting workflow") + + result: str = context.step( + lambda _: "processed", + name="process_data", + ) + + logger.info("Workflow completed", extra={"result": result}) + return result +``` + +This gives you all Powertools for AWS Lambda (Python) features like JSON logging and correlation IDs. + +### Integrating with context.logger + +For better integration with durable execution, set Powertools for AWS Lambda (Python) on the context: + +```python +from aws_lambda_powertools import Logger +from aws_durable_execution_sdk_python import DurableContext, durable_execution + +logger = Logger(service="order-processing") + +@durable_execution +def handler(event: dict, context: DurableContext) -> str: + # Set Powertools for AWS Lambda (Python) on the context + context.set_logger(logger) + + # Now context.logger uses Powertools for AWS Lambda (Python) with automatic enrichment + context.logger.info("Starting workflow", extra={"event_id": event.get("id")}) + + result: str = context.step( + lambda _: "processed", + name="process_data", + ) + + context.logger.info("Workflow completed", extra={"result": result}) + return result +``` + +**Benefits of using context.logger:** +- All Powertools for AWS Lambda (Python) features (JSON logging, correlation IDs, etc.) +- Automatic SDK context enrichment (execution_arn, parent_id, name, attempt) +- Log deduplication during replays (see next section) + +The SDK's context enrichment (execution_arn, parent_id, name, attempt) merges with Powertools for AWS Lambda (Python) fields (service, request_id, function_name, etc.) in the JSON output. + +[↑ Back to top](#table-of-contents) + +## Replay behavior and log deduplication + +A critical feature of `context.logger` is that it prevents duplicate logs during replays. When your durable function is checkpointed and resumed, the SDK replays your code to reach the next operation, but logs from completed operations aren't emitted again. + +### How context.logger prevents duplicate logs + +When you use `context.logger`, the SDK tracks which operations have completed and suppresses logs during replay: + +```python +from aws_durable_execution_sdk_python import DurableContext, durable_execution + +@durable_execution +def handler(event: dict, context: DurableContext) -> str: + # This log appears only once, even if the function is replayed + context.logger.info("Starting workflow") + + # Step 1 - logs appear only once + result1: str = context.step( + lambda _: "step1-done", + name="step_1", + ) + context.logger.info("Step 1 completed", extra={"result": result1}) + + # Step 2 - logs appear only once + result2: str = context.step( + lambda _: "step2-done", + name="step_2", + ) + context.logger.info("Step 2 completed", extra={"result": result2}) + + return f"{result1}-{result2}" +``` + +**What happens during replay:** +1. First invocation: All logs appear (starting workflow, step 1 completed, step 2 completed) +2. 
After checkpoint and resume: Only new logs appear (step 2 completed if step 1 was checkpointed) +3. Your CloudWatch logs show each message only once, making them clean and easy to read + +### Logging behavior with direct logger usage + +When you use a logger directly (not through `context.logger`), logs will be emitted on every replay: + +```python +from aws_lambda_powertools import Logger +from aws_durable_execution_sdk_python import DurableContext, durable_execution + +logger = Logger(service="order-processing") + +@durable_execution +def handler(event: dict, context: DurableContext) -> str: + # This log appears on every replay + logger.info("Starting workflow") + + result1: str = context.step( + lambda _: "step1-done", + name="step_1", + ) + # This log appears on every replay after step 1 + logger.info("Step 1 completed") + + result2: str = context.step( + lambda _: "step2-done", + name="step_2", + ) + # This log appears only once (no more replays after this) + logger.info("Step 2 completed") + + return f"{result1}-{result2}" +``` + +**What happens during replay:** +1. First invocation: All logs appear once +2. After checkpoint and resume: "Starting workflow" and "Step 1 completed" appear again +3. Your CloudWatch logs show duplicate entries for replayed operations + +### Using context.logger with Powertools for AWS Lambda (Python) + +To get both log deduplication and Powertools for AWS Lambda (Python) features, set the Powertools Logger on the context: + +```python +from aws_lambda_powertools import Logger +from aws_durable_execution_sdk_python import DurableContext, durable_execution + +logger = Logger(service="order-processing") + +@durable_execution +def handler(event: dict, context: DurableContext) -> str: + # Set Powertools for AWS Lambda (Python) on the context + context.set_logger(logger) + + # Now you get BOTH: + # - Powertools for AWS Lambda (Python) features (JSON logging, correlation IDs, etc.) + # - Log deduplication during replays + context.logger.info("Starting workflow") + + result1: str = context.step( + lambda _: "step1-done", + name="step_1", + ) + context.logger.info("Step 1 completed", extra={"result": result1}) + + result2: str = context.step( + lambda _: "step2-done", + name="step_2", + ) + context.logger.info("Step 2 completed", extra={"result": result2}) + + return f"{result1}-{result2}" +``` + +**Benefits of this approach:** +- Clean logs without duplicates during replays +- JSON structured logging from Powertools for AWS Lambda (Python) +- Automatic context enrichment from the SDK (execution_arn, parent_id, name, attempt) +- Lambda context injection from Powertools for AWS Lambda (Python) (request_id, function_name, etc.) +- Correlation IDs and X-Ray integration from Powertools for AWS Lambda (Python) + +### When you might see duplicate logs + +You'll still see duplicate logs in these scenarios: +- Logs from operations that fail and retry (this is expected and helpful for debugging) +- Logs outside of durable execution context (before `@durable_execution` decorator runs) +- Logs from code that runs during replay before reaching a checkpoint + +This is normal behavior and helps you understand the execution flow. 
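+
+For instance, a direct (non-`context.logger`) log in code that runs before the first checkpoint is re-emitted on each replay. A minimal sketch; `fetch_config` is an illustrative helper:
+
+```python
+import logging
+
+from aws_durable_execution_sdk_python import DurableContext, durable_execution
+
+logger = logging.getLogger(__name__)
+
+def fetch_config() -> dict:
+    # Runs during replay before any checkpoint is reached,
+    # so this direct log can appear once per replay
+    logger.info("Loading configuration")
+    return {"feature_enabled": True}
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> str:
+    config = fetch_config()  # re-executes on every replay
+
+    result: str = context.step(
+        lambda _: "processed" if config["feature_enabled"] else "skipped",
+        name="process_data",
+    )
+    return result
+```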
+ +[↑ Back to top](#table-of-contents) + +## Best practices + +**Use structured logging with extra fields** + +Add context-specific data through the `extra` parameter rather than embedding it in the message string: + +```python +# Good - structured and queryable +context.logger.info("Order processed", extra={"order_id": order_id, "amount": 100}) + +# Avoid - harder to query +context.logger.info(f"Order {order_id} processed with amount 100") +``` + +**Log at appropriate levels** + +- `debug` - Detailed diagnostic information for troubleshooting +- `info` - General informational messages about workflow progress +- `warning` - Unexpected situations that don't prevent execution +- `error` - Error conditions that may need attention +- `exception` - Exceptions with stack traces (use in except blocks) + +**Include business context in logs** + +Add identifiers that help you trace business operations: + +```python +context.logger.info( + "Processing payment", + extra={ + "order_id": order_id, + "customer_id": customer_id, + "payment_method": "credit_card" + } +) +``` + +**Use Powertools for AWS Lambda (Python) for production** + +For production workloads, use Powertools for AWS Lambda (Python) to get JSON structured logging and CloudWatch Logs Insights integration: + +```python +from aws_lambda_powertools import Logger + +logger = Logger(service="my-service") + +@durable_execution +def handler(event: dict, context: DurableContext) -> str: + context.set_logger(logger) + # Now you get JSON logs with all Powertools for AWS Lambda (Python) features + context.logger.info("Processing started") +``` + +**Don't log sensitive data** + +Avoid logging sensitive information like passwords, tokens, or personal data: + +```python +# Good - log identifiers only +context.logger.info("User authenticated", extra={"user_id": user_id}) + +# Avoid - don't log sensitive data +context.logger.info("User authenticated", extra={"password": password}) +``` + +[↑ Back to top](#table-of-contents) + +## FAQ + +**Q: Does logging work during replays?** + +Yes, but `context.logger` prevents duplicate logs. When you use `context.logger`, the SDK tracks completed operations and suppresses their logs during replay. This keeps your CloudWatch logs clean and easy to read. If you use a logger directly (not through `context.logger`), you'll see duplicate log entries on every replay. + +**Q: How do I filter logs by execution?** + +Use the `execution_arn` field that's automatically added to every log entry. In CloudWatch Logs Insights: + +``` +fields @timestamp, @message, execution_arn +| filter execution_arn = "arn:aws:lambda:us-east-1:123456789012:function:my-function:execution-id" +| sort @timestamp asc +``` + +**Q: Can I use a custom logger?** + +Yes. Any logger that implements the `LoggerInterface` protocol works with the SDK. Use `context.set_logger()` to set your custom logger. + +The protocol is defined in `aws_durable_execution_sdk_python.types`: + +```python +from typing import Protocol +from collections.abc import Mapping + +class LoggerInterface(Protocol): + def debug( + self, msg: object, *args: object, extra: Mapping[str, object] | None = None + ) -> None: ... + + def info( + self, msg: object, *args: object, extra: Mapping[str, object] | None = None + ) -> None: ... + + def warning( + self, msg: object, *args: object, extra: Mapping[str, object] | None = None + ) -> None: ... + + def error( + self, msg: object, *args: object, extra: Mapping[str, object] | None = None + ) -> None: ... 
+
+    def exception(
+        self, msg: object, *args: object, extra: Mapping[str, object] | None = None
+    ) -> None: ...
+```
+
+Any logger with these methods (like Python's standard `logging.Logger` or Powertools Logger) is compatible.
+
+**Q: What's the difference between the SDK logger and Powertools for AWS Lambda (Python)?**
+
+The SDK provides a logger wrapper that adds execution context. Powertools for AWS Lambda (Python) provides structured JSON logging and Lambda-specific features. You can use them together - set the Powertools Logger on the context, and the SDK will enrich it with execution metadata.
+
+**Q: Do child contexts get their own logger?**
+
+Child contexts inherit the parent's logger and add their own `parent_id` to the context. This creates a hierarchical logging structure where you can trace operations from parent to child.
+
+**Q: How do I change the log level?**
+
+If using Python's standard logging, configure it before your handler:
+
+```python
+import logging
+logging.basicConfig(level=logging.DEBUG)
+```
+
+If using Powertools for AWS Lambda (Python), set the level when creating the logger:
+
+```python
+from aws_lambda_powertools import Logger
+logger = Logger(service="my-service", level="DEBUG")
+```
+
+**Q: Can I access the underlying logger?**
+
+Yes. Use `context.logger.get_logger()` to access the underlying logger instance if you need to call methods not in the `LoggerInterface`.
+
+[↑ Back to top](#table-of-contents)
+
+## Testing logger integration
+
+You can verify that your durable functions log correctly by capturing log output in tests.
+
+### Example test
+
+```python
+import pytest
+from aws_durable_execution_sdk_python.execution import InvocationStatus
+
+from src.logger_example import logger_example
+from test.conftest import deserialize_operation_payload
+
+@pytest.mark.durable_execution(
+    handler=logger_example.handler,
+    lambda_function_name="logger example",
+)
+def test_logger_example(durable_runner):
+    """Test logger example."""
+    with durable_runner:
+        result = durable_runner.run(input={"id": "test-123"}, timeout=10)
+
+    assert result.status is InvocationStatus.SUCCEEDED
+    assert deserialize_operation_payload(result.result) == "processed-child-processed"
+```
+
+### Verifying log output
+
+To verify specific log messages, capture log output using Python's logging test utilities:
+
+```python
+import logging
+import pytest
+from aws_durable_execution_sdk_python.execution import InvocationStatus
+
+@pytest.mark.durable_execution(handler=my_handler)
+def test_logging_output(durable_runner, caplog):
+    """Test that expected log messages are emitted."""
+    with caplog.at_level(logging.INFO):
+        with durable_runner:
+            result = durable_runner.run(input={"id": "test-123"}, timeout=10)
+
+    assert result.status is InvocationStatus.SUCCEEDED
+
+    # Verify log messages
+    assert "Starting workflow" in caplog.text
+    assert "Workflow completed" in caplog.text
+```
+
+### Testing with Powertools for AWS Lambda (Python)
+
+When testing with Powertools for AWS Lambda (Python), you can verify structured log output. This assumes the handler under test sets a Powertools Logger on the context:
+
+```python
+import json
+import logging
+
+import pytest
+
+@pytest.mark.durable_execution(handler=my_handler)
+def test_powertools_logging(durable_runner, caplog):
+    """Test Powertools for AWS Lambda (Python) integration."""
+    with caplog.at_level(logging.INFO):
+        with durable_runner:
+            result = durable_runner.run(input={"id": "test-123"}, timeout=10)
+
+    # Parse JSON log entries
+    for
record in caplog.records: + if hasattr(record, 'msg'): + try: + log_entry = json.loads(record.msg) + # Verify Powertools for AWS Lambda (Python) fields + assert "service" in log_entry + # Verify SDK enrichment fields + assert "execution_arn" in log_entry + except json.JSONDecodeError: + pass # Not a JSON log entry +``` + +[↑ Back to top](#table-of-contents) + +## See also + +- [Steps](steps.md) - Learn about step operations that use logger enrichment +- [Child contexts](child-contexts.md) - Understand logger inheritance in nested contexts +- [Getting started](../getting-started.md) - Basic durable function setup +- [Powertools for AWS Lambda (Python) - Logger](https://docs.powertools.aws.dev/lambda/python/latest/core/logger/) - Powertools Logger documentation + +[↑ Back to top](#table-of-contents) + +## License + +See the LICENSE file for our project's licensing. + +[↑ Back to top](#table-of-contents) diff --git a/docs/core/map.md b/docs/core/map.md new file mode 100644 index 0000000..204ce14 --- /dev/null +++ b/docs/core/map.md @@ -0,0 +1,638 @@ +# Map Operations + +## Table of Contents + +- [What are map operations?](#what-are-map-operations) +- [Terminology](#terminology) +- [Key features](#key-features) +- [Getting started](#getting-started) +- [Method signature](#method-signature) +- [Map function signature](#map-function-signature) +- [Configuration](#configuration) +- [Advanced patterns](#advanced-patterns) +- [Best practices](#best-practices) +- [Performance tips](#performance-tips) +- [FAQ](#faq) +- [Testing](#testing) +- [See also](#see-also) + +[← Back to main index](../index.md) + +## Terminology + +**Map operation** - A durable operation that processes a collection of items in parallel, where each item is processed independently and checkpointed. Created using `context.map()`. + +**Map function** - A function that processes a single item from the collection. Receives the context, item, index, and full collection as parameters. + +**BatchResult** - The result type returned by map operations, containing results from all processed items with success/failure status. + +**Concurrency control** - Limiting how many items process simultaneously using `max_concurrency` in `MapConfig`. + +**Item batching** - Grouping multiple items together for processing as a single unit to optimize efficiency. + +**Completion criteria** - Rules that determine when a map operation succeeds or fails based on individual item results. + +[↑ Back to top](#table-of-contents) + +## What are map operations? + +Map operations let you process collections durably by applying a function to each item in parallel. Each item's processing is checkpointed independently, so if your function is interrupted, completed items don't need to be reprocessed. + +Use map operations to: +- Transform collections with automatic checkpointing +- Process lists of items in parallel +- Handle large datasets with resilience +- Control concurrency and batching behavior +- Define custom success/failure criteria + +Map operations use `context.map()` to process collections efficiently. Each item becomes an independent operation that executes in parallel with other items. 
+ +[↑ Back to top](#table-of-contents) + +## Key features + +- **Parallel processing** - Items process concurrently by default +- **Independent checkpointing** - Each item's result is saved separately +- **Partial completion** - Completed items don't reprocess on replay +- **Concurrency control** - Limit simultaneous processing with `max_concurrency` +- **Batching support** - Group items for efficient processing +- **Flexible completion** - Define custom success/failure criteria +- **Result ordering** - Results maintain the same order as inputs + +[↑ Back to top](#table-of-contents) + +## Getting started + +Here's a simple example of processing a collection: + +```python +from aws_durable_execution_sdk_python import ( + DurableContext, + durable_execution, + BatchResult, +) + +def square(context: DurableContext, item: int, index: int, items: list[int]) -> int: + """Square a number.""" + return item * item + +@durable_execution +def handler(event: dict, context: DurableContext) -> BatchResult[int]: + """Process a list of items using map operations.""" + items = [1, 2, 3, 4, 5] + + result = context.map(items, square) + return result +``` + +When this function runs: +1. Each item is processed in parallel +2. The `square` function is called for each item +3. Each result is checkpointed independently +4. The function returns a `BatchResult` with results `[1, 4, 9, 16, 25]` + +If the function is interrupted after processing items 0-2, it resumes at item 3 without reprocessing the first three items. + +[↑ Back to top](#table-of-contents) + +## Method signature + +### context.map() + +```python +def map( + inputs: Sequence[U], + func: Callable[[DurableContext, U | BatchedInput[Any, U], int, Sequence[U]], T], + name: str | None = None, + config: MapConfig | None = None, +) -> BatchResult[T] +``` + +**Parameters:** + +- `inputs` - A sequence of items to process (list, tuple, or any sequence type). +- `func` - A callable that processes each item. See [Map function signature](#map-function-signature) for details. +- `name` (optional) - A name for the map operation, useful for debugging and testing. +- `config` (optional) - A `MapConfig` object to configure concurrency, batching, and completion criteria. + +**Returns:** A `BatchResult[T]` containing the results from processing all items. + +**Raises:** Exceptions based on the completion criteria defined in `MapConfig`. + +[↑ Back to top](#table-of-contents) + +## Map function signature + +The map function receives four parameters: + +```python +def process_item( + context: DurableContext, + item: U | BatchedInput[Any, U], + index: int, + items: Sequence[U] +) -> T: + """Process a single item from the collection.""" + # Your processing logic here + return result +``` + +**Parameters:** + +- `context` - A `DurableContext` for the item's processing. Use this to call steps, waits, or other operations. +- `item` - The current item being processed. Can be a `BatchedInput` if batching is configured. +- `index` - The zero-based index of the item in the original collection. +- `items` - The full collection of items being processed. + +**Returns:** The result of processing the item. + +### Example + +```python +def validate_email( + context: DurableContext, + item: str, + index: int, + items: list[str] +) -> dict: + """Validate an email address.""" + is_valid = "@" in item and "." 
in item + return { + "email": item, + "valid": is_valid, + "position": index, + "total": len(items) + } + +@durable_execution +def handler(event: dict, context: DurableContext) -> BatchResult[dict]: + emails = ["jane_doe@example.com", "john_doe@example.org", "invalid"] + result = context.map(emails, validate_email) + return result +``` + +[↑ Back to top](#table-of-contents) + +## Configuration + +Configure map behavior using `MapConfig`: + +```python +from aws_durable_execution_sdk_python import ( + DurableContext, + durable_execution, + BatchResult, +) +from aws_durable_execution_sdk_python.config import ( + MapConfig, + CompletionConfig, + ItemBatcher, +) + +def process_item(context: DurableContext, item: int, index: int, items: list[int]) -> dict: + """Process a single item.""" + return {"item": item, "squared": item * item} + +@durable_execution +def handler(event: dict, context: DurableContext) -> BatchResult[dict]: + items = list(range(100)) + + # Configure map operation + config = MapConfig( + max_concurrency=10, # Process 10 items at a time + item_batcher=ItemBatcher(max_items_per_batch=5), # Batch 5 items together + completion_config=CompletionConfig.all_successful(), # Require all to succeed + ) + + result = context.map(items, process_item, name="process_numbers", config=config) + return result +``` + +### MapConfig parameters + +**max_concurrency** - Maximum number of items to process concurrently. If `None`, all items process in parallel. Use this to control resource usage. + +**item_batcher** - Configuration for batching items together. Use `ItemBatcher(max_items_per_batch=N)` to group items. + +**completion_config** - Defines when the map operation succeeds or fails: +- `CompletionConfig()` - Default, allows any number of failures +- `CompletionConfig.all_successful()` - Requires all items to succeed +- `CompletionConfig(min_successful=N)` - Requires at least N items to succeed +- `CompletionConfig(tolerated_failure_count=N)` - Fails after N failures +- `CompletionConfig(tolerated_failure_percentage=X)` - Fails if more than X% fail + +**serdes** - Custom serialization for the entire `BatchResult`. If `None`, uses JSON serialization. + +**item_serdes** - Custom serialization for individual item results. If `None`, uses JSON serialization. + +**summary_generator** - Function to generate compact summaries for large results (>256KB). 
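+
+As a quick illustration of the completion options (the threshold values here are arbitrary):
+
+```python
+from aws_durable_execution_sdk_python.config import CompletionConfig, MapConfig
+
+# Stop and fail once 3 items have failed
+fail_fast = MapConfig(completion_config=CompletionConfig(tolerated_failure_count=3))
+
+# Tolerate up to 10% of items failing before the operation fails
+tolerant = MapConfig(
+    completion_config=CompletionConfig(tolerated_failure_percentage=10)
+)
+
+# Require every item to succeed
+strict = MapConfig(completion_config=CompletionConfig.all_successful())
+```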
+ +[↑ Back to top](#table-of-contents) + +## Advanced patterns + +### Concurrency control + +Limit how many items process simultaneously: + +```python +from aws_durable_execution_sdk_python.config import MapConfig + +def fetch_data(context: DurableContext, url: str, index: int, urls: list[str]) -> dict: + """Fetch data from a URL.""" + # Network call that might be rate-limited + return {"url": url, "data": "..."} + +@durable_execution +def handler(event: dict, context: DurableContext) -> BatchResult[dict]: + urls = [f"https://example.com/api/%7Bi%7D" for i in range(100)] + + # Process only 5 URLs at a time + config = MapConfig(max_concurrency=5) + + result = context.map(urls, fetch_data, config=config) + return result +``` + +### Batching items + +Group multiple items for efficient processing: + +```python +from aws_durable_execution_sdk_python.config import MapConfig, ItemBatcher, BatchedInput + +def process_batch( + context: DurableContext, + batch: BatchedInput[None, int], + index: int, + items: list[int] +) -> list[dict]: + """Process a batch of items together.""" + # Process all items in the batch together + return [{"item": item, "squared": item * item} for item in batch.items] + +@durable_execution +def handler(event: dict, context: DurableContext) -> BatchResult[list[dict]]: + items = list(range(100)) + + # Process items in batches of 10 + config = MapConfig( + item_batcher=ItemBatcher(max_items_per_batch=10) + ) + + result = context.map(items, process_batch, config=config) + return result +``` + +### Custom completion criteria + +Define when the map operation should succeed or fail: + +```python +from aws_durable_execution_sdk_python.config import MapConfig, CompletionConfig + +def process_item(context: DurableContext, item: int, index: int, items: list[int]) -> dict: + """Process an item that might fail.""" + # Processing that might fail + if item % 7 == 0: + raise ValueError(f"Item {item} failed") + return {"item": item, "processed": True} + +@durable_execution +def handler(event: dict, context: DurableContext) -> BatchResult[dict]: + items = list(range(20)) + + # Succeed if at least 15 items succeed, fail after 5 failures + config = MapConfig( + completion_config=CompletionConfig( + min_successful=15, + tolerated_failure_count=5, + ) + ) + + result = context.map(items, process_item, config=config) + return result +``` + +### Using context operations in map functions + +Call steps, waits, or other operations inside map functions: + +```python +from aws_durable_execution_sdk_python import durable_step, StepContext + +@durable_step +def fetch_user_data(step_context: StepContext, user_id: str) -> dict: + """Fetch user data from external service.""" + return {"user_id": user_id, "name": "Jane Doe", "email": "jane_doe@example.com"} + +@durable_step +def send_notification(step_context: StepContext, user: dict) -> dict: + """Send notification to user.""" + return {"sent": True, "email": user["email"]} + +def process_user( + context: DurableContext, + user_id: str, + index: int, + user_ids: list[str] +) -> dict: + """Process a user by fetching data and sending notification.""" + # Use steps within the map function + user = context.step(fetch_user_data(user_id)) + notification = context.step(send_notification(user)) + return {"user_id": user_id, "notification_sent": notification["sent"]} + +@durable_execution +def handler(event: dict, context: DurableContext) -> BatchResult[dict]: + user_ids = ["user_1", "user_2", "user_3"] + + result = context.map(user_ids, process_user) + 
return result +``` + +### Filtering and transforming results + +Access individual results from the `BatchResult`: + +```python +def check_inventory( + context: DurableContext, + product_id: str, + index: int, + products: list[str] +) -> dict: + """Check if a product is in stock.""" + # Check if product is in stock + return {"product_id": product_id, "in_stock": True, "quantity": 10} + +@durable_execution +def handler(event: dict, context: DurableContext) -> list[str]: + product_ids = ["prod_1", "prod_2", "prod_3", "prod_4"] + + # Get all inventory results + batch_result = context.map(product_ids, check_inventory) + + # Filter to only in-stock products + in_stock = [ + r.result["product_id"] + for r in batch_result.results + if r.result["in_stock"] + ] + + return in_stock +``` + +[↑ Back to top](#table-of-contents) + +## Best practices + +**Use descriptive names** - Name your map operations for easier debugging: `context.map(items, process_item, name="process_orders")`. + +**Control concurrency for external calls** - When calling external APIs, use `max_concurrency` to avoid rate limits. + +**Batch for efficiency** - For small, fast operations, use `item_batcher` to reduce overhead. + +**Define completion criteria** - Use `CompletionConfig` to specify when the operation should succeed or fail. + +**Keep map functions focused** - Each map function should process one item. Don't mix collection iteration with item processing. + +**Use context operations** - Call steps, waits, or other operations inside map functions for complex processing. + +**Handle errors gracefully** - Wrap error-prone code in try-except blocks or use completion criteria to tolerate failures. + +**Consider collection size** - For very large collections (10,000+ items), consider batching or processing in chunks. + +**Monitor memory usage** - Large collections create many checkpoints. Monitor Lambda memory usage. + +**Return only necessary data** - Large result objects increase checkpoint size. Return minimal data from map functions. + +[↑ Back to top](#table-of-contents) + +## Performance tips + +**Parallel execution is automatic** - Items execute concurrently by default. Don't try to manually parallelize. + +**Use max_concurrency wisely** - Too much concurrency can overwhelm external services or exhaust Lambda resources. Start conservative and increase as needed. + +**Batch small operations** - If each item processes quickly (< 100ms), batching reduces overhead: + +```python +config = MapConfig( + item_batcher=ItemBatcher(max_items_per_batch=10) +) +``` + +**Optimize map functions** - Keep map functions lightweight. Move heavy computation into steps within the map function. + +**Use appropriate completion criteria** - Fail fast with `tolerated_failure_count` to avoid processing remaining items when many fail. + +**Monitor checkpoint size** - Large result objects increase checkpoint size and Lambda memory usage. Return only necessary data. + +**Consider memory limits** - Processing thousands of items creates many checkpoints. Monitor Lambda memory and adjust batch size or concurrency. + +**Profile your workload** - Test with representative data to find optimal concurrency and batch settings. + +[↑ Back to top](#table-of-contents) + +## FAQ + +**Q: What's the difference between map and parallel operations?** + +A: Map operations process a collection of similar items using the same function. Parallel operations execute different functions concurrently. Use map for collections, parallel for heterogeneous tasks. 
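+
+A side-by-side sketch of the distinction (the function names are illustrative):
+
+```python
+# map: one function applied to every item of a collection
+batch = context.map(order_ids, process_order)
+
+# parallel: different functions executed concurrently
+checks = context.parallel([check_inventory, check_payment, check_shipping])
+```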
+ +**Q: How many items can I process?** + +A: There's no hard limit, but consider Lambda's memory and timeout constraints. For very large collections (10,000+ items), use batching or process in chunks. + +**Q: Do items process in order?** + +A: Items execute in parallel, so processing order is non-deterministic. However, results maintain the same order as inputs in the `BatchResult`. + +**Q: What happens if one item fails?** + +A: By default, the map operation continues processing other items. Use `CompletionConfig` to define failure behavior (e.g., fail after N failures). + +**Q: Can I use async functions in map operations?** + +A: No, map functions must be synchronous. If you need async processing, use `asyncio.run()` inside your map function. + +**Q: How do I access individual results?** + +A: The `BatchResult` contains a `results` list with each item's result: + +```python +batch_result = context.map(items, process_item) +for item_result in batch_result.results: + print(item_result.result) +``` + +**Q: Can I nest map operations?** + +A: Yes, you can call `context.map()` inside a map function to process nested collections. + +**Q: How does batching work?** + +A: When you configure `item_batcher`, multiple items are grouped together and passed as a `BatchedInput` to your map function. Process all items in `batch.items`. + +**Q: What's the difference between serdes and item_serdes?** + +A: `item_serdes` serializes individual item results as they complete. `serdes` serializes the entire `BatchResult` at the end. Use both for custom serialization at different levels. + +**Q: How do I handle partial failures?** + +A: Check the `BatchResult.results` list. Each result has a status indicating success or failure: + +```python +batch_result = context.map(items, process_item) +successful = [r for r in batch_result.results if r.status == "SUCCEEDED"] +failed = [r for r in batch_result.results if r.status == "FAILED"] +``` + +**Q: Can I use map operations with steps?** + +A: Yes, call `context.step()` inside your map function to execute steps for each item. + +[↑ Back to top](#table-of-contents) + +## Testing + +You can test map operations using the testing SDK. The test runner executes your function and lets you inspect individual item results. 
+
+### Basic map testing
+
+```python
+import pytest
+from aws_durable_execution_sdk_python_testing import InvocationStatus
+from my_function import handler
+
+@pytest.mark.durable_execution(
+    handler=handler,
+    lambda_function_name="map_operations",
+)
+def test_map_operations(durable_runner):
+    """Test map operations."""
+    with durable_runner:
+        result = durable_runner.run(input={}, timeout=10)
+
+    # Check overall status
+    assert result.status is InvocationStatus.SUCCEEDED
+
+    # Check the BatchResult
+    batch_result = result.result
+    assert batch_result.total_count == 5
+    assert batch_result.success_count == 5
+    assert batch_result.failure_count == 0
+
+    # Check individual results
+    assert batch_result.results[0].result == 1
+    assert batch_result.results[1].result == 4
+    assert batch_result.results[2].result == 9
+```
+
+### Inspecting individual items
+
+Use `result.get_map()` to inspect the map operation:
+
+```python
+@pytest.mark.durable_execution(
+    handler=handler,
+    lambda_function_name="map_operations",
+)
+def test_map_individual_items(durable_runner):
+    """Test individual item processing."""
+    with durable_runner:
+        result = durable_runner.run(input={}, timeout=10)
+
+    # Get the map operation
+    map_op = result.get_map("square")
+    assert map_op is not None
+
+    # Verify all items were processed
+    assert map_op.result.total_count == 5
+
+    # Check specific items
+    assert map_op.result.results[0].result == 1
+    assert map_op.result.results[2].result == 9
+```
+
+### Testing error handling
+
+Test that individual item failures are handled correctly:
+
+```python
+@pytest.mark.durable_execution(
+    handler=handler_with_errors,
+    lambda_function_name="map_with_errors",
+)
+def test_map_error_handling(durable_runner):
+    """Test error handling in map operations."""
+    with durable_runner:
+        result = durable_runner.run(input={}, timeout=10)
+
+    # Function should handle errors based on completion config
+    assert result.status is InvocationStatus.SUCCEEDED
+
+    batch_result = result.result
+
+    # Check that some items succeeded
+    successful = [r for r in batch_result.results if r.status == "SUCCEEDED"]
+    assert len(successful) > 0
+
+    # Check that some items failed
+    failed = [r for r in batch_result.results if r.status == "FAILED"]
+    assert len(failed) > 0
+```
+
+### Testing with configuration
+
+Test map operations with custom configuration:
+
+```python
+@pytest.mark.durable_execution(
+    handler=handler_with_config,
+    lambda_function_name="map_with_config",
+)
+def test_map_with_config(durable_runner):
+    """Test map operations with custom configuration."""
+    # handler_with_config applies its MapConfig internally
+    with durable_runner:
+        result = durable_runner.run(input={}, timeout=30)
+
+    # Verify the map operation completed
+    assert result.status is InvocationStatus.SUCCEEDED
+
+    # Get the map operation
+    map_op = result.get_map("process_items")
+
+    # Verify configuration was applied
+    assert map_op is not None
+    assert map_op.result.total_count > 0
+```
+
+For more testing patterns, see:
+- [Basic tests](../testing-patterns/basic-tests.md) - Simple test examples
+- [Complex workflows](../testing-patterns/complex-workflows.md) - Multi-step workflow testing
+- [Best practices](../testing-patterns/best-practices.md) - Testing recommendations
+
+[↑ Back to top](#table-of-contents)
+
+## See also
+
+- [Parallel operations](parallel.md) - Execute different functions concurrently
+- 
[Steps](steps.md) - Understanding step operations +- [Child contexts](child-contexts.md) - Organizing complex workflows +- [Configuration](../api-reference/config.md) - MapConfig and CompletionConfig details +- [BatchResult](../api-reference/result.md) - Working with batch results +- [Examples](https://github.com/awslabs/aws-durable-execution-sdk-python/tree/main/examples/src/map) - More map examples + +[↑ Back to top](#table-of-contents) + +## License + +See the [LICENSE](../../LICENSE) file for our project's licensing. + +[↑ Back to top](#table-of-contents) diff --git a/docs/core/parallel.md b/docs/core/parallel.md new file mode 100644 index 0000000..90b7b62 --- /dev/null +++ b/docs/core/parallel.md @@ -0,0 +1,900 @@ +# Parallel Operations + +## Table of Contents + +- [What are parallel operations?](#what-are-parallel-operations) +- [Terminology](#terminology) +- [Key features](#key-features) +- [Getting started](#getting-started) +- [Method signature](#method-signature) +- [Basic usage](#basic-usage) +- [Collecting results](#collecting-results) +- [Configuration](#configuration) +- [Advanced patterns](#advanced-patterns) +- [Error handling](#error-handling) +- [Result ordering](#result-ordering) +- [Performance considerations](#performance-considerations) +- [Best practices](#best-practices) +- [FAQ](#faq) +- [Testing](#testing) +- [See also](#see-also) + +[← Back to main index](../index.md) + +## Terminology + +**Parallel operation** - An operation that executes multiple functions concurrently using `context.parallel()`. Each function runs in its own child context. + +**Branch** - An individual function within a parallel operation. Each branch executes independently and can succeed or fail without affecting other branches. + +**BatchResult** - The result object returned by parallel operations, containing successful results, failed results, and execution metadata. + +**Completion strategy** - Configuration that determines when a parallel operation completes (e.g., all successful, first successful, all completed). + +**Concurrent execution** - Multiple operations executing at the same time. The SDK manages concurrency automatically, executing branches in parallel. + +**Child context** - An isolated execution context created for each branch. Each branch has its own step counter and operation tracking. + +[↑ Back to top](#table-of-contents) + +## What are parallel operations? + +Parallel operations let you execute multiple functions concurrently within a durable function. Each function runs in its own child context and can perform steps, waits, or other operations independently. The SDK manages the concurrent execution and collects results automatically. + +Use parallel operations to: +- Execute independent tasks concurrently for better performance +- Process multiple items that don't depend on each other +- Implement fan-out patterns where one input triggers multiple operations +- Reduce total execution time by running operations simultaneously + +[↑ Back to top](#table-of-contents) + +## Key features + +- **Automatic concurrency** - Functions execute concurrently without manual thread management +- **Independent execution** - Each branch runs in its own child context with isolated state +- **Flexible completion** - Configure when the operation completes (all successful, first successful, etc.) 
+- **Error isolation** - One branch failing doesn't automatically fail others +- **Result collection** - Automatic collection of successful and failed results +- **Concurrency control** - Limit maximum concurrent branches with `max_concurrency` +- **Checkpointing** - Results are checkpointed as branches complete + +[↑ Back to top](#table-of-contents) + +## Getting started + +Here's a simple example of parallel operations: + +```python +from aws_durable_execution_sdk_python import ( + BatchResult, + DurableContext, + durable_execution, +) + +@durable_execution +def handler(event: dict, context: DurableContext) -> list[str]: + """Execute three tasks in parallel.""" + # Define functions to execute in parallel + task1 = lambda ctx: ctx.step(lambda _: "Task 1 complete", name="task1") + task2 = lambda ctx: ctx.step(lambda _: "Task 2 complete", name="task2") + task3 = lambda ctx: ctx.step(lambda _: "Task 3 complete", name="task3") + + # Execute all tasks concurrently + result: BatchResult[str] = context.parallel([task1, task2, task3]) + + # Return successful results + return result.successful_results +``` + +When this function runs: +1. All three tasks execute concurrently +2. Each task runs in its own child context +3. Results are collected as tasks complete +4. The `BatchResult` contains all successful results + +[↑ Back to top](#table-of-contents) + +## Method signature + +### context.parallel() + +```python +def parallel( + functions: Sequence[Callable[[DurableContext], T]], + name: str | None = None, + config: ParallelConfig | None = None, +) -> BatchResult[T] +``` + +**Parameters:** + +- `functions` - A sequence of callables that each receive a `DurableContext` and return a result. Each function executes in its own child context. +- `name` (optional) - A name for the parallel operation, useful for debugging and testing. +- `config` (optional) - A `ParallelConfig` object to configure concurrency limits, completion criteria, and serialization. + +**Returns:** A `BatchResult[T]` object containing: +- `successful_results` - List of results from branches that succeeded +- `failed_results` - List of results from branches that failed +- `total_count` - Total number of branches +- `success_count` - Number of successful branches +- `failure_count` - Number of failed branches +- `status` - Overall status of the parallel operation +- `completion_reason` - Why the operation completed + +**Raises:** Exceptions are captured per branch and included in `failed_results`. The parallel operation itself doesn't raise unless all branches fail (depending on completion configuration). 
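+
+Putting the parameters together, a minimal call might look like the following sketch (`task_a` and `task_b` stand in for branch functions you define elsewhere):
+
+```python
+from aws_durable_execution_sdk_python.config import ParallelConfig
+
+result = context.parallel(
+    [task_a, task_b],                          # each runs in its own child context
+    name="service_checks",                     # optional: easier to spot in logs and tests
+    config=ParallelConfig(max_concurrency=2),  # optional: limit concurrent branches
+)
+
+# Per-branch exceptions are collected here instead of raising immediately
+failures = result.failed_results
+```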
+
+[↑ Back to top](#table-of-contents)
+
+## Basic usage
+
+### Simple parallel execution
+
+Execute multiple independent operations concurrently:
+
+```python
+from aws_durable_execution_sdk_python import (
+    BatchResult,
+    DurableContext,
+    durable_execution,
+)
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+    """Process multiple services in parallel."""
+
+    def check_inventory(ctx: DurableContext) -> dict:
+        return ctx.step(lambda _: {"service": "inventory", "status": "ok"})
+
+    def check_payment(ctx: DurableContext) -> dict:
+        return ctx.step(lambda _: {"service": "payment", "status": "ok"})
+
+    def check_shipping(ctx: DurableContext) -> dict:
+        return ctx.step(lambda _: {"service": "shipping", "status": "ok"})
+
+    # Execute all checks in parallel
+    result: BatchResult[dict] = context.parallel([
+        check_inventory,
+        check_payment,
+        check_shipping,
+    ])
+
+    return {
+        "total": result.total_count,
+        "successful": result.success_count,
+        "results": result.successful_results,
+    }
+```
+
+[↑ Back to top](#table-of-contents)
+
+## Collecting results
+
+The `BatchResult` object provides multiple ways to access results:
+
+```python
+from aws_durable_execution_sdk_python import (
+    BatchResult,
+    DurableContext,
+    durable_execution,
+)
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+    """Demonstrate result collection."""
+
+    # Bind i as a default argument so each lambda captures its own value
+    functions = [
+        lambda ctx, i=i: ctx.step(lambda _: f"Result {i}")
+        for i in range(5)
+    ]
+
+    result: BatchResult[str] = context.parallel(functions)
+
+    return {
+        # Successful results only
+        "successful": result.successful_results,
+
+        # Failed results (if any)
+        "failed": result.failed_results,
+
+        # Counts
+        "total_count": result.total_count,
+        "success_count": result.success_count,
+        "failure_count": result.failure_count,
+
+        # Status information
+        "status": result.status.value,
+        "completion_reason": result.completion_reason.value,
+    }
+```
+
+### Accessing individual results
+
+Results are ordered by branch index:
+
+```python
+from aws_durable_execution_sdk_python import (
+    BatchResult,
+    DurableContext,
+    durable_execution,
+)
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+    """Access individual results from parallel execution."""
+
+    def task_a(ctx: DurableContext) -> str:
+        return ctx.step(lambda _: "Result A")
+
+    def task_b(ctx: DurableContext) -> str:
+        return ctx.step(lambda _: "Result B")
+
+    def task_c(ctx: DurableContext) -> str:
+        return ctx.step(lambda _: "Result C")
+
+    result: BatchResult[str] = context.parallel([task_a, task_b, task_c])
+
+    # Access results by index
+    first_result = result.successful_results[0]  # "Result A"
+    second_result = result.successful_results[1]  # "Result B"
+    third_result = result.successful_results[2]  # "Result C"
+
+    return {
+        "first": first_result,
+        "second": second_result,
+        "third": third_result,
+        "all": result.successful_results,
+    }
+```
+
+[↑ Back to top](#table-of-contents)
+
+## Configuration
+
+Configure parallel behavior using `ParallelConfig`:
+
+```python
+from aws_durable_execution_sdk_python import (
+    BatchResult,
+    DurableContext,
+    durable_execution,
+)
+from aws_durable_execution_sdk_python.config import (
+    CompletionConfig,
+    ParallelConfig,
+)
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> str:
+    """Configure parallel execution."""
+
+    # Configure to complete when first branch succeeds
+    config = ParallelConfig(
+        max_concurrency=3,  # Run at most 3 branches concurrently
+        
completion_config=CompletionConfig.first_successful(),
+    )
+
+    functions = [
+        lambda ctx: ctx.step(lambda _: "Task 1", name="task1"),
+        lambda ctx: ctx.step(lambda _: "Task 2", name="task2"),
+        lambda ctx: ctx.step(lambda _: "Task 3", name="task3"),
+    ]
+
+    result: BatchResult[str] = context.parallel(functions, config=config)
+
+    # Get the first successful result
+    first_result = (
+        result.successful_results[0]
+        if result.successful_results
+        else "None"
+    )
+
+    return f"First successful result: {first_result}"
+```
+
+### ParallelConfig parameters
+
+**max_concurrency** - Maximum number of branches to execute concurrently. If `None` (default), all branches run concurrently. Use this to control resource usage:
+
+```python
+# Limit to 5 concurrent branches
+config = ParallelConfig(max_concurrency=5)
+```
+
+**completion_config** - Defines when the parallel operation completes:
+
+- `CompletionConfig.all_successful()` - Requires all branches to succeed (default)
+- `CompletionConfig.first_successful()` - Completes when any branch succeeds
+- `CompletionConfig.all_completed()` - Waits for all branches to complete regardless of success/failure
+- Custom configuration with specific success/failure thresholds
+
+```python
+# Require at least 3 successes, tolerate up to 2 failures
+config = ParallelConfig(
+    completion_config=CompletionConfig(
+        min_successful=3,
+        tolerated_failure_count=2,
+    )
+)
+```
+
+**serdes** - Custom serialization for the `BatchResult` object. If not provided, uses JSON serialization.
+
+**item_serdes** - Custom serialization for individual branch results. If not provided, uses JSON serialization.
+
+[↑ Back to top](#table-of-contents)
+
+## Advanced patterns
+
+### First successful pattern
+
+Execute multiple strategies and use the first one that succeeds:
+
+```python
+from aws_durable_execution_sdk_python import (
+    BatchResult,
+    DurableContext,
+    durable_execution,
+)
+from aws_durable_execution_sdk_python.config import (
+    CompletionConfig,
+    ParallelConfig,
+)
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+    """Try multiple data sources, use first successful."""
+
+    def try_primary_db(ctx: DurableContext) -> dict:
+        return ctx.step(lambda _: {"source": "primary", "data": "..."})
+
+    def try_secondary_db(ctx: DurableContext) -> dict:
+        return ctx.step(lambda _: {"source": "secondary", "data": "..."})
+
+    def try_cache(ctx: DurableContext) -> dict:
+        return ctx.step(lambda _: {"source": "cache", "data": "..."})
+
+    # Complete as soon as any source succeeds
+    config = ParallelConfig(
+        completion_config=CompletionConfig.first_successful()
+    )
+
+    result: BatchResult[dict] = context.parallel(
+        [try_primary_db, try_secondary_db, try_cache],
+        config=config,
+    )
+
+    if result.successful_results:
+        return result.successful_results[0]
+
+    return {"error": "All sources failed"}
+```
+
+### Controlled concurrency
+
+Limit concurrent execution to manage resource usage:
+
+```python
+from aws_durable_execution_sdk_python import (
+    BatchResult,
+    DurableContext,
+    durable_execution,
+)
+from aws_durable_execution_sdk_python.config import ParallelConfig
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+    """Process many items with controlled concurrency."""
+    items = event.get("items", [])
+
+    # Create a function for each item
+    functions = [
+        lambda ctx, item=item: ctx.step(
+            lambda _: f"Processed {item}",
+            name=f"process_{item}"
+        )
+        for item in items
+    ]
+
+    # Process at most 10 items concurrently
+    config = 
ParallelConfig(max_concurrency=10)
+
+    result: BatchResult[str] = context.parallel(functions, config=config)
+
+    return {
+        "processed": result.success_count,
+        "failed": result.failure_count,
+        "results": result.successful_results,
+    }
+```
+
+### Partial success handling
+
+Handle scenarios where some branches can fail:
+
+```python
+from aws_durable_execution_sdk_python import (
+    BatchResult,
+    DurableContext,
+    durable_execution,
+)
+from aws_durable_execution_sdk_python.config import (
+    CompletionConfig,
+    ParallelConfig,
+)
+
+def raise_error(message: str = "Simulated failure") -> None:
+    """Placeholder for work that raises an exception."""
+    raise RuntimeError(message)
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+    """Allow some branches to fail."""
+
+    # Require at least 2 successes, tolerate up to 1 failure
+    config = ParallelConfig(
+        completion_config=CompletionConfig(
+            min_successful=2,
+            tolerated_failure_count=1,
+        )
+    )
+
+    functions = [
+        lambda ctx: ctx.step(lambda _: "Success 1"),
+        lambda ctx: ctx.step(lambda _: "Success 2"),
+        lambda ctx: ctx.step(lambda _: raise_error()),  # This branch fails
+    ]
+
+    result: BatchResult[str] = context.parallel(functions, config=config)
+
+    return {
+        "status": "partial_success",
+        "successful": result.successful_results,
+        "failed_count": result.failure_count,
+    }
+```
+
+### Nested parallel operations
+
+Parallel operations can contain other parallel operations:
+
+```python
+from aws_durable_execution_sdk_python import (
+    BatchResult,
+    DurableContext,
+    durable_execution,
+)
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+    """Nested parallel execution."""
+
+    def process_group_a(ctx: DurableContext) -> list:
+        # Inner parallel operation for group A
+        task1 = lambda c: c.step(lambda _: "group-a-item-1")
+        task2 = lambda c: c.step(lambda _: "group-a-item-2")
+        task3 = lambda c: c.step(lambda _: "group-a-item-3")
+
+        inner_result = ctx.parallel([task1, task2, task3])
+        return inner_result.successful_results
+
+    def process_group_b(ctx: DurableContext) -> list:
+        # Inner parallel operation for group B
+        task1 = lambda c: c.step(lambda _: "group-b-item-1")
+        task2 = lambda c: c.step(lambda _: "group-b-item-2")
+        task3 = lambda c: c.step(lambda _: "group-b-item-3")
+
+        inner_result = ctx.parallel([task1, task2, task3])
+        return inner_result.successful_results
+
+    # Outer parallel operation
+    result: BatchResult[list] = context.parallel([process_group_a, process_group_b])
+
+    return {
+        "groups_processed": result.success_count,
+        "results": result.successful_results,
+    }
+```
+
+[↑ Back to top](#table-of-contents)
+
+## Error handling
+
+Parallel operations handle errors gracefully, isolating failures to individual branches:
+
+### Individual branch failures
+
+When a branch fails, other branches continue executing:
+
+```python
+from aws_durable_execution_sdk_python import (
+    BatchResult,
+    DurableContext,
+    durable_execution,
+)
+from aws_durable_execution_sdk_python.config import (
+    CompletionConfig,
+    ParallelConfig,
+)
+
+def raise_error(message: str) -> None:
+    """Placeholder for work that raises an exception."""
+    raise RuntimeError(message)
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+    """Handle individual branch failures."""
+
+    def successful_task(ctx: DurableContext) -> str:
+        return ctx.step(lambda _: "Success")
+
+    def failing_task(ctx: DurableContext) -> str:
+        return ctx.step(lambda _: raise_error("Task failed"))
+
+    functions = [successful_task, failing_task, successful_task]
+
+    # Use all_completed to wait for all branches
+    config = ParallelConfig(
+        completion_config=CompletionConfig.all_completed()
+    )
+
+    result: BatchResult[str] = context.parallel(functions, config=config)
+
+    return {
+        "successful": 
result.successful_results, + "failed_count": result.failure_count, + "status": result.status.value, + } +``` + +### Checking for failures + +Inspect the `BatchResult` to detect and handle failures: + +```python +from aws_durable_execution_sdk_python import BatchResult + +result: BatchResult = context.parallel(functions) + +if result.failure_count > 0: + # Some branches failed + return { + "status": "partial_failure", + "successful": result.successful_results, + "failed_count": result.failure_count, + } + +# All branches succeeded +return { + "status": "success", + "results": result.successful_results, +} +``` + +### Completion strategies and errors + +Different completion strategies handle errors differently: + +**all_successful()** - Fails fast when any branch fails: +```python +config = ParallelConfig( + completion_config=CompletionConfig.all_successful() +) +# Stops executing new branches after first failure +``` + +**first_successful()** - Continues until one branch succeeds: +```python +config = ParallelConfig( + completion_config=CompletionConfig.first_successful() +) +# Ignores failures until at least one succeeds +``` + +**all_completed()** - Waits for all branches regardless of errors: +```python +config = ParallelConfig( + completion_config=CompletionConfig.all_completed() +) +# All branches complete, collect both successes and failures +``` + +[↑ Back to top](#table-of-contents) + +## Result ordering + +Results in `successful_results` maintain the same order as the input functions: + +```python +from aws_durable_execution_sdk_python import ( + BatchResult, + DurableContext, + durable_execution, +) + +@durable_execution +def handler(event: dict, context: DurableContext) -> list[str]: + """Demonstrate result ordering.""" + + functions = [ + lambda ctx: ctx.step(lambda _: "First"), + lambda ctx: ctx.step(lambda _: "Second"), + lambda ctx: ctx.step(lambda _: "Third"), + ] + + result = context.parallel(functions) + + # Results are in the same order as functions + assert result.successful_results[0] == "First" + assert result.successful_results[1] == "Second" + assert result.successful_results[2] == "Third" + + return result.successful_results +``` + +**Important:** Even though branches execute concurrently and may complete in any order, the SDK preserves the original order in the results list. This makes it easy to correlate results with inputs. 
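+
+When every branch succeeds, this ordering guarantee lets you pair inputs with outputs by position. A short sketch (the input list and branch bodies are illustrative):
+
+```python
+inputs = ["a", "b", "c"]
+
+# Bind x as a default argument so each lambda captures its own value
+functions = [lambda ctx, x=x: ctx.step(lambda _: x.upper()) for x in inputs]
+
+result = context.parallel(functions)
+
+if result.failure_count == 0:
+    # Results arrive in input order, so zip() lines them up correctly
+    paired = dict(zip(inputs, result.successful_results))
+    # {"a": "A", "b": "B", "c": "C"}
+```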
+ +### Handling partial results + +When some branches fail, `successful_results` only contains results from successful branches, but the order is still preserved relative to the input: + +```python +# If function at index 1 fails: +# Input: [func0, func1, func2] +# Result: [result0, result2] # result1 is missing, but order preserved +``` + +[↑ Back to top](#table-of-contents) + +## Performance considerations + +### Concurrency limits + +Use `max_concurrency` to balance performance and resource usage: + +```python +from aws_durable_execution_sdk_python import BatchResult +from aws_durable_execution_sdk_python.config import ParallelConfig + +# Process 100 items, but only 10 at a time +config = ParallelConfig(max_concurrency=10) +result: BatchResult = context.parallel(functions, config=config) +``` + +**When to limit concurrency:** +- Processing many items (hundreds or thousands) +- Calling external APIs with rate limits +- Managing memory usage with large data +- Controlling database connection pools + +**When to use unlimited concurrency:** +- Small number of branches (< 50) +- Independent operations with no shared resources +- When maximum speed is critical + +### Completion strategies + +Choose the right completion strategy for your use case: + +**first_successful()** - Best for: +- Redundant operations (multiple data sources) +- Racing multiple strategies +- Minimizing latency + +**all_successful()** - Best for: +- Operations that must all succeed +- Fail-fast behavior +- Strict consistency requirements + +**all_completed()** - Best for: +- Best-effort operations +- Collecting partial results +- Logging or monitoring tasks + +### Checkpointing overhead + +Each branch creates checkpoints as it executes. For many small branches, consider: +- Batching items together +- Using map operations instead +- Grouping related operations + +[↑ Back to top](#table-of-contents) + +## Best practices + +**Use parallel for independent operations** - Only parallelize operations that don't depend on each other's results. + +**Limit concurrency for large workloads** - Use `max_concurrency` when processing many items to avoid overwhelming resources. + +**Choose appropriate completion strategies** - Match the completion strategy to your business requirements (all must succeed vs. best effort). + +**Handle partial failures gracefully** - Check `failure_count` and handle scenarios where some branches fail. + +**Keep branches focused** - Each branch should be a cohesive unit of work. Don't make branches too granular. + +**Use meaningful names** - Name your parallel operations for easier debugging and testing. + +**Consider map operations for collections** - If you're processing a collection of similar items, use `context.map()` instead. + +**Avoid shared state** - Each branch runs in its own context. Don't rely on shared variables or global state. + +**Monitor resource usage** - Parallel operations can consume significant resources. Monitor memory and API rate limits. + +**Test with realistic concurrency** - Test your parallel operations with realistic numbers of branches to catch resource issues. + +[↑ Back to top](#table-of-contents) + +## FAQ + +**Q: What's the difference between parallel() and map()?** + +A: `parallel()` executes a list of different functions, while `map()` executes the same function for each item in a collection. Use `parallel()` for heterogeneous operations and `map()` for homogeneous operations. 
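+
+As a rough sketch of the difference (the `map()` call shape here is illustrative; see [Map operations](map.md) for the actual signature):
+
+```python
+# parallel(): a list of *different* functions
+result = context.parallel([fetch_orders, fetch_invoices, fetch_refunds])
+
+# map(): the *same* function applied to each item in a collection
+result = context.map(process_order, orders)
+```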
+ +**Q: How many branches can I run in parallel?** + +A: There's no hard limit, but consider resource constraints. For large numbers (> 100), use `max_concurrency` to limit concurrent execution. + +**Q: Do branches execute in a specific order?** + +A: Branches execute concurrently, so execution order is non-deterministic. However, results are returned in the same order as the input functions. + +**Q: Can I use async functions in parallel operations?** + +A: No, branch functions must be synchronous. If you need to call async code, use `asyncio.run()` inside your function. + +**Q: What happens if all branches fail?** + +A: The behavior depends on your completion configuration. With `all_successful()`, the operation fails. With `all_completed()`, you get a `BatchResult` with all failures in `failed_results`. + +**Q: Can I cancel running branches?** + +A: Not directly. The SDK doesn't provide branch cancellation. Use completion strategies like `first_successful()` to stop starting new branches early. + +**Q: How do I pass different arguments to each branch?** + +A: Use lambda functions with default arguments: + +```python +functions = [ + lambda ctx, val=value: process(ctx, val) + for value in values +] +``` + +**Q: Can branches communicate with each other?** + +A: No, branches are isolated. They can't share state or communicate during execution. Pass data through the parent context or use the results after parallel execution completes. + +**Q: What's the overhead of parallel operations?** + +A: Each branch creates a child context and checkpoints its results. For very small operations, the overhead might outweigh the benefits. Profile your specific use case. + +**Q: Can I nest parallel operations?** + +A: Yes, you can call `context.parallel()` inside a branch function. Each nested parallel operation creates its own set of child contexts. + +[↑ Back to top](#table-of-contents) + +## Testing + +You can test parallel operations using the testing SDK. The test runner executes your function and lets you inspect branch results. 
+ +### Basic parallel testing + +```python +import pytest +from aws_durable_execution_sdk_python_testing import InvocationStatus +from my_function import handler + +@pytest.mark.durable_execution( + handler=handler, + lambda_function_name="parallel_function", +) +def test_parallel(durable_runner): + """Test parallel operations.""" + with durable_runner: + result = durable_runner.run(input={"data": "test"}, timeout=10) + + # Check overall status + assert result.status is InvocationStatus.SUCCEEDED + + # Check the result contains expected values + assert len(result.result) == 3 + assert "Task 1 complete" in result.result +``` + +### Inspecting branch operations + +Use the test result to inspect individual branch operations: + +```python +from aws_durable_execution_sdk_python_testing import OperationType + +@pytest.mark.durable_execution( + handler=handler, + lambda_function_name="parallel_function", +) +def test_parallel_branches(durable_runner): + """Test and inspect parallel branches.""" + with durable_runner: + result = durable_runner.run(input={}, timeout=10) + + # Verify all step operations exist + step_ops = [ + op for op in result.operations + if op.operation_type == OperationType.STEP + ] + assert len(step_ops) == 3 + + # Check step names + step_names = {op.name for op in step_ops} + assert step_names == {"task1", "task2", "task3"} +``` + +### Testing completion strategies + +Test that completion strategies work correctly: + +```python +@pytest.mark.durable_execution( + handler=handler_first_successful, + lambda_function_name="first_successful_function", +) +def test_first_successful(durable_runner): + """Test first successful completion strategy.""" + with durable_runner: + result = durable_runner.run(input={}, timeout=10) + + # Should succeed with at least one result + assert result.status is InvocationStatus.SUCCEEDED + assert "First successful result:" in result.result +``` + +### Testing error handling + +Test that parallel operations handle errors correctly: + +```python +@pytest.mark.durable_execution( + handler=handler_with_failures, + lambda_function_name="parallel_with_failures", +) +def test_parallel_with_failures(durable_runner): + """Test parallel operations with some failures.""" + with durable_runner: + result = durable_runner.run(input={}, timeout=10) + + # Check that some branches succeeded + assert result.status is InvocationStatus.SUCCEEDED + assert result.result["successful_count"] > 0 + assert result.result["failed_count"] > 0 +``` + +### Testing concurrency limits + +Test that concurrency limits are respected: + +```python +@pytest.mark.durable_execution( + handler=handler_with_concurrency_limit, + lambda_function_name="limited_concurrency", +) +def test_concurrency_limit(durable_runner): + """Test parallel operations with concurrency limit.""" + with durable_runner: + result = durable_runner.run(input={"items": list(range(20))}, timeout=30) + + # All items should be processed + assert result.status is InvocationStatus.SUCCEEDED + assert len(result.result["results"]) == 20 +``` + +For more testing patterns, see: +- [Basic tests](../testing-patterns/basic-tests.md) - Simple test examples +- [Complex workflows](../testing-patterns/complex-workflows.md) - Multi-step workflow testing +- [Best practices](../testing-patterns/best-practices.md) - Testing recommendations + +[↑ Back to top](#table-of-contents) + +## See also + +- [Map operations](map.md) - Process collections with the same function +- [Child contexts](child-contexts.md) - Understand child context 
isolation
+- [Steps](steps.md) - Use steps within parallel branches
+- [Error handling](../advanced/error-handling.md) - Handle errors in durable functions
+- [ParallelConfig](../api-reference/config.md) - Configuration options
+- [BatchResult](../api-reference/result.md) - Result object reference
+- [Examples](https://github.com/awslabs/aws-durable-execution-sdk-python/tree/main/examples/src/parallel) - More parallel examples
+
+[↑ Back to top](#table-of-contents)
+
+## License
+
+See the [LICENSE](../../LICENSE) file for our project's licensing.
+
+[↑ Back to top](#table-of-contents)
diff --git a/docs/core/steps.md b/docs/core/steps.md
new file mode 100644
index 0000000..6ad7ac4
--- /dev/null
+++ b/docs/core/steps.md
@@ -0,0 +1,597 @@
+# Steps
+
+## Table of Contents
+
+- [What are steps?](#what-are-steps)
+- [Terminology](#terminology)
+- [Key features](#key-features)
+- [Getting started](#getting-started)
+- [Method signature](#method-signature)
+- [Using the @durable_step decorator](#using-the-durable_step-decorator)
+- [Naming steps](#naming-steps)
+- [Configuration](#configuration)
+- [Advanced patterns](#advanced-patterns)
+- [Best practices](#best-practices)
+- [FAQ](#faq)
+- [Testing](#testing)
+- [See also](#see-also)
+
+[← Back to main index](../index.md)
+
+## Terminology
+
+**Step** - A durable operation that executes a function and checkpoints its result. Created using `context.step()`.
+
+**Step function** - A function decorated with `@durable_step` that can be executed as a step. Receives a `StepContext` as its first parameter.
+
+**Checkpoint** - A saved state of execution that allows your function to resume from a specific point. The SDK creates checkpoints automatically after each step completes.
+
+**Replay** - The process of re-executing your function code when resuming from a checkpoint. Completed steps return their saved results instantly without re-executing.
+
+**Step semantics** - Controls how many times a step executes per retry attempt. At-least-once (default) re-executes on retry. At-most-once executes only once per retry attempt.
+
+**StepContext** - A context object passed to step functions containing metadata about the current execution.
+
+[↑ Back to top](#table-of-contents)
+
+## What are steps?
+
+Steps are the fundamental building blocks of durable functions. A step is a unit of work that executes your code and automatically checkpoints the result. A completed step won't execute again; it returns its saved result instantly. If a step fails to complete, it retries according to its retry strategy and checkpoints the error once all retry attempts are exhausted. 
+ +Use steps to: +- Execute business logic with automatic checkpointing +- Retry operations that might fail +- Control execution semantics (at-most-once or at-least-once) +- Break complex workflows into manageable units + +[↑ Back to top](#table-of-contents) + +## Key features + +- **Automatic checkpointing** - Results are saved automatically after execution +- **Configurable retry** - Define retry strategies with custom backoff +- **Execution semantics** - Choose at-most-once or at-least-once per retry +- **Named operations** - Identify steps by name for debugging and testing +- **Custom serialization** - Control how inputs and results are serialized +- **Instant replay** - Completed steps return saved results without re-executing + +[↑ Back to top](#table-of-contents) + +## Getting started + +Here's a simple example of using steps: + +```python +from aws_durable_execution_sdk_python import ( + DurableContext, + durable_execution, + durable_step, + StepContext, +) + +@durable_step +def add_numbers(step_context: StepContext, a: int, b: int) -> int: + """Add two numbers together.""" + return a + b + +@durable_execution +def handler(event: dict, context: DurableContext) -> int: + """Simple durable function with a step.""" + result = context.step(add_numbers(5, 3)) + return result +``` + +When this function runs: +1. `add_numbers(5, 3)` executes and returns 8 +2. The result is checkpointed automatically +3. If the durable function replays, the step returns 8 instantly without re-executing the `add_numbers` function + +[↑ Back to top](#table-of-contents) + +## Method signature + +### context.step() + +```python +def step( + func: Callable[[StepContext], T], + name: str | None = None, + config: StepConfig | None = None, +) -> T +``` + +**Parameters:** + +- `func` - A callable that receives a `StepContext` and returns a result. Use the `@durable_step` decorator to create step functions. +- `name` (optional) - A name for the step, useful for debugging. If you decorate `func` with `@durable_step`, the SDK uses the function's name automatically. +- `config` (optional) - A `StepConfig` object to configure retry behavior, execution semantics, and serialization. + +**Returns:** The result of executing the step function. + +**Raises:** Any exception raised by the step function (after retries are exhausted if configured). + +[↑ Back to top](#table-of-contents) + +## Using the @durable_step decorator + +The `@durable_step` decorator marks a function as a step function. Step functions receive a `StepContext` as their first parameter: + +```python +from aws_durable_execution_sdk_python import durable_step, StepContext + +@durable_step +def validate_order(step_context: StepContext, order_id: str) -> dict: + """Validate an order.""" + # Your validation logic here + return {"order_id": order_id, "valid": True} + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + order_id = event["order_id"] + validation = context.step(validate_order(order_id)) + return validation +``` + +**Why use @durable_step?** + +The decorator wraps your function so it can be called with arguments and passed to `context.step()`. It also automatically uses the wrapped function's name as the step's name. 
You can optionally use lambda functions instead: + +```python +# With @durable_step (recommended) +result = context.step(validate_order(order_id)) + +# Optionally, use a lambda function +result = context.step(lambda _: validate_order_logic(order_id)) +``` + +**StepContext parameter:** + +The `StepContext` provides metadata about the current execution. While you must include it in your function signature, you typically don't need to use it unless you need execution metadata or custom logging. + +[↑ Back to top](#table-of-contents) + +## Naming steps + +You can name steps explicitly using the `name` parameter. Named steps are easier to identify in logs and tests: + +```python +@durable_execution +def handler(event: dict, context: DurableContext) -> str: + # Explicit name + result = context.step( + lambda _: "Step with explicit name", + name="custom_step" + ) + return f"Result: {result}" +``` + +If you don't provide a name, the SDK uses the function's name automatically when using `@durable_step`: + +```python +@durable_step +def process_payment(step_context: StepContext, amount: float) -> dict: + return {"status": "completed", "amount": amount} + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + # Step is automatically named "process_payment" + result = context.step(process_payment(100.0)) + return result +``` + +**Naming best practices:** + +- Use descriptive names that explain what the step does +- Keep names consistent across your codebase +- Use names when you need to inspect specific steps in tests +- Let the SDK auto-name steps when using `@durable_step` + +**Note:** Names don't need to be unique, but using distinct names improves observability when debugging or monitoring your workflows. + +[↑ Back to top](#table-of-contents) + +## Configuration + +Configure step behavior using `StepConfig`: + +```python +from aws_durable_execution_sdk_python import ( + DurableContext, + durable_execution, + durable_step, + StepContext, +) +from aws_durable_execution_sdk_python.config import StepConfig, StepSemantics +from aws_durable_execution_sdk_python.retries import ( + RetryStrategyConfig, + create_retry_strategy, +) + +@durable_step +def process_data(step_context: StepContext, data: str) -> dict: + """Process data with potential for transient failures.""" + # Your processing logic here + return {"processed": data, "status": "completed"} + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + # Create a retry strategy + retry_config = RetryStrategyConfig( + max_attempts=3, + retryable_error_types=[RuntimeError, ValueError], + ) + + # Configure the step + step_config = StepConfig( + retry_strategy=create_retry_strategy(retry_config), + step_semantics=StepSemantics.AT_LEAST_ONCE_PER_RETRY, + ) + + # Use the configuration + result = context.step(process_data(event["data"]), config=step_config) + return result +``` + +### StepConfig parameters + +**retry_strategy** - A function that determines whether to retry after an exception. Use `create_retry_strategy()` to build one from `RetryStrategyConfig`. + +**step_semantics** - Controls execution behavior on retry: +- `AT_LEAST_ONCE_PER_RETRY` (default) - Step re-executes on each retry attempt +- `AT_MOST_ONCE_PER_RETRY` - Step executes only once per retry attempt, even if the function is replayed + +**serdes** - Custom serialization/deserialization for the step result. If not provided, uses JSON serialization. 
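+
+These parameters compose. For example, a step with side effects can pair a retry strategy with at-most-once semantics; this sketch reuses the imports from the example above:
+
+```python
+step_config = StepConfig(
+    retry_strategy=create_retry_strategy(
+        RetryStrategyConfig(max_attempts=3, retryable_error_types=[RuntimeError])
+    ),
+    step_semantics=StepSemantics.AT_MOST_ONCE_PER_RETRY,
+)
+
+result = context.step(process_data(event["data"]), config=step_config)
+```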
+ +[↑ Back to top](#table-of-contents) + +## Advanced patterns + +### Retry with exponential backoff + +Configure steps to retry with exponential backoff when they fail: + +```python +from aws_durable_execution_sdk_python import ( + DurableContext, + durable_execution, +) +from aws_durable_execution_sdk_python.config import StepConfig +from aws_durable_execution_sdk_python.retries import ( + RetryStrategyConfig, + create_retry_strategy, +) + +@durable_execution +def handler(event: dict, context: DurableContext) -> str: + # Configure exponential backoff + retry_config = RetryStrategyConfig( + max_attempts=3, + initial_delay_seconds=1, + max_delay_seconds=10, + backoff_rate=2.0, + ) + + step_config = StepConfig( + retry_strategy=create_retry_strategy(retry_config) + ) + + result = context.step( + lambda _: "Step with exponential backoff", + name="retry_step", + config=step_config, + ) + return f"Result: {result}" +``` + +This configuration: +- Retries up to 3 times +- Waits 1 second before the first retry +- Doubles the wait time for each subsequent retry (2s, 4s, 8s) +- Caps the wait time at 10 seconds + +### Retry specific exceptions + +Only retry certain types of errors: + +```python +from random import random +from aws_durable_execution_sdk_python import ( + DurableContext, + durable_execution, + durable_step, + StepContext, +) +from aws_durable_execution_sdk_python.config import StepConfig +from aws_durable_execution_sdk_python.retries import ( + RetryStrategyConfig, + create_retry_strategy, +) + +@durable_step +def unreliable_operation(step_context: StepContext) -> str: + """Operation that might fail.""" + if random() > 0.5: + raise RuntimeError("Random error occurred") + return "Operation succeeded" + +@durable_execution +def handler(event: dict, context: DurableContext) -> str: + # Only retry RuntimeError, not other exceptions + retry_config = RetryStrategyConfig( + max_attempts=3, + retryable_error_types=[RuntimeError], + ) + + result = context.step( + unreliable_operation(), + config=StepConfig(create_retry_strategy(retry_config)), + ) + + return result +``` + +### At-most-once semantics + +Use at-most-once semantics when your step has side effects that shouldn't be repeated: + +```python +from aws_durable_execution_sdk_python.config import StepConfig, StepSemantics + +@durable_step +def charge_credit_card(step_context: StepContext, amount: float) -> dict: + """Charge a credit card - should only happen once.""" + # Payment processing logic + return {"transaction_id": "txn_123", "status": "completed"} + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + # Use at-most-once to prevent duplicate charges + step_config = StepConfig( + step_semantics=StepSemantics.AT_MOST_ONCE_PER_RETRY + ) + + payment = context.step( + charge_credit_card(event["amount"]), + config=step_config, + ) + + return payment +``` + +With at-most-once semantics: +- The step executes only once per retry attempt +- If the function replays due to Lambda recycling, the step returns the saved result +- Use this for operations with side effects like payments, emails, or database writes + +### Multiple steps in sequence + +Chain multiple steps together to build complex workflows: + +```python +@durable_step +def fetch_user(step_context: StepContext, user_id: str) -> dict: + """Fetch user data.""" + return {"user_id": user_id, "name": "Jane Doe", "email": "jane_doe@example.com"} + +@durable_step +def validate_user(step_context: StepContext, user: dict) -> bool: + """Validate user data.""" + 
return user.get("email") is not None
+
+@durable_step
+def send_notification(step_context: StepContext, user: dict) -> dict:
+    """Send notification to user."""
+    return {"sent": True, "email": user["email"]}
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+    user_id = event["user_id"]
+
+    # Step 1: Fetch user
+    user = context.step(fetch_user(user_id))
+
+    # Step 2: Validate user
+    is_valid = context.step(validate_user(user))
+
+    if not is_valid:
+        return {"status": "failed", "reason": "invalid_user"}
+
+    # Step 3: Send notification
+    notification = context.step(send_notification(user))
+
+    return {
+        "status": "completed",
+        "user_id": user_id,
+        "notification_sent": notification["sent"],
+    }
+```
+
+Each step is checkpointed independently. If the function is interrupted after step 1, it resumes at step 2 without re-fetching the user.
+
+[↑ Back to top](#table-of-contents)
+
+## Best practices
+
+**Use @durable_step for reusable functions** - Decorate functions you'll use as steps to get automatic naming and a succinct call syntax.
+
+**Name steps for debugging** - Use explicit names for steps you'll need to inspect in logs or tests.
+
+**Keep steps focused** - Each step should do one thing. Break complex operations into multiple steps.
+
+**Use retry for transient failures** - Configure retry strategies for operations that might fail temporarily (network calls, rate limits).
+
+**Choose semantics carefully** - Use at-most-once for operations with side effects. Use at-least-once (default) for idempotent operations.
+
+**Don't share state between steps** - Pass data between steps through return values, not global variables.
+
+**Wrap non-deterministic code in steps** - All non-deterministic code, such as random values or timestamps, must be wrapped in a step. Once the step completes, the result won't change on replay.
+
+**Handle errors explicitly** - Catch and handle exceptions in your step functions. Let retries handle transient failures.
+
+[↑ Back to top](#table-of-contents)
+
+## FAQ
+
+**Q: What's the difference between a step and a regular function call?**
+
+A: A step is checkpointed automatically. Completed steps return their saved results without re-executing. Regular function calls execute every time your function runs.
+
+**Q: When should I use at-most-once vs at-least-once semantics?**
+
+A: Use at-most-once for operations with side effects (payments, emails, database writes). Use at-least-once (default) for idempotent operations (calculations, data transformations).
+
+**Q: Can I use async functions as steps?**
+
+A: No, step functions must be synchronous. If you need to call async code, use `asyncio.run()` inside your step function.
+
+**Q: How do I pass multiple arguments to a step?**
+
+A: Use the `@durable_step` decorator and pass arguments when calling the function:
+
+```python
+@durable_step
+def my_step(step_context: StepContext, arg1: str, arg2: int) -> str:
+    return f"{arg1}: {arg2}"
+
+result = context.step(my_step("value", 42))
+```
+
+**Q: Can I nest steps inside other steps?**
+
+A: No, you can't call `context.step()` inside a step function. Steps are leaf operations. Use child contexts if you need nested operations.
+
+**Q: What happens if a step raises an exception?**
+
+A: If no retry strategy is configured, the exception propagates and fails the execution. If retry is configured, the SDK retries according to your strategy. After exhausting retries, the step checkpoints the error and the exception propagates. 
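+
+If the workflow should continue even when a step exhausts its retries, catch the propagated exception at the call site. A sketch, reusing `unreliable_operation` from the retry example above:
+
+```python
+try:
+    result = context.step(unreliable_operation(), config=step_config)
+except RuntimeError:
+    # The failure was already checkpointed; fall back to a default
+    result = "fallback value"
+```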
+ +**Q: How do I access the StepContext?** + +A: The `StepContext` is passed as the first parameter to your step function. It contains metadata about the execution, though you typically don't need to use it. + +**Q: Can I use lambda functions as steps?** + +A: Yes, but they won't have automatic names: + +```python +result = context.step(lambda _: "some value", name="my_step") +``` + +Use `@durable_step` for better ergonomics. + +[↑ Back to top](#table-of-contents) + +## Testing + +You can test steps using the testing SDK. The test runner executes your function and lets you inspect step results. + +### Basic step testing + +```python +import pytest +from aws_durable_execution_sdk_python_testing import InvocationStatus +from my_function import handler + +@pytest.mark.durable_execution( + handler=handler, + lambda_function_name="my_function", +) +def test_step(durable_runner): + """Test a function with steps.""" + with durable_runner: + result = durable_runner.run(input={"data": "test"}, timeout=10) + + # Check overall status + assert result.status is InvocationStatus.SUCCEEDED + + # Check final result + assert result.result == 8 +``` + +### Inspecting step results + +Use `result.get_step()` to inspect individual step results: + +```python +@pytest.mark.durable_execution( + handler=handler, + lambda_function_name="my_function", +) +def test_step_result(durable_runner): + """Test and inspect step results.""" + with durable_runner: + result = durable_runner.run(input={"data": "test"}, timeout=10) + + # Get step by name + step_result = result.get_step("add_numbers") + assert step_result.result == 8 + + # Check step status + assert step_result.status is InvocationStatus.SUCCEEDED +``` + +### Testing retry behavior + +Test that steps retry correctly on failure: + +```python +@pytest.mark.durable_execution( + handler=handler_with_retry, + lambda_function_name="retry_function", +) +def test_step_retry(durable_runner): + """Test step retry behavior.""" + with durable_runner: + result = durable_runner.run(input={}, timeout=30) + + # Function should eventually succeed after retries + assert result.status is InvocationStatus.SUCCEEDED + + # Inspect the step that retried + step_result = result.get_step("unreliable_operation") + assert step_result.status is InvocationStatus.SUCCEEDED +``` + +### Testing error handling + +Test that steps fail correctly when errors occur: + +```python +@pytest.mark.durable_execution( + handler=handler_with_error, + lambda_function_name="error_function", +) +def test_step_error(durable_runner): + """Test step error handling.""" + with durable_runner: + result = durable_runner.run(input={}, timeout=10) + + # Function should fail + assert result.status is InvocationStatus.FAILED + + # Check the error + assert "RuntimeError" in str(result.error) +``` + +For more testing patterns, see: +- [Basic tests](../testing-patterns/basic-tests.md) - Simple test examples +- [Complex workflows](../testing-patterns/complex-workflows.md) - Multi-step workflow testing +- [Best practices](../testing-patterns/best-practices.md) - Testing recommendations + +[↑ Back to top](#table-of-contents) + +## See also + +- [DurableContext API](../api-reference/context.md) - Complete context reference +- [StepConfig](../api-reference/config.md) - Configuration options +- [Retry strategies](../advanced/error-handling.md) - Implementing retry logic +- [Wait operations](wait.md) - Pause execution between steps +- [Child contexts](child-contexts.md) - Organize complex workflows +- 
[Examples](https://github.com/awslabs/aws-durable-execution-sdk-python/tree/main/examples/src/step) - More step examples + +[↑ Back to top](#table-of-contents) + +## License + +See the [LICENSE](../../LICENSE) file for our project's licensing. + +[↑ Back to top](#table-of-contents) diff --git a/docs/core/wait.md b/docs/core/wait.md new file mode 100644 index 0000000..904e9fa --- /dev/null +++ b/docs/core/wait.md @@ -0,0 +1,450 @@ +# Wait Operations + +## Table of Contents + +- [What are wait operations?](#what-are-wait-operations) +- [When to use wait operations](#when-to-use-wait-operations) +- [Terminology](#terminology) +- [Key features](#key-features) +- [Getting started](#getting-started) +- [Method signature](#method-signature) +- [Duration helpers](#duration-helpers) +- [Naming wait operations](#naming-wait-operations) +- [Multiple sequential waits](#multiple-sequential-waits) +- [Understanding scheduled_end_timestamp](#understanding-scheduled_end_timestamp) +- [Best practices](#best-practices) +- [FAQ](#faq) +- [Alternatives to wait operations](#alternatives-to-wait-operations) +- [Testing](#testing) +- [See also](#see-also) + +[← Back to main index](../index.md) + +## Terminology + +**Wait operation** - A durable operation that pauses execution for a specified duration. Created using `context.wait()`. + +**Duration** - A time period specified in seconds, minutes, hours, or days using the `Duration` class. + +**Scheduled end timestamp** - The Unix timestamp (in milliseconds) when the wait operation is scheduled to complete. + +**Suspend** - The process of pausing execution and saving state. The Lambda function exits and resumes later. + +**Resume** - The process of continuing execution after a wait completes. The SDK automatically invokes your function again. + +[↑ Back to top](#table-of-contents) + +## What are wait operations? + +Wait operations pause execution for a specified time. Your function suspends, the Lambda exits, and the system automatically resumes execution when the wait completes. + +Unlike `time.sleep()`, waits don't consume Lambda execution time. Your function checkpoints, exits cleanly, and resumes later, even if the wait lasts hours or days. + +[↑ Back to top](#table-of-contents) + +## When to use wait operations + +Use `context.wait()` when you need a simple time-based delay. 
+ +**Choose a different method if you need:** +- **Wait for external system response** → Use [`context.wait_for_callback()`](callbacks.md) +- **Wait until a condition is met** → Use [`context.wait_for_condition()`](../advanced/wait-for-condition.md) +- **Wait for a step to complete** → Use [`context.step()`](steps.md) + +[↑ Back to top](#table-of-contents) + +## Key features + +- **Durable pauses** - Execution suspends and resumes automatically +- **Flexible durations** - Specify time in seconds, minutes, hours, or days +- **Named operations** - Identify waits by name for debugging and testing +- **Automatic scheduling** - The SDK handles timing and resumption +- **Sequential waits** - Chain multiple waits together +- **No polling required** - The system invokes your function when ready + +[↑ Back to top](#table-of-contents) + +## Getting started + +Here's a simple example of using a wait operation: + +```python +from aws_durable_execution_sdk_python import DurableContext, durable_execution +from aws_durable_execution_sdk_python.config import Duration + +@durable_execution +def handler(event: dict, context: DurableContext) -> str: + """Simple durable function with a wait.""" + # Wait for 5 seconds + context.wait(duration=Duration.from_seconds(5)) + return "Wait completed" +``` + +When this function runs: +1. The wait operation is checkpointed with a scheduled end time +2. The Lambda function exits (suspends) +3. After 5 seconds, the system automatically invokes your function again +4. Execution resumes after the wait and returns "Wait completed" + +[↑ Back to top](#table-of-contents) + +## Method signature + +### context.wait() + +```python +def wait( + duration: Duration, + name: str | None = None, +) -> None +``` + +**Parameters:** + +- `duration` (Duration, required) - How long to wait. Must be at least 1 second. Use `Duration.from_seconds()`, `Duration.from_minutes()`, `Duration.from_hours()`, or `Duration.from_days()` to create a duration. +- `name` (str, optional) - A name for the wait operation. Useful for debugging and testing. 
+ +**Returns:** None + +**Raises:** +- `ValidationError` - If duration is less than 1 second + +[↑ Back to top](#table-of-contents) + +## Duration helpers + +The `Duration` class provides convenient methods to specify time periods: + +```python +from aws_durable_execution_sdk_python.config import Duration + +# Wait for 30 seconds +context.wait(duration=Duration.from_seconds(30)) + +# Wait for 5 minutes +context.wait(duration=Duration.from_minutes(5)) + +# Wait for 2 hours +context.wait(duration=Duration.from_hours(2)) + +# Wait for 1 day +context.wait(duration=Duration.from_days(1)) +``` + +If using duration in seconds, you can also create a Duration directly: + +```python +# Wait for 300 seconds (5 minutes) +context.wait(duration=Duration(seconds=300)) +``` + +[↑ Back to top](#table-of-contents) + +## Naming wait operations + +You can name wait operations to make them easier to identify in logs and tests: + +```python +from aws_durable_execution_sdk_python import DurableContext, durable_execution +from aws_durable_execution_sdk_python.config import Duration + +@durable_execution +def handler(event: dict, context: DurableContext) -> str: + """Durable function with a named wait.""" + # Wait with explicit name + context.wait(duration=Duration.from_seconds(2), name="custom_wait") + return "Wait with name completed" +``` + +Named waits are helpful when: +- You have multiple waits in your function +- You want to identify specific waits in test assertions +- You're debugging execution flow + +[↑ Back to top](#table-of-contents) + +## Understanding scheduled_end_timestamp + +Each wait operation has a `scheduled_end_timestamp` attribute that indicates when the wait is scheduled to complete. This timestamp is in Unix milliseconds. + +You can access this timestamp when inspecting operations in tests or logs. The SDK uses this timestamp to determine when to resume your function. + +The scheduled end time is calculated when the wait operation is first checkpointed: +- Current time + wait duration = scheduled end timestamp + +[↑ Back to top](#table-of-contents) + +## Best practices + +### Choose appropriate wait durations + +When your function hits a wait, it terminates execution and doesn't incur compute charges during the wait period. The function resumes with a new invocation when the wait completes. Choose durations based on your workflow needs: + +```python +# Short wait for rate limiting +context.wait(duration=Duration.from_seconds(30)) + +# Medium wait for polling intervals +context.wait(duration=Duration.from_minutes(5)) + +# Long wait for scheduled tasks +context.wait(duration=Duration.from_hours(24)) +``` + +**Note:** If you have concurrent operations running (like parallel or map operations), those continue executing even when the main execution hits a wait. The function waits for all concurrent operations to complete before terminating. 
+ +### Use named waits for clarity + +Name your waits when you have multiple waits or complex logic: + +```python +# Good - clear purpose +context.wait(duration=Duration.from_seconds(60), name="rate_limit_cooldown") +context.wait(duration=Duration.from_minutes(5), name="polling_interval") + +# Less clear - unnamed waits +context.wait(duration=Duration.from_seconds(60)) +context.wait(duration=Duration.from_minutes(5)) +``` + +### Combine waits with steps + +Use waits between steps to implement delays in your workflow: + +```python +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + # Start a process + job_id = context.step(start_job()) + + # Wait before checking status + context.wait(duration=Duration.from_seconds(30), name="initial_delay") + + # Check status + status = context.step(check_job_status(job_id)) + + return {"job_id": job_id, "status": status} +``` + +### Avoid very short waits + +Waits must be at least 1 second. For very short delays, consider if you actually need a wait: + +```python +# Avoid - too short, will raise ValidationError +context.wait(duration=Duration.from_seconds(0)) + +# Minimum - 1 second +context.wait(duration=Duration.from_seconds(1)) + +# Better - use meaningful durations +context.wait(duration=Duration.from_seconds(5)) +``` + +[↑ Back to top](#table-of-contents) + +## FAQ + +### How long can a wait operation last? + +There is an upper limit of 1 year - that's the maximum length of an execution. + +The wait itself doesn't consume Lambda execution time, your function suspends and resumes later. However, consider cost implications of long-running executions. + +### Can I cancel a wait operation? + +No, once a wait operation is checkpointed, it will complete after the specified duration. Design your workflows with this in mind. + +### Do waits execute in parallel? + +No, waits execute sequentially in the order they appear in your code. If you need parallel operations, use `context.parallel()` or `context.map()` instead. + +### How accurate are wait durations? + +Wait durations are approximate. The actual resume time depends on: +- System scheduling +- Lambda cold start time +- Current system load + +### Can I use waits for polling? + +You can, but we recommend using `context.wait_for_condition()` instead. It simplifies polling by handling the loop logic for you: + +```python +from aws_durable_execution_sdk_python.waits import WaitForConditionConfig, FixedWait + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + job_id = context.step(start_job()) + + # wait_for_condition handles the polling loop + def check_status(state, check_context): + status = get_job_status(state["job_id"]) + return {"job_id": state["job_id"], "status": status} + + result = context.wait_for_condition( + check=check_status, + config=WaitForConditionConfig( + initial_state={"job_id": job_id}, + condition=lambda state: state["status"] == "completed", + wait_strategy=FixedWait(Duration.from_minutes(1)) + ) + ) + return result +``` + +See [Wait for Condition](../advanced/wait-for-condition.md) for more details. 
+ +[↑ Back to top](#table-of-contents) + +## Alternatives to wait operations + +### Using wait_for_callback for external responses + +When you need to wait for an external system to respond, use `context.wait_for_callback()`: + +```python +from aws_durable_execution_sdk_python import DurableContext, durable_execution + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + # Wait for external approval + def submit_for_approval(callback_id: str): + # Send callback_id to external approval system + send_to_approval_system(callback_id) + + result = context.wait_for_callback( + submitter=submit_for_approval, + name="approval_wait" + ) + return result +``` + +See [Callbacks](callbacks.md) for more details. + +### Using wait_for_condition for polling + +When you need to poll until a condition is met, use `context.wait_for_condition()`: + +```python +from aws_durable_execution_sdk_python import DurableContext, durable_execution +from aws_durable_execution_sdk_python.waits import WaitForConditionConfig, ExponentialBackoff +from aws_durable_execution_sdk_python.config import Duration + +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + # Poll until job completes + def check_job_status(state, check_context): + status = get_job_status(state["job_id"]) + return { + "job_id": state["job_id"], + "status": status, + "done": status == "COMPLETED" + } + + result = context.wait_for_condition( + check=check_job_status, + config=WaitForConditionConfig( + initial_state={"job_id": "job-123", "done": False}, + condition=lambda state: state["done"], + wait_strategy=ExponentialBackoff( + initial_wait=Duration.from_seconds(5) + ) + ) + ) + return result +``` + +See [Wait for Condition](../advanced/wait-for-condition.md) for more details. 
+ +[↑ Back to top](#table-of-contents) + +## Testing + +### Testing wait operations + +You can verify wait operations in your tests by inspecting the operations list: + +```python +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus +from src.wait import wait + +@pytest.mark.durable_execution( + handler=wait.handler, + lambda_function_name="Wait State", +) +def test_wait(durable_runner): + """Test wait example.""" + with durable_runner: + result = durable_runner.run(input="test", timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + + # Find the wait operation + wait_ops = [op for op in result.operations if op.operation_type.value == "WAIT"] + assert len(wait_ops) == 1 + + # Verify the wait has a scheduled end timestamp + wait_op = wait_ops[0] + assert wait_op.scheduled_end_timestamp is not None +``` + +### Testing multiple waits + +When testing functions with multiple waits, you can verify each wait individually: + +```python +@pytest.mark.durable_execution(handler=multiple_wait.handler) +def test_multiple_waits(durable_runner): + """Test multiple sequential waits.""" + with durable_runner: + result = durable_runner.run(input="test", timeout=20) + + assert result.status is InvocationStatus.SUCCEEDED + + # Find all wait operations + wait_ops = [op for op in result.operations if op.operation_type.value == "WAIT"] + assert len(wait_ops) == 2 + + # Verify both waits have names + wait_names = [op.name for op in wait_ops] + assert "wait-1" in wait_names + assert "wait-2" in wait_names +``` + +### Testing named waits + +Named waits are easier to identify in tests: + +```python +@pytest.mark.durable_execution(handler=wait_with_name.handler) +def test_named_wait(durable_runner): + """Test wait with custom name.""" + with durable_runner: + result = durable_runner.run(input="test", timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + + # Find the named wait operation + wait_ops = [op for op in result.operations + if op.operation_type.value == "WAIT" and op.name == "custom_wait"] + assert len(wait_ops) == 1 +``` + +[↑ Back to top](#table-of-contents) + +## See also + +- [Steps](steps.md) - Execute business logic with automatic checkpointing +- [Callbacks](callbacks.md) - Wait for external system responses +- [Wait for Condition](../advanced/wait-for-condition.md) - Poll until a condition is met +- [Getting Started](../getting-started.md) - Learn the basics of durable functions + +[↑ Back to top](#table-of-contents) + +## License + +See the [LICENSE](../../LICENSE) file for our project's licensing. + +[↑ Back to main index](../index.md) diff --git a/docs/getting-started.md b/docs/getting-started.md new file mode 100644 index 0000000..6c7e077 --- /dev/null +++ b/docs/getting-started.md @@ -0,0 +1,291 @@ +# Getting started + +## Table of Contents + +- [Overview](#overview) +- [The two SDKs](#the-two-sdks) +- [How durable execution works](#how-durable-execution-works) +- [Your development workflow](#your-development-workflow) +- [Quick start](#quick-start) +- [Next steps](#next-steps) + +[← Back to main index](index.md) + +## Overview + +This guide explains the fundamental concepts behind durable execution and how the SDK works. 
You'll understand:
+
+- The difference between aws-durable-execution-sdk-python and aws-durable-execution-sdk-python-testing
+- How checkpoints and replay enable reliable workflows
+- Why your function code runs multiple times but side effects happen once
+- The development workflow from writing to testing to deployment
+
+[↑ Back to top](#table-of-contents)
+
+## The two SDKs
+
+The durable execution ecosystem has two separate packages:
+
+### Execution SDK (aws-durable-execution-sdk-python)
+
+This is the **core SDK** that runs in your Lambda functions. It provides:
+
+- `DurableContext` - The main interface for durable operations
+- Operations - Steps, waits, callbacks, parallel, map, child contexts
+- Decorators - `@durable_execution`, `@durable_step`, etc.
+- Configuration - StepConfig, CallbackConfig, retry strategies
+- Serialization - How data is saved in checkpoints
+
+Install it in your Lambda deployment package:
+
+```console
+pip install aws-durable-execution-sdk-python
+```
+
+### Testing SDK (aws-durable-execution-sdk-python-testing)
+
+This is a **separate SDK** for testing your durable functions. It provides:
+
+- `DurableFunctionTestRunner` - Run functions locally without AWS
+- `DurableFunctionCloudTestRunner` - Test deployed Lambda functions
+- Pytest integration - Fixtures and markers for writing tests
+- Result inspection - Examine execution state and operation results
+
+Install it in your development environment only:
+
+```console
+pip install aws-durable-execution-sdk-python-testing
+```
+
+**Key distinction:** The execution SDK runs in production Lambda. The testing SDK runs on your laptop or CI/CD. They're separate concerns.
+
+[↑ Back to top](#table-of-contents)
+
+## How durable execution works
+
+Let's trace through a simple workflow to understand the execution model:
+
+```python
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+    # Step 1: Call external API
+    data = context.step(fetch_data(event["id"]))
+
+    # Step 2: Wait 30 seconds
+    context.wait(seconds=30)
+
+    # Step 3: Process the data
+    result = context.step(process_data(data))
+
+    return result
+```
+
+**First invocation (t=0s):**
+
+1. Lambda invokes your function
+2. `fetch_data` executes and calls an external API
+3. Result is checkpointed to AWS
+4. `context.wait(seconds=30)` is reached
+5. Function returns, Lambda can recycle the environment
+
+**Second invocation (t=30s):**
+
+1. Lambda invokes your function again
+2. Function code runs from the beginning
+3. `fetch_data` returns the checkpointed result instantly (no API call)
+4. `context.wait(seconds=30)` is already complete, execution continues
+5. `process_data` executes for the first time
+6. Result is checkpointed
+7. Function returns the final result
+
+**Key insights:**
+
+- Your function code runs twice, but `fetch_data` only calls the API once
+- The wait doesn't block Lambda - your environment can be recycled
+- You write linear code that looks synchronous
+- The SDK handles all the complexity of state management
+
+[↑ Back to top](#table-of-contents)
+
+## Your development workflow
+
+```mermaid
+flowchart LR
+    subgraph dev["Development (Local)"]
+        direction LR
+        A["1. Write Function<br/>aws-durable-execution-sdk-python"]
+        B["2. Write Tests<br/>aws-durable-execution-sdk-python-testing"]
+        C["3. Run Tests<br/>pytest"]
+    end
+
+    subgraph prod["Production (AWS)"]
+        direction LR
+        D["4. Deploy<br/>SAM/CDK/Terraform"]
+        E["5. Test in Cloud<br/>pytest --runner-mode=cloud"]
+    end
+
+    A --> B --> C --> D --> E
+
+    style dev fill:#e3f2fd
+    style prod fill:#fff3e0
+```
+
+Here's how you build and test durable functions:
+
+### 1. Write your function (execution SDK)
+
+Install the execution SDK and write your Lambda handler:
+
+```console
+pip install aws-durable-execution-sdk-python
+```
+
+```python
+from aws_durable_execution_sdk_python import (
+    DurableContext,
+    durable_execution,
+    durable_step,
+)
+
+@durable_step
+def my_step(step_context, data):
+    # Your business logic
+    return data
+
+@durable_execution
+def handler(event, context: DurableContext):
+    result = context.step(my_step(event["data"]))
+    return result
+```
+
+### 2. Test locally (testing SDK)
+
+Install the testing SDK and write tests:
+
+```console
+pip install aws-durable-execution-sdk-python-testing
+```
+
+```python
+import pytest
+from aws_durable_execution_sdk_python.execution import InvocationStatus
+from my_function import handler
+
+@pytest.mark.durable_execution(handler=handler, lambda_function_name="my_function")
+def test_my_function(durable_runner):
+    with durable_runner:
+        result = durable_runner.run(input={"data": "test"}, timeout=10)
+        assert result.status is InvocationStatus.SUCCEEDED
+```
+
+Run tests without AWS credentials:
+
+```console
+pytest test_my_function.py
+```
+
+### 3. Deploy to Lambda
+
+Package your function with the execution SDK (not the testing SDK) and deploy using your preferred tool (SAM, CDK, Terraform, etc.).
+
+### 4. Test in the cloud (optional)
+
+Run the same tests against your deployed function:
+
+```console
+export AWS_REGION=us-west-2
+export QUALIFIED_FUNCTION_NAME='MyFunction:$LATEST'
+export LAMBDA_FUNCTION_TEST_NAME="my_function"
+
+pytest --runner-mode=cloud test_my_function.py
+```
+
+[↑ Back to top](#table-of-contents)
+
+## Quick start
+
+Ready to build your first durable function? Here's a minimal example:
+
+```python
+from aws_durable_execution_sdk_python import (
+    DurableContext,
+    durable_execution,
+    durable_step,
+    StepContext,
+)
+
+@durable_step
+def greet_user(step_context: StepContext, name: str) -> str:
+    """Generate a greeting."""
+    return f"Hello {name}!"
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> str:
+    """Simple durable function."""
+    name = event.get("name", "World")
+    greeting = context.step(greet_user(name))
+    return greeting
+```
+
+Deploy this to Lambda and you have a durable function. The `greet_user` step is checkpointed automatically.
+
+### Using a custom boto3 Lambda client
+
+If you need to customize the boto3 Lambda client used for durable execution operations (for example, to configure custom endpoints, retry settings, or credentials), you can pass a `boto3_client` parameter to the decorator. The client must be a boto3 Lambda client:
+
+```python
+import boto3
+from botocore.config import Config
+from aws_durable_execution_sdk_python import durable_execution, DurableContext
+
+# Create a custom boto3 Lambda client with specific configuration
+custom_lambda_client = boto3.client(
+    'lambda',
+    config=Config(
+        retries={'max_attempts': 5, 'mode': 'adaptive'},
+        connect_timeout=10,
+        read_timeout=60,
+    )
+)
+
+@durable_execution(boto3_client=custom_lambda_client)
+def handler(event: dict, context: DurableContext) -> dict:
+    # Your durable function logic
+    return {"status": "success"}
+```
+
+The custom Lambda client is used for all checkpoint and state management operations. If you don't provide a `boto3_client`, the SDK initializes a default Lambda client from your environment.
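+The same mechanism works if your checkpoint traffic must use a specific credential profile or endpoint. Here's a hedged sketch using a session-scoped client; the profile name and endpoint URL below are placeholders to adapt, not values the SDK requires:
+
+```python
+import boto3
+from aws_durable_execution_sdk_python import durable_execution, DurableContext
+
+# Placeholder profile and endpoint: substitute values for your environment.
+session = boto3.Session(profile_name="workload-profile")
+scoped_lambda_client = session.client(
+    "lambda",
+    endpoint_url="https://lambda.us-west-2.amazonaws.com",
+)
+
+@durable_execution(boto3_client=scoped_lambda_client)
+def handler(event: dict, context: DurableContext) -> dict:
+    return {"status": "success"}
+```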
+ +[↑ Back to top](#table-of-contents) + +## Next steps + +Now that you've built your first durable function, explore the core features: + +**Learn the operations:** +- [Steps](core/steps.md) - Execute code with retry strategies and checkpointing +- [Wait operations](core/wait.md) - Pause execution for seconds, minutes, or hours +- [Callbacks](core/callbacks.md) - Wait for external systems to respond +- [Child contexts](core/child-contexts.md) - Organize complex workflows +- [Parallel operations](core/parallel.md) - Run multiple operations concurrently +- [Map operations](core/map.md) - Process collections in parallel + +**Dive deeper:** +- [Error handling](advanced/error-handling.md) - Handle failures and implement retry strategies +- [Testing patterns](testing-patterns/basic-tests.md) - Write effective tests for your workflows +- [Best practices](best-practices.md) - Avoid common pitfalls + +[↑ Back to top](#table-of-contents) + +## See also + +- [DurableContext API](api-reference/context.md) - Complete reference for the context object +- [Decorators](api-reference/decorators.md) - All available decorators +- [Examples directory](https://github.com/awslabs/aws-durable-execution-sdk-python/tree/main/examples) - More working examples + +[↑ Back to top](#table-of-contents) + +## License + +See the [LICENSE](../LICENSE) file for our project's licensing. + +[↑ Back to top](#table-of-contents) diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 0000000..443f988 --- /dev/null +++ b/docs/index.md @@ -0,0 +1,234 @@ +# AWS Durable Execution SDK for Python + +> **Using JavaScript or TypeScript?** Check out the [AWS Durable Execution SDK for JavaScript](https://github.com/aws/aws-durable-execution-sdk-js) instead. + +## Table of Contents + +- [What is the Durable Execution SDK?](#what-is-the-durable-execution-sdk) +- [Key features](#key-features) +- [Quick navigation](#quick-navigation) +- [Installation](#installation) +- [Quick example](#quick-example) +- [Core concepts](#core-concepts) +- [Architecture](#architecture) +- [Use cases](#use-cases) +- [Getting help](#getting-help) +- [License](#license) + +## What is the Durable Execution SDK? + +The AWS Durable Execution SDK for Python lets you build reliable, long-running workflows in AWS Lambda. Your functions can pause execution, wait for external events, retry failed operations, and resume exactly where they left off—even if Lambda recycles your execution environment. + +The SDK provides a `DurableContext` that gives you operations like steps, waits, callbacks, and parallel execution. Each operation is checkpointed automatically, so your workflow state is preserved across interruptions. 
+ +[↑ Back to top](#table-of-contents) + +## Key features + +- **Automatic checkpointing** - Your workflow state is saved automatically after each operation +- **Durable steps** - Execute code with configurable retry strategies and at-most-once or at-least-once semantics +- **Wait operations** - Pause execution for seconds, minutes, or hours without blocking Lambda resources +- **Callbacks** - Wait for external systems to respond with results or approvals +- **Parallel execution** - Run multiple operations concurrently with configurable completion criteria +- **Map operations** - Process collections in parallel with batching and failure tolerance +- **Child contexts** - Isolate nested workflows for better organization and error handling +- **Structured logging** - Integrate with your logger to track execution flow and debug issues + +[↑ Back to top](#table-of-contents) + +## Quick navigation + +**New to durable functions?** +- [Getting started guide](getting-started.md) - Build your first durable function + +**Core operations:** +- [Steps](core/steps.md) - Execute code with automatic checkpointing and retry support +- [Wait operations](core/wait.md) - Pause execution without blocking Lambda resources +- [Callbacks](core/callbacks.md) - Wait for external systems to respond +- [Invoke operations](core/invoke.md) - Call other durable functions and compose workflows +- [Child contexts](core/child-contexts.md) - Organize complex workflows into isolated units +- [Parallel operations](core/parallel.md) - Run multiple operations concurrently +- [Map operations](core/map.md) - Process collections in parallel with batching +- [Logger integration](core/logger.md) - Add structured logging to track execution + +**Advanced topics:** +- [Error handling](advanced/error-handling.md) - Handle failures and implement retry strategies +- [Testing modes](advanced/testing-modes.md) - Run tests locally or against deployed Lambda functions +- [Serialization](advanced/serialization.md) - Customize how data is serialized in checkpoints +- [Configuration](advanced/configuration.md) - Fine-tune operation behavior +- [Performance optimization](advanced/performance.md) - Best practices for efficient workflows + +**API reference:** +- [DurableContext](api-reference/context.md) - Main context class and methods +- [Configuration classes](api-reference/config.md) - StepConfig, CallbackConfig, and more +- [Decorators](api-reference/decorators.md) - @durable_execution, @durable_step, etc. 
+- [Types and protocols](api-reference/types.md) - Type definitions and interfaces
+- [Exceptions](api-reference/exceptions.md) - DurableExecutionsError, InvocationError, and more
+
+[↑ Back to top](#table-of-contents)
+
+## Installation
+
+Install the SDK using pip:
+
+```console
+pip install aws-durable-execution-sdk-python
+```
+
+[↑ Back to top](#table-of-contents)
+
+## Quick example
+
+Here's a simple durable function that processes an order:
+
+```python
+from aws_durable_execution_sdk_python import (
+    DurableContext,
+    durable_execution,
+    durable_step,
+    StepContext,
+)
+
+@durable_step
+def validate_order(step_context: StepContext, order_id: str) -> dict:
+    # Validation logic here
+    return {"order_id": order_id, "valid": True}
+
+@durable_step
+def charge_payment(step_context: StepContext, order_id: str, amount: float) -> dict:
+    # Payment processing logic here
+    return {"transaction_id": "txn_123", "status": "completed"}
+
+@durable_step
+def fulfill_order(step_context: StepContext, order_id: str) -> dict:
+    # Fulfillment logic here
+    return {"tracking_number": "TRK123456"}
+
+@durable_execution
+def process_order(event: dict, context: DurableContext) -> dict:
+    order_id = event["order_id"]
+    amount = event["amount"]
+
+    # Step 1: Validate the order
+    validation = context.step(validate_order(order_id))
+
+    if not validation["valid"]:
+        return {"status": "failed", "reason": "invalid_order"}
+
+    # Step 2: Charge payment
+    payment = context.step(charge_payment(order_id, amount))
+
+    # Step 3: Wait for payment confirmation (simulated)
+    context.wait(seconds=5)
+
+    # Step 4: Fulfill the order
+    fulfillment = context.step(fulfill_order(order_id))
+
+    return {
+        "status": "completed",
+        "order_id": order_id,
+        "transaction_id": payment["transaction_id"],
+        "tracking_number": fulfillment["tracking_number"]
+    }
+```
+
+Each `context.step()` call is checkpointed automatically. If Lambda recycles your execution environment, the function resumes from the last completed step.
+
+[↑ Back to top](#table-of-contents)
+
+## Core concepts
+
+### Durable functions
+
+A durable function is a Lambda function decorated with `@durable_execution` that can be checkpointed and resumed. The function receives a `DurableContext` that provides methods for durable operations.
+
+### Operations
+
+Operations are units of work in a durable execution. Each operation type serves a specific purpose:
+
+- **Steps** - Execute code and checkpoint the result with retry support
+- **Waits** - Pause execution for a specified duration without blocking Lambda
+- **Callbacks** - Wait for external systems to respond with results
+- **Invoke** - Call other durable functions to compose complex workflows
+- **Child contexts** - Isolate nested workflows for better organization
+- **Parallel** - Execute multiple operations concurrently with completion criteria
+- **Map** - Process collections in parallel with batching and failure tolerance
+
+### Checkpoints
+
+Checkpoints are saved states of execution that allow resumption. When your function calls `context.step()` or other operations, the SDK creates a checkpoint and sends it to AWS. If Lambda recycles your environment or your function waits for an external event, execution can resume from the last checkpoint.
+
+### Replay
+
+When your function resumes, completed operations don't re-execute. Instead, they return their checkpointed results instantly. This means your function code runs multiple times, but side effects only happen once per operation.
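+If it helps to see the idea in miniature, here's a plain-Python analogy of replay (not the SDK's implementation): a checkpoint behaves like a cache keyed by operation name, so a replayed operation returns its saved result without re-running its side effect:
+
+```python
+# Plain-Python analogy of checkpointed replay; not the SDK's implementation.
+checkpoints: dict[str, object] = {}
+
+def step(name: str, fn):
+    if name in checkpoints:
+        # Replay: return the saved result without re-running the side effect.
+        return checkpoints[name]
+    result = fn()  # First execution: the side effect happens exactly once.
+    checkpoints[name] = result
+    return result
+
+def fetch():
+    print("calling API")  # side effect
+    return {"id": 1}
+
+step("fetch", fetch)  # prints "calling API"
+step("fetch", fetch)  # replay: returns the saved result, prints nothing
+```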
+
+### Decorators
+
+The SDK provides decorators to mark functions as durable:
+
+- `@durable_execution` - Marks your Lambda handler as a durable function
+- `@durable_step` - Marks a function that can be used with `context.step()`
+- `@durable_with_child_context` - Marks a function that receives a child context
+
+[↑ Back to top](#table-of-contents)
+
+## Architecture
+
+The SDK integrates with AWS Lambda's durable execution service to provide reliable, long-running workflows. Here's how it works:
+
+1. **Execution starts** - Lambda invokes your function with a `DurableContext`
+2. **Operations checkpoint** - Each `context.step()`, `context.wait()`, or other operation creates a checkpoint
+3. **State is saved** - Checkpoints are sent to the durable execution service and persisted
+4. **Execution may pause** - Lambda can recycle your environment or wait for external events
+5. **Execution resumes** - When ready, Lambda invokes your function again with the saved state
+6. **Operations replay** - Completed operations return their saved results instantly
+7. **New operations execute** - Your function continues from where it left off
+
+### Key components
+
+- **DurableContext** - Main interface for durable operations, provided by Lambda
+- **ExecutionState** - Manages checkpoints and tracks operation results
+- **Operation handlers** - Execute steps, waits, callbacks, and other operations
+- **Checkpoint batching** - Groups multiple checkpoints into efficient API calls
+- **SerDes system** - Serializes and deserializes operation inputs and results
+
+### Checkpointing
+
+The SDK uses a background thread to batch checkpoints for efficiency. Critical operations (like step starts with at-most-once semantics) block until the checkpoint is confirmed. Non-critical operations (like observability checkpoints) are asynchronous for better performance.
+
+[↑ Back to top](#table-of-contents)
+
+## Use cases
+
+The SDK helps you build:
+
+**Order processing workflows** - Validate orders, charge payments, and fulfill shipments with automatic retry on failures.
+
+**Approval workflows** - Wait for human approvals or external system responses using callbacks.
+
+**Data processing pipelines** - Process large datasets in parallel with map operations and failure tolerance.
+
+**Multi-step integrations** - Coordinate calls to multiple services with proper error handling and state management.
+
+**Long-running tasks** - Execute workflows that take minutes or hours without blocking Lambda resources.
+
+**Saga patterns** - Implement distributed transactions with compensation logic for failures.
+
+[↑ Back to top](#table-of-contents)
+
+## Getting help
+
+**Documentation** - You're reading it! Use the navigation above to find specific topics.
+
+**Examples** - Check the `examples/` directory in the repository for working code samples.
+
+**Issues** - Report bugs or request features on the [GitHub repository](https://github.com/awslabs/aws-durable-execution-sdk-python).
+
+**Contributing** - See [CONTRIBUTING.md](../CONTRIBUTING.md) for guidelines on contributing to the project.
+
+[↑ Back to top](#table-of-contents)
+
+## License
+
+See the [LICENSE](../LICENSE) file for our project's licensing.
+ +[↑ Back to top](#table-of-contents) diff --git a/docs/testing-patterns/.gitkeep b/docs/testing-patterns/.gitkeep new file mode 100644 index 0000000..9748135 --- /dev/null +++ b/docs/testing-patterns/.gitkeep @@ -0,0 +1 @@ +# This file will be removed once the directory has content diff --git a/docs/testing-patterns/basic-tests.md b/docs/testing-patterns/basic-tests.md new file mode 100644 index 0000000..7f6cd66 --- /dev/null +++ b/docs/testing-patterns/basic-tests.md @@ -0,0 +1,701 @@ +# Basic Test Patterns + +## Table of Contents + +- [Overview](#overview) +- [Prerequisites](#prerequisites) +- [Project structure](#project-structure) +- [Getting started](#getting-started) +- [Status checking patterns](#status-checking-patterns) +- [Result verification patterns](#result-verification-patterns) +- [Operation-specific assertions](#operation-specific-assertions) +- [Test organization tips](#test-organization-tips) +- [FAQ](#faq) +- [See also](#see-also) + +[← Back to main index](../index.md) + +## Overview + +When you test durable functions, you need to verify that your function executed successfully, returned the expected result, and that operations like steps or waits ran correctly. This document shows you common patterns for writing these tests with simple assertions using the testing SDK. + +The testing SDK (`aws-durable-execution-sdk-python-testing`) provides tools to run and inspect durable functions locally without deploying to AWS. Use these patterns as building blocks for your own tests, whether you're checking a simple calculation or inspecting individual operations. + +[↑ Back to top](#table-of-contents) + +## Prerequisites + +To test durable functions, you need both SDKs installed: + +```console +# Install the core SDK (for writing durable functions) +pip install aws-durable-execution-sdk-python + +# Install the testing SDK (for testing durable functions) +pip install aws-durable-execution-sdk-python-testing + +# Install pytest (test framework) +pip install pytest +``` + +The core SDK provides the decorators and context for writing durable functions. The testing SDK provides the test runner and assertions for testing them. 
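+Tests in this guide rely on the custom `durable_execution` marker. If nothing in your project registers that marker, pytest emits an unknown-marker warning; one way to register it is in your shared `conftest.py` (a sketch, assuming the marker signature used throughout this guide):
+
+```python
+# conftest.py: register the custom marker so pytest doesn't warn about it.
+def pytest_configure(config):
+    config.addinivalue_line(
+        "markers",
+        "durable_execution(handler, lambda_function_name): "
+        "configure the durable function test runner",
+    )
+```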
+ +[↑ Back to top](#table-of-contents) + +## Project structure + +Here's a typical project structure for testing durable functions: + +``` +my-project/ +├── src/ +│ ├── __init__.py +│ └── my_function.py # Your durable function +├── test/ +│ ├── __init__.py +│ ├── conftest.py # Pytest configuration and fixtures +│ └── test_my_function.py # Your tests +├── requirements.txt +└── pytest.ini +``` + +**Key files:** + +- `src/my_function.py` - Contains your durable function with `@durable_execution` decorator +- `test/conftest.py` - Configures the `durable_runner` fixture for pytest +- `test/test_my_function.py` - Contains your test cases using the `durable_runner` fixture + +**Example conftest.py:** + +```python +import pytest +from aws_durable_execution_sdk_python_testing.runner import DurableFunctionTestRunner + +@pytest.fixture +def durable_runner(request): + """Pytest fixture that provides a test runner.""" + marker = request.node.get_closest_marker("durable_execution") + if not marker: + pytest.fail("Test must be marked with @pytest.mark.durable_execution") + + handler = marker.kwargs.get("handler") + runner = DurableFunctionTestRunner(handler=handler) + + yield runner +``` + +[↑ Back to top](#table-of-contents) + +## Getting started + +Here's a simple durable function: + +```python +from aws_durable_execution_sdk_python import DurableContext, durable_execution + +@durable_execution +def handler(event: dict, context: DurableContext) -> str: + """Simple hello world durable function.""" + return "Hello World!" +``` + +And here's how you test it: + +```python +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus +from test.conftest import deserialize_operation_payload + +@pytest.mark.durable_execution( + handler=handler, + lambda_function_name="hello world", +) +def test_hello_world(durable_runner): + """Test hello world example.""" + with durable_runner: + result = durable_runner.run(input="test", timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + assert deserialize_operation_payload(result.result) == "Hello World!" +``` + +This test: +1. Marks the test with `@pytest.mark.durable_execution` to configure the runner +2. Uses the `durable_runner` fixture to execute the function +3. Checks the execution status +4. 
Verifies the final result + +[↑ Back to top](#table-of-contents) + +## Status checking patterns + +### Check for successful execution + +The most basic pattern verifies that your function completed successfully: + +```python +@pytest.mark.durable_execution( + handler=my_handler, + lambda_function_name="my_function", +) +def test_success(durable_runner): + """Test successful execution.""" + with durable_runner: + result = durable_runner.run(input={"data": "test"}, timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED +``` + +### Check for expected failures + +Test that your function fails correctly when given invalid input: + +```python +@pytest.mark.durable_execution( + handler=handler_with_validation, + lambda_function_name="validation_function", +) +def test_validation_failure(durable_runner): + """Test that invalid input causes failure.""" + with durable_runner: + result = durable_runner.run(input={"invalid": "data"}, timeout=10) + + assert result.status is InvocationStatus.FAILED + assert "ValidationError" in str(result.error) +``` + +### Check execution with timeout + +Verify that your function completes within the expected time: + +```python +@pytest.mark.durable_execution( + handler=quick_handler, + lambda_function_name="quick_function", +) +def test_completes_quickly(durable_runner): + """Test that function completes within timeout.""" + with durable_runner: + # Use a short timeout to verify quick execution + result = durable_runner.run(input={}, timeout=5) + + assert result.status is InvocationStatus.SUCCEEDED +``` + +[↑ Back to top](#table-of-contents) + +## Result verification patterns + +### Verify simple return values + +Check that your function returns the expected value: + +```python +from test.conftest import deserialize_operation_payload + +@pytest.mark.durable_execution( + handler=calculator_handler, + lambda_function_name="calculator", +) +def test_calculation_result(durable_runner): + """Test calculation returns correct result.""" + with durable_runner: + result = durable_runner.run(input={"a": 5, "b": 3}, timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + assert deserialize_operation_payload(result.result) == 8 +``` + +### Verify complex return values + +Check specific fields in complex return values: + +```python +@pytest.mark.durable_execution( + handler=order_handler, + lambda_function_name="order_processor", +) +def test_order_processing(durable_runner): + """Test order processing returns correct structure.""" + with durable_runner: + result = durable_runner.run( + input={"order_id": "order-123", "amount": 100.0}, + timeout=10 + ) + + assert result.status is InvocationStatus.SUCCEEDED + + order_result = deserialize_operation_payload(result.result) + assert order_result["order_id"] == "order-123" + assert order_result["status"] == "completed" + assert order_result["amount"] == 100.0 +``` + +### Verify list results + +Check that your function returns the expected list of values: + +```python +@pytest.mark.durable_execution( + handler=parallel_handler, + lambda_function_name="parallel_tasks", +) +def test_parallel_results(durable_runner): + """Test parallel operations return all results.""" + with durable_runner: + result = durable_runner.run(input={}, timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + + results = deserialize_operation_payload(result.result) + assert len(results) == 3 + assert results == [ + "Task 1 complete", + "Task 2 complete", + "Task 3 complete", + ] +``` + +[↑ Back to top](#table-of-contents) + 
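+One note on the helper used throughout these tests: `deserialize_operation_payload` is imported from `test/conftest.py`, but the sample `conftest.py` above doesn't define it. Its real implementation depends on the serializer your functions use; a minimal sketch that assumes payloads are checkpointed as JSON strings could look like this:
+
+```python
+# conftest.py (sketch): assumes operation payloads are JSON-encoded strings.
+import json
+
+def deserialize_operation_payload(payload):
+    """Decode a checkpointed payload into a Python value."""
+    if payload is None:
+        return None
+    if isinstance(payload, (bytes, bytearray)):
+        payload = payload.decode("utf-8")
+    return json.loads(payload)
+```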
+## Operation-specific assertions + +### Verify step operations + +Here's a function with a step: + +```python +from aws_durable_execution_sdk_python import ( + DurableContext, + durable_execution, + durable_step, + StepContext, +) + +@durable_step +def add_numbers(step_context: StepContext, a: int, b: int) -> int: + return a + b + +@durable_execution +def handler(event: dict, context: DurableContext) -> int: + result = context.step(add_numbers(5, 3)) + return result +``` + +Check that the step executed and produced the expected result: + +```python +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus +from test.conftest import deserialize_operation_payload + +@pytest.mark.durable_execution( + handler=handler, + lambda_function_name="step_function", +) +def test_step_execution(durable_runner): + """Test step executes correctly.""" + with durable_runner: + result = durable_runner.run(input={}, timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + + # Get step by name + step_result = result.get_step("add_numbers") + assert deserialize_operation_payload(step_result.result) == 8 +``` + +### Verify wait operations + +Here's a function with a wait: + +```python +from aws_durable_execution_sdk_python import DurableContext, durable_execution + +@durable_execution +def handler(event: dict, context: DurableContext) -> str: + context.wait(seconds=5) + return "Wait completed" +``` + +Check that the wait operation was created with correct timing: + +```python +@pytest.mark.durable_execution( + handler=handler, + lambda_function_name="wait_function", +) +def test_wait_operation(durable_runner): + """Test wait operation is created.""" + with durable_runner: + result = durable_runner.run(input={}, timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + + # Find wait operations + wait_ops = [ + op for op in result.operations + if op.operation_type.value == "WAIT" + ] + assert len(wait_ops) == 1 + assert wait_ops[0].scheduled_end_timestamp is not None +``` + +### Verify callback operations + +Here's a function that creates a callback: + +```python +from aws_durable_execution_sdk_python import DurableContext, durable_execution +from aws_durable_execution_sdk_python.config import CallbackConfig + +@durable_execution +def handler(event: dict, context: DurableContext) -> str: + callback_config = CallbackConfig( + timeout_seconds=120, + heartbeat_timeout_seconds=60 + ) + + callback = context.create_callback( + name="example_callback", + config=callback_config + ) + + return f"Callback created with ID: {callback.callback_id}" +``` + +Check that the callback was created with correct configuration: + +```python +@pytest.mark.durable_execution( + handler=handler, + lambda_function_name="callback_function", +) +def test_callback_creation(durable_runner): + """Test callback is created correctly.""" + with durable_runner: + result = durable_runner.run(input={}, timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + + # Find callback operations + callback_ops = [ + op for op in result.operations + if op.operation_type.value == "CALLBACK" + ] + assert len(callback_ops) == 1 + + callback_op = callback_ops[0] + assert callback_op.name == "example_callback" + assert callback_op.callback_id is not None +``` + +### Verify child context operations + +Here's a function with a child context: + +```python +from aws_durable_execution_sdk_python import ( + DurableContext, + durable_execution, + durable_with_child_context, +) + +@durable_with_child_context 
+def child_operation(ctx: DurableContext, value: int) -> int: + return ctx.step(lambda _: value * 2, name="multiply") + +@durable_execution +def handler(event: dict, context: DurableContext) -> str: + result = context.run_in_child_context(child_operation(5)) + return f"Child context result: {result}" +``` + +Check that the child context executed correctly: + +```python +@pytest.mark.durable_execution( + handler=handler, + lambda_function_name="child_context_function", +) +def test_child_context(durable_runner): + """Test child context executes.""" + with durable_runner: + result = durable_runner.run(input={}, timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + + # Find child context operations + context_ops = [ + op for op in result.operations + if op.operation_type.value == "CONTEXT" + ] + assert len(context_ops) >= 1 +``` + +### Verify parallel operations + +Here's a function with parallel operations: + +```python +from aws_durable_execution_sdk_python import DurableContext, durable_execution + +@durable_execution +def handler(event: dict, context: DurableContext) -> list[str]: + # Execute multiple operations + task1 = context.step(lambda _: "Task 1 complete", name="task1") + task2 = context.step(lambda _: "Task 2 complete", name="task2") + task3 = context.step(lambda _: "Task 3 complete", name="task3") + + # All tasks execute concurrently and results are collected + return [task1, task2, task3] +``` + +Check that multiple operations executed in parallel: + +```python +from aws_durable_execution_sdk_python.lambda_service import OperationType + +@pytest.mark.durable_execution( + handler=handler, + lambda_function_name="parallel_function", +) +def test_parallel_operations(durable_runner): + """Test parallel operations execute.""" + with durable_runner: + result = durable_runner.run(input={}, timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + + # Find all step operations + step_ops = [ + op for op in result.operations + if op.operation_type == OperationType.STEP + ] + assert len(step_ops) == 3 + + # Verify step names + step_names = {op.name for op in step_ops} + assert step_names == {"task1", "task2", "task3"} +``` + +[↑ Back to top](#table-of-contents) + +## Test organization tips + +### Use descriptive test names + +Name your tests to clearly describe what they verify: + +```python +# Good - describes what is being tested +def test_order_processing_succeeds_with_valid_input(durable_runner): + pass + +def test_order_processing_fails_with_invalid_order_id(durable_runner): + pass + +# Avoid - vague or unclear +def test_order(durable_runner): + pass + +def test_case_1(durable_runner): + pass +``` + +### Group related tests + +Organize tests by feature or functionality: + +```python +# tests/test_order_processing.py +class TestOrderValidation: + """Tests for order validation.""" + + @pytest.mark.durable_execution(handler=handler, lambda_function_name="orders") + def test_valid_order(self, durable_runner): + """Test valid order is accepted.""" + pass + + @pytest.mark.durable_execution(handler=handler, lambda_function_name="orders") + def test_invalid_order_id(self, durable_runner): + """Test invalid order ID is rejected.""" + pass + +class TestOrderFulfillment: + """Tests for order fulfillment.""" + + @pytest.mark.durable_execution(handler=handler, lambda_function_name="orders") + def test_fulfillment_success(self, durable_runner): + """Test successful order fulfillment.""" + pass +``` + +### Use fixtures for common test data + +Create fixtures for test data 
you use across multiple tests: + +```python +# conftest.py +@pytest.fixture +def valid_order(): + """Provide valid order data.""" + return { + "order_id": "order-123", + "customer_id": "customer-456", + "amount": 100.0, + "items": [ + {"product_id": "prod-1", "quantity": 2}, + {"product_id": "prod-2", "quantity": 1}, + ], + } + +# test_orders.py +@pytest.mark.durable_execution(handler=handler, lambda_function_name="orders") +def test_order_processing(durable_runner, valid_order): + """Test order processing with valid data.""" + with durable_runner: + result = durable_runner.run(input=valid_order, timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED +``` + +### Add docstrings to tests + +Document what each test verifies: + +```python +@pytest.mark.durable_execution(handler=handler, lambda_function_name="payment") +def test_payment_with_retry(durable_runner): + """Test payment processing retries on transient failures. + + This test verifies that: + 1. Payment step retries on RuntimeError + 2. Function eventually succeeds after retries + 3. Final result includes transaction ID + """ + with durable_runner: + result = durable_runner.run(input={"amount": 50.0}, timeout=30) + + assert result.status is InvocationStatus.SUCCEEDED +``` + +### Use parametrized tests for similar cases + +Test multiple inputs with the same logic using `pytest.mark.parametrize`: + +```python +@pytest.mark.parametrize("a,b,expected", [ + (5, 3, 8), + (10, 20, 30), + (0, 0, 0), + (-5, 5, 0), +]) +@pytest.mark.durable_execution(handler=add_handler, lambda_function_name="calculator") +def test_addition(durable_runner, a, b, expected): + """Test addition with various inputs.""" + with durable_runner: + result = durable_runner.run(input={"a": a, "b": b}, timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + assert deserialize_operation_payload(result.result) == expected +``` + +### Keep tests focused + +Each test should verify one specific behavior: + +```python +# Good - focused on one behavior +@pytest.mark.durable_execution(handler=handler, lambda_function_name="orders") +def test_order_validation_succeeds(durable_runner): + """Test order validation with valid input.""" + with durable_runner: + result = durable_runner.run(input={"order_id": "order-123"}, timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + +@pytest.mark.durable_execution(handler=handler, lambda_function_name="orders") +def test_order_validation_fails_missing_id(durable_runner): + """Test order validation fails without order ID.""" + with durable_runner: + result = durable_runner.run(input={}, timeout=10) + + assert result.status is InvocationStatus.FAILED + +# Avoid - testing multiple behaviors +@pytest.mark.durable_execution(handler=handler, lambda_function_name="orders") +def test_order_validation(durable_runner): + """Test order validation.""" + # Test valid input + result1 = durable_runner.run(input={"order_id": "order-123"}, timeout=10) + assert result1.status is InvocationStatus.SUCCEEDED + + # Test invalid input + result2 = durable_runner.run(input={}, timeout=10) + assert result2.status is InvocationStatus.FAILED +``` + +[↑ Back to top](#table-of-contents) + +## FAQ + +**Q: Do I need to deploy my function to test it?** + +A: No, the test runner executes your function locally. You only need to deploy for cloud testing mode. + +**Q: How do I test functions with external dependencies?** + +A: Mock external dependencies in your test setup. 
The test runner executes your function code as-is, so standard Python mocking works. + +**Q: Can I test multiple functions in one test file?** + +A: Yes, use different `@pytest.mark.durable_execution` markers for each function you want to test. + +**Q: How do I access operation results?** + +A: Use `result.get_step(name)` for steps, or iterate through `result.operations` to find specific operation types. + +**Q: What's the difference between result.result and step.result?** + +A: `result.result` is the final return value of your handler function. `step.result` is the return value of a specific step operation. + +**Q: How do I test error scenarios?** + +A: Check that `result.status is InvocationStatus.FAILED` and inspect `result.error` for the error message. + +**Q: Can I run tests in parallel?** + +A: Yes, use pytest-xdist: `pytest -n auto` to run tests in parallel. + +**Q: How do I debug failing tests?** + +A: Add print statements or use a debugger. The test runner executes your code locally, so standard debugging tools work. + +**Q: What timeout should I use?** + +A: Use a timeout slightly longer than your function's expected execution time. For most tests, 10-30 seconds is sufficient. + +**Q: How do I test functions that use environment variables?** + +A: Set environment variables in your test setup or use pytest fixtures to manage them. + +[↑ Back to top](#table-of-contents) + +## See also + +- [Complex workflows](complex-workflows.md) - Testing multi-step workflows +- [Best practices](best-practices.md) - Testing recommendations +- [Pytest integration](../advanced/pytest-integration.md) - Pytest fixtures and markers +- [Custom assertions](../advanced/custom-assertions.md) - Advanced result inspection +- [Steps](../core/steps.md) - Testing step operations +- [Wait operations](../core/wait.md) - Testing wait operations +- [Callbacks](../core/callbacks.md) - Testing callback operations + +[↑ Back to top](#table-of-contents) + +## License + +See the [LICENSE](../../LICENSE) file for our project's licensing. + +[↑ Back to top](#table-of-contents) diff --git a/docs/testing-patterns/complex-workflows.md b/docs/testing-patterns/complex-workflows.md new file mode 100644 index 0000000..cac74a1 --- /dev/null +++ b/docs/testing-patterns/complex-workflows.md @@ -0,0 +1,675 @@ +# Complex Workflow Testing + +## Table of Contents + +- [Overview](#overview) +- [Prerequisites](#prerequisites) +- [Multi-step workflows](#multi-step-workflows) +- [Nested child contexts](#nested-child-contexts) +- [Parallel operations](#parallel-operations) +- [Error scenarios](#error-scenarios) +- [Timeout handling](#timeout-handling) +- [Polling patterns](#polling-patterns) +- [FAQ](#faq) +- [See also](#see-also) + +[← Back to main index](../index.md) + +## Overview + +When your workflows involve multiple steps, nested contexts, or parallel operations, you need to verify more than just the final result. You'll want to check intermediate states, operation ordering, error handling, and timeout behavior. + +This guide shows you how to test workflows that chain operations together, handle errors gracefully, and implement polling patterns. + +[↑ Back to top](#table-of-contents) + +## Prerequisites + +You need both SDKs installed: + +```console +pip install aws-durable-execution-sdk-python +pip install aws-durable-execution-sdk-python-testing +pip install pytest +``` + +If you're new to testing durable functions, start with [Basic test patterns](basic-tests.md) first. 
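+Many of the workflows below would call real services in production. In tests, patch those collaborators with standard Python mocking; here's a brief sketch in which the `src.payments.charge_card` path and the imported `handler` are placeholders for your own module layout:
+
+```python
+from unittest.mock import patch
+
+import pytest
+from aws_durable_execution_sdk_python.execution import InvocationStatus
+
+from src.payments import handler  # placeholder import for your handler
+
+@pytest.mark.durable_execution(handler=handler, lambda_function_name="payments")
+@patch("src.payments.charge_card", return_value={"transaction_id": "txn_test"})
+def test_payment_uses_mock(mock_charge, durable_runner):
+    """The payment step talks to the mock instead of the real service."""
+    with durable_runner:
+        result = durable_runner.run(input={"amount": 50.0}, timeout=10)
+
+    assert result.status is InvocationStatus.SUCCEEDED
+    mock_charge.assert_called_once()
+```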
+
+[↑ Back to top](#table-of-contents)
+
+## Multi-step workflows
+
+### Sequential operations
+
+Here's a workflow that processes an order through validation, payment, and fulfillment:
+
+```python
+from aws_durable_execution_sdk_python import (
+    DurableContext,
+    durable_execution,
+    durable_step,
+    StepContext,
+)
+
+@durable_step
+def validate_order(step_context: StepContext, order_id: str) -> dict:
+    return {"order_id": order_id, "status": "validated"}
+
+@durable_step
+def process_payment(step_context: StepContext, order: dict) -> dict:
+    return {**order, "payment_status": "completed"}
+
+@durable_step
+def fulfill_order(step_context: StepContext, order: dict) -> dict:
+    return {**order, "fulfillment_status": "shipped"}
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+    order_id = event["order_id"]
+
+    validated = context.step(validate_order(order_id), name="validate")
+    paid = context.step(process_payment(validated), name="payment")
+    fulfilled = context.step(fulfill_order(paid), name="fulfillment")
+
+    return fulfilled
+```
+
+Verify all steps execute in order:
+
+```python
+import pytest
+from aws_durable_execution_sdk_python.execution import InvocationStatus
+from aws_durable_execution_sdk_python.lambda_service import OperationType
+from test.conftest import deserialize_operation_payload
+
+@pytest.mark.durable_execution(handler=handler, lambda_function_name="order_workflow")
+def test_order_workflow(durable_runner):
+    """Test order processing executes all steps."""
+    with durable_runner:
+        result = durable_runner.run(input={"order_id": "order-123"}, timeout=30)
+
+    assert result.status is InvocationStatus.SUCCEEDED
+
+    # Check final result
+    final_result = deserialize_operation_payload(result.result)
+    assert final_result["order_id"] == "order-123"
+    assert final_result["payment_status"] == "completed"
+    assert final_result["fulfillment_status"] == "shipped"
+
+    # Verify all three steps ran
+    step_ops = [op for op in result.operations if op.operation_type == OperationType.STEP]
+    assert len(step_ops) == 3
+
+    # Check step order
+    step_names = [op.name for op in step_ops]
+    assert step_names == ["validate", "payment", "fulfillment"]
+```
+
+[↑ Back to top](#table-of-contents)
+
+### Conditional branching
+
+Test different execution paths based on input:
+
+```python
+@durable_execution
+def handler(event: dict, context: DurableContext) -> str:
+    amount = event.get("amount", 0)
+
+    context.step(lambda _: amount, name="validate_amount")
+
+    if amount > 1000:
+        context.step(lambda _: "Manager approval required", name="approval")
+        context.wait(seconds=10, name="approval_wait")
+        result = context.step(lambda _: "High-value order processed", name="process_high")
+    else:
+        result = context.step(lambda _: "Standard order processed", name="process_standard")
+
+    return result
+```
+
+Test both paths separately:
+
+```python
+@pytest.mark.durable_execution(handler=handler, lambda_function_name="conditional_workflow")
+def test_high_value_path(durable_runner):
+    """Test high-value orders require approval."""
+    with durable_runner:
+        result = durable_runner.run(input={"amount": 1500}, timeout=30)
+
+    assert result.status is InvocationStatus.SUCCEEDED
+    assert deserialize_operation_payload(result.result) == "High-value order processed"
+
+    # Verify approval step exists
+    approval_step = result.get_step("approval")
+    assert approval_step is not None
+
+@pytest.mark.durable_execution(handler=handler, lambda_function_name="conditional_workflow")
+def test_standard_path(durable_runner):
+    """Test standard orders skip approval."""
+    with durable_runner:
+        result = durable_runner.run(input={"amount": 500}, timeout=30)
+
+    assert result.status is InvocationStatus.SUCCEEDED
+
+    # Verify no approval step
+    step_names = [op.name for op in result.operations if op.operation_type == OperationType.STEP]
+    assert "approval" not in step_names
+```
+
+[↑ Back to top](#table-of-contents)
+
+## Nested child contexts
+
+### Single child context
+
+Child contexts isolate operations:
+
+```python
+from aws_durable_execution_sdk_python import (
+    DurableContext,
+    durable_execution,
+    durable_with_child_context,
+)
+
+@durable_with_child_context
+def process_item(ctx: DurableContext, item_id: str) -> dict:
+    ctx.step(lambda _: f"Validating {item_id}", name="validate")
+    result = ctx.step(
+        lambda _: {"item_id": item_id, "status": "processed"},
+        name="process"
+    )
+    return result
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+    item_id = event["item_id"]
+    result = context.run_in_child_context(
+        process_item(item_id),
+        name="item_processing"
+    )
+    return result
+```
+
+Verify the child context executes:
+
+```python
+@pytest.mark.durable_execution(handler=handler, lambda_function_name="child_context_workflow")
+def test_child_context(durable_runner):
+    """Test child context execution."""
+    with durable_runner:
+        result = durable_runner.run(input={"item_id": "item-123"}, timeout=30)
+
+    assert result.status is InvocationStatus.SUCCEEDED
+
+    # Check child context ran
+    context_ops = [op for op in result.operations if op.operation_type.value == "CONTEXT"]
+    assert len(context_ops) == 1
+    assert context_ops[0].name == "item_processing"
+
+    # Check child context result
+    child_result = result.get_context("item_processing")
+    child_data = deserialize_operation_payload(child_result.result)
+    assert child_data["item_id"] == "item-123"
+```
+
+[↑ Back to top](#table-of-contents)
+
+### Multiple child contexts
+
+Use multiple child contexts to organize operations:
+
+```python
+@durable_with_child_context
+def validate_data(ctx: DurableContext, data: dict) -> dict:
+    return ctx.step(lambda _: {**data, "validated": True}, name="validate")
+
+@durable_with_child_context
+def transform_data(ctx: DurableContext, data: dict) -> dict:
+    return ctx.step(lambda _: {**data, "transformed": True}, name="transform")
+
+@durable_execution
+def handler(event: dict, context: DurableContext) -> dict:
+    data = event["data"]
+
+    validated = context.run_in_child_context(validate_data(data), name="validation")
+    transformed = context.run_in_child_context(transform_data(validated), name="transformation")
+
+    return transformed
+```
+
+Verify both contexts execute:
+
+```python
+@pytest.mark.durable_execution(handler=handler, lambda_function_name="multiple_contexts")
+def test_multiple_child_contexts(durable_runner):
+    """Test multiple child contexts."""
+    with durable_runner:
+        result = durable_runner.run(input={"data": {"value": 42}}, timeout=30)
+
+    assert result.status is InvocationStatus.SUCCEEDED
+
+    final_result = deserialize_operation_payload(result.result)
+    assert final_result["validated"] is True
+    assert final_result["transformed"] is True
+
+    # Verify both contexts ran
+    context_ops = [op for op in result.operations if op.operation_type.value == "CONTEXT"]
+    assert len(context_ops) == 2
+```
+
+[↑ Back to top](#table-of-contents)
+
+## Parallel operations
+
+### Basic parallel execution
+
+Multiple operations execute concurrently:
+
+```python +@durable_execution +def handler(event: dict, context: DurableContext) -> list[str]: + task1 = context.step(lambda _: "Task 1 complete", name="task1") + task2 = context.step(lambda _: "Task 2 complete", name="task2") + task3 = context.step(lambda _: "Task 3 complete", name="task3") + + return [task1, task2, task3] +``` + +Verify all operations execute: + +```python +@pytest.mark.durable_execution(handler=handler, lambda_function_name="parallel_ops") +def test_parallel_operations(durable_runner): + """Test parallel execution.""" + with durable_runner: + result = durable_runner.run(input={}, timeout=30) + + assert result.status is InvocationStatus.SUCCEEDED + + results = deserialize_operation_payload(result.result) + assert len(results) == 3 + + # Verify all steps ran + step_ops = [op for op in result.operations if op.operation_type == OperationType.STEP] + assert len(step_ops) == 3 + + step_names = {op.name for op in step_ops} + assert step_names == {"task1", "task2", "task3"} +``` + +[↑ Back to top](#table-of-contents) + +### Processing collections + + +Process collection items in parallel: + +```python +@durable_execution +def handler(event: dict, context: DurableContext) -> list[int]: + numbers = event.get("numbers", [1, 2, 3, 4, 5]) + + results = [] + for i, num in enumerate(numbers): + result = context.step(lambda _, n=num: n * 2, name=f"square_{i}") + results.append(result) + + return results +``` + +Verify collection processing: + +```python +@pytest.mark.durable_execution(handler=handler, lambda_function_name="parallel_collection") +def test_collection_processing(durable_runner): + """Test collection processing.""" + with durable_runner: + result = durable_runner.run(input={"numbers": [1, 2, 3, 4, 5]}, timeout=30) + + assert result.status is InvocationStatus.SUCCEEDED + assert deserialize_operation_payload(result.result) == [2, 4, 6, 8, 10] + + # Verify all steps ran + step_ops = [op for op in result.operations if op.operation_type == OperationType.STEP] + assert len(step_ops) == 5 +``` + +[↑ Back to top](#table-of-contents) + +## Error scenarios + +### Expected failures + +Test that your workflow fails correctly: + +```python +@durable_step +def validate_input(step_context: StepContext, value: int) -> int: + if value < 0: + raise ValueError("Value must be non-negative") + return value + +@durable_execution +def handler(event: dict, context: DurableContext) -> int: + value = event.get("value", 0) + validated = context.step(validate_input(value), name="validate") + return validated +``` + +Verify validation failures: + +```python +@pytest.mark.durable_execution(handler=handler, lambda_function_name="validation_workflow") +def test_validation_failure(durable_runner): + """Test validation fails with invalid input.""" + with durable_runner: + result = durable_runner.run(input={"value": -5}, timeout=30) + + assert result.status is InvocationStatus.FAILED + assert "Value must be non-negative" in str(result.error) +``` + +[↑ Back to top](#table-of-contents) + +### Retry behavior + +Test operations that retry on failure: + +```python +from aws_durable_execution_sdk_python.config import StepConfig +from aws_durable_execution_sdk_python.retries import ( + RetryStrategyConfig, + create_retry_strategy, +) + +attempt_count = 0 + +@durable_step +def unreliable_operation(step_context: StepContext) -> str: + global attempt_count + attempt_count += 1 + + if attempt_count < 3: + raise RuntimeError("Transient error") + + return "Operation succeeded" + +@durable_execution +def handler(event: 
dict, context: DurableContext) -> str: + retry_config = RetryStrategyConfig( + max_attempts=5, + retryable_error_types=[RuntimeError], + ) + + result = context.step( + unreliable_operation(), + config=StepConfig(create_retry_strategy(retry_config)), + name="unreliable" + ) + + return result +``` + +Verify retry succeeds: + +```python +@pytest.mark.durable_execution(handler=handler, lambda_function_name="retry_workflow") +def test_retry_behavior(durable_runner): + """Test operation retries on failure.""" + global attempt_count + attempt_count = 0 + + with durable_runner: + result = durable_runner.run(input={}, timeout=60) + + assert result.status is InvocationStatus.SUCCEEDED + assert deserialize_operation_payload(result.result) == "Operation succeeded" + assert attempt_count >= 3 +``` + +[↑ Back to top](#table-of-contents) + +### Partial failures + +Test workflows where some operations succeed before failure: + +```python +@durable_execution +def handler(event: dict, context: DurableContext) -> str: + context.step(lambda _: "Step 1 complete", name="step1") + context.step(lambda _: "Step 2 complete", name="step2") + context.step( + lambda _: (_ for _ in ()).throw(RuntimeError("Step 3 failed")), + name="step3" + ) + return "Should not reach here" +``` + +Verify partial execution: + +```python +@pytest.mark.durable_execution(handler=handler, lambda_function_name="partial_failure") +def test_partial_failure(durable_runner): + """Test workflow fails after some steps succeed.""" + with durable_runner: + result = durable_runner.run(input={}, timeout=30) + + assert result.status is InvocationStatus.FAILED + + # First two steps succeeded + step1 = result.get_step("step1") + assert deserialize_operation_payload(step1.result) == "Step 1 complete" + + step2 = result.get_step("step2") + assert deserialize_operation_payload(step2.result) == "Step 2 complete" + + assert "Step 3 failed" in str(result.error) +``` + +[↑ Back to top](#table-of-contents) + +## Timeout handling + +### Callback timeouts + + +Verify callback timeout configuration: + +```python +from aws_durable_execution_sdk_python.config import CallbackConfig + +@durable_execution +def handler(event: dict, context: DurableContext) -> str: + config = CallbackConfig(timeout_seconds=60, heartbeat_timeout_seconds=30) + callback = context.create_callback(name="approval_callback", config=config) + return f"Callback created: {callback.callback_id}" +``` + +Test callback configuration: + +```python +@pytest.mark.durable_execution(handler=handler, lambda_function_name="callback_timeout") +def test_callback_timeout(durable_runner): + """Test callback timeout configuration.""" + with durable_runner: + result = durable_runner.run(input={}, timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + + callback_ops = [op for op in result.operations if op.operation_type.value == "CALLBACK"] + assert len(callback_ops) == 1 + assert callback_ops[0].name == "approval_callback" +``` + +[↑ Back to top](#table-of-contents) + +### Long waits + +For workflows with long waits, verify configuration without actually waiting: + +```python +@durable_execution +def handler(event: dict, context: DurableContext) -> str: + context.step(lambda _: "Starting", name="start") + context.wait(seconds=3600, name="long_wait") # 1 hour + context.step(lambda _: "Continuing", name="continue") + return "Complete" +``` + +Test completes quickly: + +```python +@pytest.mark.durable_execution(handler=handler, lambda_function_name="long_wait") +def test_long_wait(durable_runner): + 
"""Test long wait configuration.""" + with durable_runner: + result = durable_runner.run(input={}, timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED + + # Verify wait exists + wait_ops = [op for op in result.operations if op.operation_type.value == "WAIT"] + assert len(wait_ops) == 1 + assert wait_ops[0].name == "long_wait" +``` + +[↑ Back to top](#table-of-contents) + +## Polling patterns + +### Wait-for-condition + +Poll until a condition is met: + +```python +@durable_execution +def handler(event: dict, context: DurableContext) -> int: + state = 0 + attempt = 0 + max_attempts = 5 + + while attempt < max_attempts: + attempt += 1 + + state = context.step(lambda _, s=state: s + 1, name=f"increment_{attempt}") + + if state >= 3: + break + + context.wait(seconds=1, name=f"wait_{attempt}") + + return state +``` + +Verify polling behavior: + +```python +@pytest.mark.durable_execution(handler=handler, lambda_function_name="polling") +def test_polling(durable_runner): + """Test wait-for-condition pattern.""" + with durable_runner: + result = durable_runner.run(input={}, timeout=30) + + assert result.status is InvocationStatus.SUCCEEDED + assert deserialize_operation_payload(result.result) == 3 + + # Should have 3 increment steps + step_ops = [op for op in result.operations if op.operation_type == OperationType.STEP] + assert len(step_ops) == 3 + + # Should have 2 waits (before reaching state 3) + wait_ops = [op for op in result.operations if op.operation_type.value == "WAIT"] + assert len(wait_ops) == 2 +``` + +[↑ Back to top](#table-of-contents) + +### Maximum attempts + +Test polling respects attempt limits: + +```python +@durable_execution +def handler(event: dict, context: DurableContext) -> dict: + target = event.get("target", 10) + state = 0 + attempt = 0 + max_attempts = 5 + + while attempt < max_attempts and state < target: + attempt += 1 + state = context.step(lambda _, s=state: s + 1, name=f"attempt_{attempt}") + + if state < target: + context.wait(seconds=1, name=f"wait_{attempt}") + + return {"state": state, "attempts": attempt, "reached_target": state >= target} +``` + +Test with unreachable target: + +```python +@pytest.mark.durable_execution(handler=handler, lambda_function_name="max_attempts") +def test_max_attempts(durable_runner): + """Test polling stops at max attempts.""" + with durable_runner: + result = durable_runner.run(input={"target": 10}, timeout=30) + + assert result.status is InvocationStatus.SUCCEEDED + + final_result = deserialize_operation_payload(result.result) + assert final_result["attempts"] == 5 + assert final_result["state"] == 5 + assert final_result["reached_target"] is False +``` + +[↑ Back to top](#table-of-contents) + +## FAQ + +**Q: How do I test workflows with long waits?** + +A: The test runner doesn't actually wait. You can verify wait operations are configured correctly without waiting for them to complete. + +**Q: Can I test workflows with external API calls?** + +A: Yes, but mock external dependencies in your tests. The test runner executes your code locally, so standard Python mocking works. + +**Q: What's the best way to test conditional logic?** + +A: Write separate tests for each execution path. Use descriptive test names and verify the specific operations that should execute in each path. + +**Q: How do I verify operation ordering?** + +A: Iterate through `result.operations` and check the order. You can also use operation names to verify specific sequences. 
+ +**Q: What timeout should I use?** + +A: Use a timeout slightly longer than expected execution time. For most tests, 30-60 seconds is sufficient. + +**Q: How do I test error recovery?** + +A: Test both the failure case (verify the error is raised) and the recovery case (verify retry succeeds). Use separate tests for each scenario. + +[↑ Back to top](#table-of-contents) + +## See also + +- [Basic test patterns](basic-tests.md) - Simple testing patterns +- [Best practices](best-practices.md) - Testing recommendations +- [Steps](../core/steps.md) - Step operations +- [Wait operations](../core/wait.md) - Wait operations +- [Callbacks](../core/callbacks.md) - Callback operations +- [Child contexts](../core/child-contexts.md) - Child context operations +- [Parallel operations](../core/parallel.md) - Parallel execution + +[↑ Back to top](#table-of-contents) + +## License + +See the [LICENSE](../../LICENSE) file for our project's licensing. + +[↑ Back to top](#table-of-contents) diff --git a/docs/testing-patterns/stores.md b/docs/testing-patterns/stores.md new file mode 100644 index 0000000..dce1cfa --- /dev/null +++ b/docs/testing-patterns/stores.md @@ -0,0 +1,263 @@ +# Execution Stores + +## Table of Contents + +- [Overview](#overview) +- [Available stores](#available-stores) +- [In-memory store](#in-memory-store) +- [Filesystem store](#filesystem-store) +- [Choosing a store](#choosing-a-store) +- [Configuration](#configuration) +- [FAQ](#faq) +- [See also](#see-also) + +[← Back to main index](../index.md) + +## Overview + +Execution stores manage how test execution data is persisted during testing. The testing SDK (`aws-durable-execution-sdk-python-testing`) provides different store implementations for different testing scenarios. By default, tests use an in-memory store that's fast and doesn't require cleanup. For scenarios where you need persistence across test runs or want to inspect execution history, you can use a filesystem store. + +More store types will be added in future releases to support additional testing scenarios. + +[↑ Back to top](#table-of-contents) + +## Available stores + +The SDK currently provides two store implementations: + +- **In-memory store** - Fast, ephemeral storage for standard testing (default) +- **Filesystem store** - Persistent storage that saves executions to disk + +Additional store types may be added in future releases. + +[↑ Back to top](#table-of-contents) + +## In-memory store + +The in-memory store keeps execution data in memory during test runs. It's the default store and works well for most testing scenarios. 
+ +### Characteristics + +- **Fast** - No disk I/O overhead +- **Ephemeral** - Data is lost when tests complete +- **Thread-safe** - Uses locks for concurrent access +- **No cleanup needed** - Memory is automatically freed + +### When to use + +Use the in-memory store when: +- Running standard unit tests +- You don't need to inspect executions after tests complete +- You want the fastest test execution +- You're running tests in CI/CD pipelines + +### Example + +The in-memory store is used by default: + +```python +import pytest +from aws_durable_execution_sdk_python.execution import InvocationStatus + +@pytest.mark.durable_execution( + handler=my_handler, + lambda_function_name="my_function", +) +def test_with_memory_store(durable_runner): + """Test uses in-memory store by default.""" + with durable_runner: + result = durable_runner.run(input={"data": "test"}, timeout=10) + + assert result.status is InvocationStatus.SUCCEEDED +``` + +[↑ Back to top](#table-of-contents) + +## Filesystem store + +The filesystem store persists execution data to disk as JSON files. Each execution is saved in a separate file, making it easy to inspect execution history. + +### Characteristics + +- **Persistent** - Data survives test runs +- **Inspectable** - JSON files can be viewed and analyzed +- **Configurable location** - Choose where files are stored +- **Automatic directory creation** - Creates storage directory if needed + +### When to use + +Use the filesystem store when: +- Debugging complex test failures +- You need to inspect execution history +- Running integration tests that span multiple sessions +- Analyzing execution patterns over time + +### Example + +Configure the filesystem store using environment variables: + +```console +# Set store type to filesystem +export AWS_DEX_STORE_TYPE=filesystem + +# Optionally set custom storage directory (defaults to .durable_executions) +export AWS_DEX_STORE_PATH=./test-executions + +# Run tests +pytest tests/ +``` + +Or configure it programmatically when using the cloud test runner: + +```python +from aws_durable_execution_sdk_python_testing.runner import ( + DurableFunctionCloudTestRunner, + DurableFunctionCloudTestRunnerConfig, +) +from aws_durable_execution_sdk_python_testing.stores.base import StoreType + +config = DurableFunctionCloudTestRunnerConfig( + function_name="my-function", + region="us-west-2", + store_type=StoreType.FILESYSTEM, + store_path="./my-test-executions", +) + +runner = DurableFunctionCloudTestRunner(config=config) +``` + +### Storage format + +Executions are stored as JSON files with sanitized ARN names: + +``` +.durable_executions/ +├── arn_aws_states_us-west-2_123456789012_execution_my-function_abc123.json +├── arn_aws_states_us-west-2_123456789012_execution_my-function_def456.json +└── arn_aws_states_us-west-2_123456789012_execution_my-function_ghi789.json +``` + +Each file contains the complete execution state including operations, checkpoints, and results. 
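+
+Because each file is plain JSON, persisted executions can be inspected with standard tooling. A minimal sketch (assuming the default `.durable_executions` directory; the exact schema inside each file may differ between SDK versions):
+
+```python
+import json
+from pathlib import Path
+
+# Walk the store directory and load each persisted execution
+store_dir = Path(".durable_executions")
+for path in sorted(store_dir.glob("*.json")):
+    with path.open(encoding="utf-8") as f:
+        execution = json.load(f)
+    # Print the file name and top-level keys for a quick overview
+    print(path.name, "->", sorted(execution.keys()))
+```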
+ +[↑ Back to top](#table-of-contents) + +## Choosing a store + +Use this guide to choose the right store for your needs: + +| Scenario | Recommended Store | Reason | +|----------|------------------|---------| +| Unit tests | In-memory | Fast, no cleanup needed | +| CI/CD pipelines | In-memory | Fast, ephemeral | +| Debugging failures | Filesystem | Inspect execution history | +| Integration tests | Filesystem | Persist across sessions | +| Performance testing | In-memory | Minimize I/O overhead | +| Execution analysis | Filesystem | Analyze patterns over time | + +[↑ Back to top](#table-of-contents) + +## Configuration + +### Environment variables + +Configure stores using environment variables: + +```console +# Store type (memory or filesystem) +export AWS_DEX_STORE_TYPE=filesystem + +# Storage directory for filesystem store (optional, defaults to .durable_executions) +export AWS_DEX_STORE_PATH=./test-executions +``` + +### Programmatic configuration + +Configure stores when creating a cloud test runner: + +```python +from aws_durable_execution_sdk_python_testing.runner import ( + DurableFunctionCloudTestRunner, + DurableFunctionCloudTestRunnerConfig, +) +from aws_durable_execution_sdk_python_testing.stores.base import StoreType + +# In-memory store (default) +config = DurableFunctionCloudTestRunnerConfig( + function_name="my-function", + region="us-west-2", + store_type=StoreType.MEMORY, +) + +# Filesystem store +config = DurableFunctionCloudTestRunnerConfig( + function_name="my-function", + region="us-west-2", + store_type=StoreType.FILESYSTEM, + store_path="./my-executions", +) + +runner = DurableFunctionCloudTestRunner(config=config) +``` + +### Default values + +If not specified: +- Store type defaults to `MEMORY` +- Filesystem store path defaults to `.durable_executions` + +[↑ Back to top](#table-of-contents) + +## FAQ + +**Q: Can I switch stores between test runs?** + +A: Yes, you can change the store type at any time. However, executions stored in one store won't be available in another. + +**Q: Does the filesystem store clean up old executions?** + +A: No, the filesystem store doesn't automatically delete old executions. You need to manually clean up the storage directory when needed. + +**Q: Can I use the filesystem store with the local test runner?** + +A: The filesystem store is primarily designed for the cloud test runner. The local test runner uses an in-memory store by default. + +**Q: Are execution files human-readable?** + +A: Yes, execution files are stored as formatted JSON and can be opened in any text editor. + +**Q: What happens if the storage directory doesn't exist?** + +A: The filesystem store automatically creates the directory if it doesn't exist. + +**Q: Can I use a custom store implementation?** + +A: The SDK defines an `ExecutionStore` protocol that you can implement for custom storage backends. However, this is an advanced use case. + +**Q: Will more store types be added?** + +A: Yes, additional store types may be added in future releases to support more testing scenarios. + +**Q: Does the in-memory store support concurrent tests?** + +A: Yes, the in-memory store is thread-safe and supports concurrent test execution. + +**Q: How much disk space does the filesystem store use?** + +A: Each execution typically uses a few KB to a few MB depending on the number of operations and data size. Monitor your storage directory if running many tests. 
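+
+As a follow-up to the cleanup question above, a simple way to clear persisted executions between runs is to delete the store directory (adjust the path if you set `AWS_DEX_STORE_PATH`):
+
+```python
+import shutil
+
+# Delete the filesystem store directory and all persisted executions in it
+shutil.rmtree(".durable_executions", ignore_errors=True)
+```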
+ +[↑ Back to top](#table-of-contents) + +## See also + +- [Basic tests](basic-tests.md) - Simple test patterns +- [Cloud testing](../advanced/cloud-testing.md) - Testing with deployed functions +- [Test runner](../core/test-runner.md) - Test runner configuration +- [Best practices](best-practices.md) - Testing recommendations + +[↑ Back to top](#table-of-contents) + +## License + +See the [LICENSE](../../LICENSE) file for our project's licensing. + +[↑ Back to top](#table-of-contents) diff --git a/ops/__tests__/test_parse_sdk_branch.py b/ops/__tests__/test_parse_sdk_branch.py new file mode 100755 index 0000000..d458651 --- /dev/null +++ b/ops/__tests__/test_parse_sdk_branch.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python3 + +import os +import sys + +sys.path.insert(0, os.path.dirname(os.path.dirname(__file__))) + +from parse_sdk_branch import parse_sdk_branch + + +def test_parse_sdk_branch(): + test_cases = [ + # Basic cases + ("TESTING_SDK_BRANCH = feature/test", "feature/test"), + ("TESTING_SDK_BRANCH: feature/test", "feature/test"), + ("TESTING_SDK_BRANCH=feature/test", "feature/test"), + ("testing_sdk_branch: feature/test", "feature/test"), + # Complex PR body with backticks and contractions + ( + """Updated the script to safely parse the testing SDK branch from the PR body, handling case insensitivity and whitespace. + +The goal here is to fix the usage of backticks such as in `foo`, and contractions that we've been using such as `we've` + +``` +plus of course the usage of multiple backticks to include code +``` + +TESTING_SDK_BRANCH = main + +By submitting this pull request, I confirm that you can use, modify, copy, and redistribute this contribution, under the terms of your choice.""", + "main", + ), + # Edge cases with markdown and special characters + ( + """# PR Title + +Some `code` and we've got contractions here. + +```python +def test(): + return "test" +``` + +TESTING_SDK_BRANCH: feature/fix-backticks + +More text with `inline code` and don't forget contractions.""", + "feature/fix-backticks", + ), + # Multiple occurrences (should take first) + ( + """TESTING_SDK_BRANCH = first-branch + +Some text here. + +TESTING_SDK_BRANCH = second-branch""", + "first-branch", + ), + # Whitespace variations + (" TESTING_SDK_BRANCH = feature/spaces ", "feature/spaces"), + ("TESTING_SDK_BRANCH:feature/no-space", "feature/no-space"), + # Default cases + ("No branch specified", "main"), + ("", "main"), + ("Just some random text", "main"), + # Case with backticks in branch name + ("TESTING_SDK_BRANCH = feature/fix-`backticks`", "feature/fix-`backticks`"), + # Case with contractions in surrounding text + ( + "We've updated this and TESTING_SDK_BRANCH = feature/test and we're done", + "feature/test", + ), + ] + + for input_text, expected in test_cases: + result = parse_sdk_branch(input_text) + # Assert is expected in test functions + assert result == expected, ( # noqa: S101 + f"Expected '{expected}' but got '{result}' for input: {input_text[:50]}..." 
+ ) + + +if __name__ == "__main__": + test_parse_sdk_branch() + sys.exit(0) diff --git a/ops/parse_sdk_branch.py b/ops/parse_sdk_branch.py new file mode 100755 index 0000000..1967085 --- /dev/null +++ b/ops/parse_sdk_branch.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python3 + +import os +import re + + +def parse_sdk_branch(pr_body: str, default_ref: str = "main") -> str: + """Parse PR body for TESTING_SDK_BRANCH and return the branch reference.""" + pattern = re.compile(r"(?i)TESTING_SDK_BRANCH\s*[:=]\s*(\S+)", re.MULTILINE) + + match = pattern.search(pr_body) + if match: + ref = match.group(1).strip() + if ref: + return ref + + return default_ref + + +def main(): + pr_body = os.environ.get("PR_BODY", "") + ref = parse_sdk_branch(pr_body) + + github_output = os.environ.get("GITHUB_OUTPUT") + if github_output: + with open(github_output, "a", encoding="utf-8") as f: + f.write(f"testing_ref={ref}\n") + + +if __name__ == "__main__": + main() diff --git a/pyproject.toml b/pyproject.toml index 639274c..d80d37c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,29 +5,29 @@ build-backend = "hatchling.build" [project] name = "aws-durable-execution-sdk-python" dynamic = ["version"] -description = 'This the Python SDK for AWS Lambda Durable Functions.' +description = 'AWS Durable Execution SDK for Python' readme = "README.md" -requires-python = ">=3.13" +requires-python = ">=3.11" license = "Apache-2.0" keywords = [] authors = [{ name = "yaythomas", email = "tgaigher@amazon.com" }] classifiers = [ "Development Status :: 4 - Beta", "Programming Language :: Python", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", ] -dependencies = ["boto3>=1.40.30"] +dependencies = ["boto3>=1.42.1"] [project.urls] Documentation = "https://github.com/aws/aws-durable-execution-sdk-python#readme" Issues = "https://github.com/aws/aws-durable-execution-sdk-python/issues" Source = "https://github.com/aws/aws-durable-execution-sdk-python" -[tool.hatch.build.targets.sdist] -packages = ["src/aws_durable_execution_sdk_python"] - [tool.hatch.build.targets.wheel] packages = ["src/aws_durable_execution_sdk_python"] diff --git a/src/aws_durable_execution_sdk_python/__about__.py b/src/aws_durable_execution_sdk_python/__about__.py index 97a5269..212e79b 100644 --- a/src/aws_durable_execution_sdk_python/__about__.py +++ b/src/aws_durable_execution_sdk_python/__about__.py @@ -1,4 +1,4 @@ # SPDX-FileCopyrightText: 2025-present Amazon.com, Inc. or its affiliates. 
# # SPDX-License-Identifier: Apache-2.0 -__version__ = "0.0.1" +__version__ = "1.1.0" diff --git a/src/aws_durable_execution_sdk_python/__init__.py b/src/aws_durable_execution_sdk_python/__init__.py index 0f4de0d..1a24d31 100644 --- a/src/aws_durable_execution_sdk_python/__init__.py +++ b/src/aws_durable_execution_sdk_python/__init__.py @@ -1 +1,36 @@ """AWS Lambda Durable Executions Python SDK.""" + +# Main context - used in every durable function +# Helper decorators - commonly used for step functions +from aws_durable_execution_sdk_python.context import ( + DurableContext, + durable_step, + durable_wait_for_callback, + durable_with_child_context, +) + +# Most common exceptions - users need to handle these exceptions +from aws_durable_execution_sdk_python.exceptions import ( + DurableExecutionsError, + InvocationError, + ValidationError, +) + +# Core decorator - used in every durable function +from aws_durable_execution_sdk_python.execution import durable_execution + +# Essential context types - passed to user functions +from aws_durable_execution_sdk_python.types import BatchResult, StepContext + +__all__ = [ + "BatchResult", + "DurableContext", + "DurableExecutionsError", + "InvocationError", + "StepContext", + "ValidationError", + "durable_execution", + "durable_step", + "durable_wait_for_callback", + "durable_with_child_context", +] diff --git a/src/aws_durable_execution_sdk_python/botocore/data/lambdainternal-local/2015-03-31/service-2.json b/src/aws_durable_execution_sdk_python/botocore/data/lambdainternal-local/2015-03-31/service-2.json deleted file mode 100644 index 0a596a8..0000000 --- a/src/aws_durable_execution_sdk_python/botocore/data/lambdainternal-local/2015-03-31/service-2.json +++ /dev/null @@ -1,7856 +0,0 @@ -{ - "version":"2.0", - "metadata":{ - "apiVersion":"2015-03-31", - "endpointPrefix":"lambda", - "protocol":"rest-json", - "serviceFullName":"AWS Lambda", - "serviceId":"Lambda", - "signatureVersion":"v4", - "signingName":"execute-api", - "uid":"lambda-2015-03-31" - }, - "operations":{ - "AddLayerVersionPermission":{ - "name":"AddLayerVersionPermission", - "http":{ - "method":"POST", - "requestUri":"/2018-10-31/layers/{LayerName}/versions/{VersionNumber}/policy", - "responseCode":201 - }, - "input":{"shape":"AddLayerVersionPermissionRequest"}, - "output":{"shape":"AddLayerVersionPermissionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"PolicyLengthExceededException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"PreconditionFailedException"} - ], - "documentation":"

Adds permissions to the resource-based policy of a version of an Lambda layer. Use this action to grant layer usage permission to other accounts. You can grant permission to a single account, all accounts in an organization, or all Amazon Web Services accounts.

To revoke permission, call RemoveLayerVersionPermission with the statement ID that you specified when you added it.

" - }, - "AddPermission":{ - "name":"AddPermission", - "http":{ - "method":"POST", - "requestUri":"/2015-03-31/functions/{FunctionName}/policy", - "responseCode":201 - }, - "input":{"shape":"AddPermissionRequest"}, - "output":{"shape":"AddPermissionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"PolicyLengthExceededException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"PreconditionFailedException"} - ], - "documentation":"

Grants a principal permission to use a function. You can apply the policy at the function level, or specify a qualifier to restrict access to a single version or alias. If you use a qualifier, the invoker must use the full Amazon Resource Name (ARN) of that version or alias to invoke the function. Note: Lambda does not support adding policies to version $LATEST.

To grant permission to another account, specify the account ID as the Principal. To grant permission to an organization defined in Organizations, specify the organization ID as the PrincipalOrgID. For Amazon Web Services services, the principal is a domain-style identifier that the service defines, such as s3.amazonaws.com or sns.amazonaws.com. For Amazon Web Services services, you can also specify the ARN of the associated resource as the SourceArn. If you grant permission to a service principal without specifying the source, other accounts could potentially configure resources in their account to invoke your Lambda function.

This operation adds a statement to a resource-based permissions policy for the function. For more information about function policies, see Using resource-based policies for Lambda.

" - }, - "CheckpointDurableExecution":{ - "name":"CheckpointDurableExecution", - "http":{ - "method":"POST", - "requestUri":"/2025-12-01/durable-executions/{DurableExecutionArn}/checkpoint", - "responseCode":200 - }, - "input":{"shape":"CheckpointDurableExecutionRequest"}, - "output":{"shape":"CheckpointDurableExecutionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ServiceException"} - ], - "idempotent":true - }, - "CreateAlias":{ - "name":"CreateAlias", - "http":{ - "method":"POST", - "requestUri":"/2015-03-31/functions/{FunctionName}/aliases", - "responseCode":201 - }, - "input":{"shape":"CreateAliasRequest"}, - "output":{"shape":"AliasConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Creates an alias for a Lambda function version. Use aliases to provide clients with a function identifier that you can update to invoke a different version.

You can also map an alias to split invocation requests between two versions. Use the RoutingConfig parameter to specify a second version and the percentage of invocation requests that it receives.

", - "idempotent":true - }, - "CreateCodeSigningConfig":{ - "name":"CreateCodeSigningConfig", - "http":{ - "method":"POST", - "requestUri":"/2020-04-22/code-signing-configs", - "responseCode":201 - }, - "input":{"shape":"CreateCodeSigningConfigRequest"}, - "output":{"shape":"CreateCodeSigningConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"} - ], - "documentation":"

Creates a code signing configuration. A code signing configuration defines a list of allowed signing profiles and defines the code-signing validation policy (action to be taken if deployment validation checks fail).

" - }, - "CreateEventSourceMapping":{ - "name":"CreateEventSourceMapping", - "http":{ - "method":"POST", - "requestUri":"/2015-03-31/event-source-mappings", - "responseCode":202 - }, - "input":{"shape":"CreateEventSourceMappingRequest"}, - "output":{"shape":"EventSourceMappingConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Creates a mapping between an event source and an Lambda function. Lambda reads items from the event source and invokes the function.

For details about how to configure different event sources, see the following topics.

The following error handling options are available only for DynamoDB and Kinesis event sources:

For stream sources (DynamoDB, Kinesis, Amazon MSK, and self-managed Apache Kafka), the following option is also available:

For information about which configuration parameters apply to each event source, see the following topics.

" - }, - "CreateFunction":{ - "name":"CreateFunction", - "http":{ - "method":"POST", - "requestUri":"/2015-03-31/functions", - "responseCode":201 - }, - "input":{"shape":"CreateFunctionRequest"}, - "output":{"shape":"FunctionConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"InvalidCodeSignatureException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"CodeVerificationFailedException"}, - {"shape":"CodeSigningConfigNotFoundException"}, - {"shape":"CodeStorageExceededException"} - ], - "documentation":"

Creates a Lambda function. To create a function, you need a deployment package and an execution role. The deployment package is a .zip file archive or container image that contains your function code. The execution role grants the function permission to use Amazon Web Services services, such as Amazon CloudWatch Logs for log streaming and X-Ray for request tracing.

If the deployment package is a container image, then you set the package type to Image. For a container image, the code property must include the URI of a container image in the Amazon ECR registry. You do not need to specify the handler and runtime properties.

If the deployment package is a .zip file archive, then you set the package type to Zip. For a .zip file archive, the code property specifies the location of the .zip file. You must also specify the handler and runtime properties. The code in the deployment package must be compatible with the target instruction set architecture of the function (x86-64 or arm64). If you do not specify the architecture, then the default value is x86-64.

When you create a function, Lambda provisions an instance of the function and its supporting resources. If your function connects to a VPC, this process can take a minute or so. During this time, you can't invoke or modify the function. The State, StateReason, and StateReasonCode fields in the response from GetFunctionConfiguration indicate when the function is ready to invoke. For more information, see Lambda function states.

A function has an unpublished version, and can have published versions and aliases. The unpublished version changes when you update your function's code and configuration. A published version is a snapshot of your function code and configuration that can't be changed. An alias is a named resource that maps to a version, and can be changed to map to a different version. Use the Publish parameter to create version 1 of your function from its initial configuration.

The other parameters let you configure version-specific and function-level settings. You can modify version-specific settings later with UpdateFunctionConfiguration. Function-level settings apply to both the unpublished and published versions of the function, and include tags (TagResource) and per-function concurrency limits (PutFunctionConcurrency).

You can use code signing if your deployment package is a .zip file archive. To enable code signing for this function, specify the ARN of a code-signing configuration. When a user attempts to deploy a code package with UpdateFunctionCode, Lambda checks that the code package has a valid signature from a trusted publisher. The code-signing configuration includes set of signing profiles, which define the trusted publishers for this function.

If another Amazon Web Services account or an Amazon Web Services service invokes your function, use AddPermission to grant permission by creating a resource-based Identity and Access Management (IAM) policy. You can grant permissions at the function level, on a version, or on an alias.

To invoke your function directly, use Invoke. To invoke your function in response to events in other Amazon Web Services services, create an event source mapping (CreateEventSourceMapping), or configure a function trigger in the other service. For more information, see Invoking Lambda functions.

", - "idempotent":true - }, - "CreateFunctionUrlConfig":{ - "name":"CreateFunctionUrlConfig", - "http":{ - "method":"POST", - "requestUri":"/2021-10-31/functions/{FunctionName}/url", - "responseCode":201 - }, - "input":{"shape":"CreateFunctionUrlConfigRequest"}, - "output":{"shape":"CreateFunctionUrlConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Creates a Lambda function URL with the specified configuration parameters. A function URL is a dedicated HTTP(S) endpoint that you can use to invoke your function.

" - }, - "DeleteAlias":{ - "name":"DeleteAlias", - "http":{ - "method":"DELETE", - "requestUri":"/2015-03-31/functions/{FunctionName}/aliases/{Name}", - "responseCode":204 - }, - "input":{"shape":"DeleteAliasRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"} - ], - "documentation":"

Deletes a Lambda function alias.

", - "idempotent":true - }, - "DeleteCodeSigningConfig":{ - "name":"DeleteCodeSigningConfig", - "http":{ - "method":"DELETE", - "requestUri":"/2020-04-22/code-signing-configs/{CodeSigningConfigArn}", - "responseCode":204 - }, - "input":{"shape":"DeleteCodeSigningConfigRequest"}, - "output":{"shape":"DeleteCodeSigningConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Deletes the code signing configuration. You can delete the code signing configuration only if no function is using it.

", - "idempotent":true - }, - "DeleteEventSourceMapping":{ - "name":"DeleteEventSourceMapping", - "http":{ - "method":"DELETE", - "requestUri":"/2015-03-31/event-source-mappings/{UUID}", - "responseCode":202 - }, - "input":{"shape":"DeleteEventSourceMappingRequest"}, - "output":{"shape":"EventSourceMappingConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceInUseException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Deletes an event source mapping. You can get the identifier of a mapping from the output of ListEventSourceMappings.

When you delete an event source mapping, it enters a Deleting state and might not be completely deleted for several seconds.

", - "idempotent":true - }, - "DeleteFunction":{ - "name":"DeleteFunction", - "http":{ - "method":"DELETE", - "requestUri":"/2015-03-31/functions/{FunctionName}", - "responseCode":204 - }, - "input":{"shape":"DeleteFunctionRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Deletes a Lambda function. To delete a specific function version, use the Qualifier parameter. Otherwise, all versions and aliases are deleted. This doesn't require the user to have explicit permissions for DeleteAlias.

To delete Lambda event source mappings that invoke a function, use DeleteEventSourceMapping. For Amazon Web Services services and resources that invoke your function directly, delete the trigger in the service where you originally configured it.

", - "idempotent":true - }, - "DeleteFunctionCodeSigningConfig":{ - "name":"DeleteFunctionCodeSigningConfig", - "http":{ - "method":"DELETE", - "requestUri":"/2020-06-30/functions/{FunctionName}/code-signing-config", - "responseCode":204 - }, - "input":{"shape":"DeleteFunctionCodeSigningConfigRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"CodeSigningConfigNotFoundException"} - ], - "documentation":"

Removes the code signing configuration from the function.

" - }, - "DeleteFunctionConcurrency":{ - "name":"DeleteFunctionConcurrency", - "http":{ - "method":"DELETE", - "requestUri":"/2017-10-31/functions/{FunctionName}/concurrency", - "responseCode":204 - }, - "input":{"shape":"DeleteFunctionConcurrencyRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Removes a concurrent execution limit from a function.

" - }, - "DeleteFunctionEventInvokeConfig":{ - "name":"DeleteFunctionEventInvokeConfig", - "http":{ - "method":"DELETE", - "requestUri":"/2019-09-25/functions/{FunctionName}/event-invoke-config", - "responseCode":204 - }, - "input":{"shape":"DeleteFunctionEventInvokeConfigRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Deletes the configuration for asynchronous invocation for a function, version, or alias.

To configure options for asynchronous invocation, use PutFunctionEventInvokeConfig.

" - }, - "DeleteFunctionUrlConfig":{ - "name":"DeleteFunctionUrlConfig", - "http":{ - "method":"DELETE", - "requestUri":"/2021-10-31/functions/{FunctionName}/url", - "responseCode":204 - }, - "input":{"shape":"DeleteFunctionUrlConfigRequest"}, - "errors":[ - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Deletes a Lambda function URL. When you delete a function URL, you can't recover it. Creating a new function URL results in a different URL address.

" - }, - "DeleteLayerVersion":{ - "name":"DeleteLayerVersion", - "http":{ - "method":"DELETE", - "requestUri":"/2018-10-31/layers/{LayerName}/versions/{VersionNumber}", - "responseCode":204 - }, - "input":{"shape":"DeleteLayerVersionRequest"}, - "errors":[ - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"} - ], - "documentation":"

Deletes a version of an Lambda layer. Deleted versions can no longer be viewed or added to functions. To avoid breaking functions, a copy of the version remains in Lambda until no functions refer to it.

", - "idempotent":true - }, - "DeleteProvisionedConcurrencyConfig":{ - "name":"DeleteProvisionedConcurrencyConfig", - "http":{ - "method":"DELETE", - "requestUri":"/2019-09-30/functions/{FunctionName}/provisioned-concurrency", - "responseCode":204 - }, - "input":{"shape":"DeleteProvisionedConcurrencyConfigRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Deletes the provisioned concurrency configuration for a function.

", - "idempotent":true - }, - "GetAccountSettings":{ - "name":"GetAccountSettings", - "http":{ - "method":"GET", - "requestUri":"/2016-08-19/account-settings", - "responseCode":200 - }, - "input":{"shape":"GetAccountSettingsRequest"}, - "output":{"shape":"GetAccountSettingsResponse"}, - "errors":[ - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"} - ], - "documentation":"

Retrieves details about your account's limits and usage in an Amazon Web Services Region.

", - "readonly":true - }, - "GetAlias":{ - "name":"GetAlias", - "http":{ - "method":"GET", - "requestUri":"/2015-03-31/functions/{FunctionName}/aliases/{Name}", - "responseCode":200 - }, - "input":{"shape":"GetAliasRequest"}, - "output":{"shape":"AliasConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns details about a Lambda function alias.

", - "readonly":true - }, - "GetCodeSigningConfig":{ - "name":"GetCodeSigningConfig", - "http":{ - "method":"GET", - "requestUri":"/2020-04-22/code-signing-configs/{CodeSigningConfigArn}", - "responseCode":200 - }, - "input":{"shape":"GetCodeSigningConfigRequest"}, - "output":{"shape":"GetCodeSigningConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns information about the specified code signing configuration.

", - "readonly":true - }, - "GetDurableExecution":{ - "name":"GetDurableExecution", - "http":{ - "method":"GET", - "requestUri":"/2025-12-01/durable-executions/{DurableExecutionArn}", - "responseCode":200 - }, - "input":{"shape":"GetDurableExecutionRequest"}, - "output":{"shape":"GetDurableExecutionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ServiceException"}, - {"shape":"ResourceNotFoundException"} - ], - "readonly":true - }, - "GetDurableExecutionHistory":{ - "name":"GetDurableExecutionHistory", - "http":{ - "method":"GET", - "requestUri":"/2025-12-01/durable-executions/{DurableExecutionArn}/history", - "responseCode":200 - }, - "input":{"shape":"GetDurableExecutionHistoryRequest"}, - "output":{"shape":"GetDurableExecutionHistoryResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ServiceException"}, - {"shape":"ResourceNotFoundException"} - ], - "readonly":true - }, - "GetDurableExecutionState":{ - "name":"GetDurableExecutionState", - "http":{ - "method":"GET", - "requestUri":"/2025-12-01/durable-executions/{DurableExecutionArn}/state", - "responseCode":200 - }, - "input":{"shape":"GetDurableExecutionStateRequest"}, - "output":{"shape":"GetDurableExecutionStateResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ServiceException"} - ], - "readonly":true - }, - "GetEventSourceMapping":{ - "name":"GetEventSourceMapping", - "http":{ - "method":"GET", - "requestUri":"/2015-03-31/event-source-mappings/{UUID}", - "responseCode":200 - }, - "input":{"shape":"GetEventSourceMappingRequest"}, - "output":{"shape":"EventSourceMappingConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns details about an event source mapping. You can get the identifier of a mapping from the output of ListEventSourceMappings.

", - "readonly":true - }, - "GetFunction":{ - "name":"GetFunction", - "http":{ - "method":"GET", - "requestUri":"/2015-03-31/functions/{FunctionName}", - "responseCode":200 - }, - "input":{"shape":"GetFunctionRequest"}, - "output":{"shape":"GetFunctionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns information about the function or function version, with a link to download the deployment package that's valid for 10 minutes. If you specify a function version, only details that are specific to that version are returned.

", - "readonly":true - }, - "GetFunctionCodeSigningConfig":{ - "name":"GetFunctionCodeSigningConfig", - "http":{ - "method":"GET", - "requestUri":"/2020-06-30/functions/{FunctionName}/code-signing-config", - "responseCode":200 - }, - "input":{"shape":"GetFunctionCodeSigningConfigRequest"}, - "output":{"shape":"GetFunctionCodeSigningConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns the code signing configuration for the specified function.

", - "readonly":true - }, - "GetFunctionConcurrency":{ - "name":"GetFunctionConcurrency", - "http":{ - "method":"GET", - "requestUri":"/2019-09-30/functions/{FunctionName}/concurrency", - "responseCode":200 - }, - "input":{"shape":"GetFunctionConcurrencyRequest"}, - "output":{"shape":"GetFunctionConcurrencyResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns details about the reserved concurrency configuration for a function. To set a concurrency limit for a function, use PutFunctionConcurrency.

", - "readonly":true - }, - "GetFunctionConfiguration":{ - "name":"GetFunctionConfiguration", - "http":{ - "method":"GET", - "requestUri":"/2015-03-31/functions/{FunctionName}/configuration", - "responseCode":200 - }, - "input":{"shape":"GetFunctionConfigurationRequest"}, - "output":{"shape":"FunctionConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns the version-specific settings of a Lambda function or version. The output includes only options that can vary between versions of a function. To modify these settings, use UpdateFunctionConfiguration.

To get all of a function's details, including function-level settings, use GetFunction.

", - "readonly":true - }, - "GetFunctionEventInvokeConfig":{ - "name":"GetFunctionEventInvokeConfig", - "http":{ - "method":"GET", - "requestUri":"/2019-09-25/functions/{FunctionName}/event-invoke-config", - "responseCode":200 - }, - "input":{"shape":"GetFunctionEventInvokeConfigRequest"}, - "output":{"shape":"FunctionEventInvokeConfig"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Retrieves the configuration for asynchronous invocation for a function, version, or alias.

To configure options for asynchronous invocation, use PutFunctionEventInvokeConfig.

", - "readonly":true - }, - "GetFunctionRecursionConfig":{ - "name":"GetFunctionRecursionConfig", - "http":{ - "method":"GET", - "requestUri":"/2024-08-31/functions/{FunctionName}/recursion-config", - "responseCode":200 - }, - "input":{"shape":"GetFunctionRecursionConfigRequest"}, - "output":{"shape":"GetFunctionRecursionConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns your function's recursive loop detection configuration.

", - "readonly":true - }, - "GetFunctionUrlConfig":{ - "name":"GetFunctionUrlConfig", - "http":{ - "method":"GET", - "requestUri":"/2021-10-31/functions/{FunctionName}/url", - "responseCode":200 - }, - "input":{"shape":"GetFunctionUrlConfigRequest"}, - "output":{"shape":"GetFunctionUrlConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns details about a Lambda function URL.

", - "readonly":true - }, - "GetLayerVersion":{ - "name":"GetLayerVersion", - "http":{ - "method":"GET", - "requestUri":"/2018-10-31/layers/{LayerName}/versions/{VersionNumber}", - "responseCode":200 - }, - "input":{"shape":"GetLayerVersionRequest"}, - "output":{"shape":"GetLayerVersionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns information about a version of an Lambda layer, with a link to download the layer archive that's valid for 10 minutes.

", - "readonly":true - }, - "GetLayerVersionByArn":{ - "name":"GetLayerVersionByArn", - "http":{ - "method":"GET", - "requestUri":"/2018-10-31/layers?find=LayerVersion", - "responseCode":200 - }, - "input":{"shape":"GetLayerVersionByArnRequest"}, - "output":{"shape":"GetLayerVersionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns information about a version of an Lambda layer, with a link to download the layer archive that's valid for 10 minutes.

", - "readonly":true - }, - "GetLayerVersionPolicy":{ - "name":"GetLayerVersionPolicy", - "http":{ - "method":"GET", - "requestUri":"/2018-10-31/layers/{LayerName}/versions/{VersionNumber}/policy", - "responseCode":200 - }, - "input":{"shape":"GetLayerVersionPolicyRequest"}, - "output":{"shape":"GetLayerVersionPolicyResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns the permission policy for a version of an Lambda layer. For more information, see AddLayerVersionPermission.

", - "readonly":true - }, - "GetPolicy":{ - "name":"GetPolicy", - "http":{ - "method":"GET", - "requestUri":"/2015-03-31/functions/{FunctionName}/policy", - "responseCode":200 - }, - "input":{"shape":"GetPolicyRequest"}, - "output":{"shape":"GetPolicyResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns the resource-based IAM policy for a function, version, or alias.

", - "readonly":true - }, - "GetProvisionedConcurrencyConfig":{ - "name":"GetProvisionedConcurrencyConfig", - "http":{ - "method":"GET", - "requestUri":"/2019-09-30/functions/{FunctionName}/provisioned-concurrency", - "responseCode":200 - }, - "input":{"shape":"GetProvisionedConcurrencyConfigRequest"}, - "output":{"shape":"GetProvisionedConcurrencyConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ProvisionedConcurrencyConfigNotFoundException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Retrieves the provisioned concurrency configuration for a function's alias or version.

", - "readonly":true - }, - "GetRuntimeManagementConfig":{ - "name":"GetRuntimeManagementConfig", - "http":{ - "method":"GET", - "requestUri":"/2021-07-20/functions/{FunctionName}/runtime-management-config", - "responseCode":200 - }, - "input":{"shape":"GetRuntimeManagementConfigRequest"}, - "output":{"shape":"GetRuntimeManagementConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Retrieves the runtime management configuration for a function's version. If the runtime update mode is Manual, this includes the ARN of the runtime version and the runtime update mode. If the runtime update mode is Auto or Function update, this includes the runtime update mode and null is returned for the ARN. For more information, see Runtime updates.

", - "readonly":true - }, - "Invoke":{ - "name":"Invoke", - "http":{ - "method":"POST", - "requestUri":"/2015-03-31/functions/{FunctionName}/invocations", - "responseCode":200 - }, - "input":{"shape":"InvocationRequest"}, - "output":{"shape":"InvocationResponse"}, - "errors":[ - {"shape":"ResourceNotReadyException"}, - {"shape":"InvalidSecurityGroupIDException"}, - {"shape":"SnapStartTimeoutException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"EC2ThrottledException"}, - {"shape":"EFSMountConnectivityException"}, - {"shape":"SubnetIPAddressLimitReachedException"}, - {"shape":"KMSAccessDeniedException"}, - {"shape":"RequestTooLargeException"}, - {"shape":"KMSDisabledException"}, - {"shape":"UnsupportedMediaTypeException"}, - {"shape":"InvalidRuntimeException"}, - {"shape":"EC2UnexpectedException"}, - {"shape":"InvalidSubnetIDException"}, - {"shape":"KMSNotFoundException"}, - {"shape":"InvalidParameterValueException"}, - {"shape":"EC2AccessDeniedException"}, - {"shape":"EFSIOException"}, - {"shape":"KMSInvalidStateException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ENILimitReachedException"}, - {"shape":"SnapStartNotReadyException"}, - {"shape":"ServiceException"}, - {"shape":"SnapStartException"}, - {"shape":"RecursiveInvocationException"}, - {"shape":"EFSMountTimeoutException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"InvalidRequestContentException"}, - {"shape":"DurableExecutionAlreadyStartedException"}, - {"shape":"InvalidZipFileException"}, - {"shape":"EFSMountFailureException"} - ], - "documentation":"

Invokes a Lambda function. You can invoke a function synchronously (and wait for the response), or asynchronously. By default, Lambda invokes your function synchronously (i.e. the InvocationType is RequestResponse). To invoke a function asynchronously, set InvocationType to Event. Lambda passes the ClientContext object to your function for synchronous invocations only.

For synchronous invocation, details about the function response, including errors, are included in the response body and headers. For either invocation type, you can find more information in the execution log and trace.

When an error occurs, your function may be invoked multiple times. Retry behavior varies by error type, client, event source, and invocation type. For example, if you invoke a function asynchronously and it returns an error, Lambda executes the function up to two more times. For more information, see Error handling and automatic retries in Lambda.

For asynchronous invocation, Lambda adds events to a queue before sending them to your function. If your function does not have enough capacity to keep up with the queue, events may be lost. Occasionally, your function may receive the same event multiple times, even if no error occurs. To retain events that were not processed, configure your function with a dead-letter queue.

The status code in the API response doesn't reflect function errors. Error codes are reserved for errors that prevent your function from executing, such as permissions errors, quota errors, or issues with your function's code and configuration. For example, Lambda returns TooManyRequestsException if running the function would cause you to exceed a concurrency limit at either the account level (ConcurrentInvocationLimitExceeded) or function level (ReservedFunctionConcurrentInvocationLimitExceeded).

For functions with a long timeout, your client might disconnect during synchronous invocation while it waits for a response. Configure your HTTP client, SDK, firewall, proxy, or operating system to allow for long connections with timeout or keep-alive settings.

This operation requires permission for the lambda:InvokeFunction action. For details on how to set up permissions for cross-account invocations, see Granting function access to other accounts.

" - }, - "InvokeAsync":{ - "name":"InvokeAsync", - "http":{ - "method":"POST", - "requestUri":"/2014-11-13/functions/{FunctionName}/invoke-async", - "responseCode":202 - }, - "input":{"shape":"InvokeAsyncRequest"}, - "output":{"shape":"InvokeAsyncResponse"}, - "errors":[ - {"shape":"InvalidRuntimeException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"InvalidRequestContentException"} - ], - "documentation":"

For asynchronous function invocation, use Invoke.

Invokes a function asynchronously.

If you do use the InvokeAsync action, note that it doesn't support the use of X-Ray active tracing. Trace ID is not propagated to the function, even if X-Ray active tracing is turned on.

", - "deprecated":true - }, - "InvokeWithResponseStream":{ - "name":"InvokeWithResponseStream", - "http":{ - "method":"POST", - "requestUri":"/2021-11-15/functions/{FunctionName}/response-streaming-invocations", - "responseCode":200 - }, - "input":{"shape":"InvokeWithResponseStreamRequest"}, - "output":{"shape":"InvokeWithResponseStreamResponse"}, - "errors":[ - {"shape":"ResourceNotReadyException"}, - {"shape":"InvalidSecurityGroupIDException"}, - {"shape":"SnapStartTimeoutException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"EC2ThrottledException"}, - {"shape":"EFSMountConnectivityException"}, - {"shape":"SubnetIPAddressLimitReachedException"}, - {"shape":"KMSAccessDeniedException"}, - {"shape":"RequestTooLargeException"}, - {"shape":"KMSDisabledException"}, - {"shape":"UnsupportedMediaTypeException"}, - {"shape":"InvalidRuntimeException"}, - {"shape":"EC2UnexpectedException"}, - {"shape":"InvalidSubnetIDException"}, - {"shape":"KMSNotFoundException"}, - {"shape":"InvalidParameterValueException"}, - {"shape":"EC2AccessDeniedException"}, - {"shape":"EFSIOException"}, - {"shape":"KMSInvalidStateException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ENILimitReachedException"}, - {"shape":"SnapStartNotReadyException"}, - {"shape":"ServiceException"}, - {"shape":"SnapStartException"}, - {"shape":"RecursiveInvocationException"}, - {"shape":"EFSMountTimeoutException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"InvalidRequestContentException"}, - {"shape":"InvalidZipFileException"}, - {"shape":"EFSMountFailureException"} - ], - "documentation":"

Configure your Lambda functions to stream response payloads back to clients. For more information, see Configuring a Lambda function to stream responses.

This operation requires permission for the lambda:InvokeFunction action. For details on how to set up permissions for cross-account invocations, see Granting function access to other accounts.

" - }, - "ListAliases":{ - "name":"ListAliases", - "http":{ - "method":"GET", - "requestUri":"/2015-03-31/functions/{FunctionName}/aliases", - "responseCode":200 - }, - "input":{"shape":"ListAliasesRequest"}, - "output":{"shape":"ListAliasesResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns a list of aliases for a Lambda function.

", - "readonly":true - }, - "ListCodeSigningConfigs":{ - "name":"ListCodeSigningConfigs", - "http":{ - "method":"GET", - "requestUri":"/2020-04-22/code-signing-configs", - "responseCode":200 - }, - "input":{"shape":"ListCodeSigningConfigsRequest"}, - "output":{"shape":"ListCodeSigningConfigsResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"} - ], - "documentation":"

Returns a list of code signing configurations. A request returns up to 10,000 configurations per call. You can use the MaxItems parameter to return fewer configurations per call.

", - "readonly":true - }, - "ListDurableExecutionsByFunction":{ - "name":"ListDurableExecutionsByFunction", - "http":{ - "method":"GET", - "requestUri":"/2025-12-01/functions/{FunctionName}/durable-executions", - "responseCode":200 - }, - "input":{"shape":"ListDurableExecutionsByFunctionRequest"}, - "output":{"shape":"ListDurableExecutionsByFunctionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ServiceException"} - ], - "readonly":true - }, - "ListEventSourceMappings":{ - "name":"ListEventSourceMappings", - "http":{ - "method":"GET", - "requestUri":"/2015-03-31/event-source-mappings", - "responseCode":200 - }, - "input":{"shape":"ListEventSourceMappingsRequest"}, - "output":{"shape":"ListEventSourceMappingsResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Lists event source mappings. Specify an EventSourceArn to show only event source mappings for a single event source.

", - "readonly":true - }, - "ListFunctionEventInvokeConfigs":{ - "name":"ListFunctionEventInvokeConfigs", - "http":{ - "method":"GET", - "requestUri":"/2019-09-25/functions/{FunctionName}/event-invoke-config/list", - "responseCode":200 - }, - "input":{"shape":"ListFunctionEventInvokeConfigsRequest"}, - "output":{"shape":"ListFunctionEventInvokeConfigsResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Retrieves a list of configurations for asynchronous invocation for a function.

To configure options for asynchronous invocation, use PutFunctionEventInvokeConfig.

", - "readonly":true - }, - "ListFunctionUrlConfigs":{ - "name":"ListFunctionUrlConfigs", - "http":{ - "method":"GET", - "requestUri":"/2021-10-31/functions/{FunctionName}/urls", - "responseCode":200 - }, - "input":{"shape":"ListFunctionUrlConfigsRequest"}, - "output":{"shape":"ListFunctionUrlConfigsResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns a list of Lambda function URLs for the specified function.

", - "readonly":true - }, - "ListFunctions":{ - "name":"ListFunctions", - "http":{ - "method":"GET", - "requestUri":"/2015-03-31/functions", - "responseCode":200 - }, - "input":{"shape":"ListFunctionsRequest"}, - "output":{"shape":"ListFunctionsResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"} - ], - "documentation":"

Returns a list of Lambda functions, with the version-specific configuration of each. Lambda returns up to 50 functions per call.

Set FunctionVersion to ALL to include all published versions of each function in addition to the unpublished version.

The ListFunctions operation returns a subset of the FunctionConfiguration fields. To get the additional fields (State, StateReasonCode, StateReason, LastUpdateStatus, LastUpdateStatusReason, LastUpdateStatusReasonCode, RuntimeVersionConfig) for a function or version, use GetFunction.
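A minimal boto3 sketch of this operation; the printed fields come straight from the response shape, and the paginator handles the 50-function page size:

import boto3

# Page through every function, including published versions (FunctionVersion=ALL).
client = boto3.client("lambda")
paginator = client.get_paginator("list_functions")
for page in paginator.paginate(FunctionVersion="ALL"):
    for fn in page["Functions"]:
        print(fn["FunctionName"], fn["Version"])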

", - "readonly":true - }, - "ListFunctionsByCodeSigningConfig":{ - "name":"ListFunctionsByCodeSigningConfig", - "http":{ - "method":"GET", - "requestUri":"/2020-04-22/code-signing-configs/{CodeSigningConfigArn}/functions", - "responseCode":200 - }, - "input":{"shape":"ListFunctionsByCodeSigningConfigRequest"}, - "output":{"shape":"ListFunctionsByCodeSigningConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

List the functions that use the specified code signing configuration. You can use this method prior to deleting a code signing configuration, to verify that no functions are using it.

", - "readonly":true - }, - "ListLayerVersions":{ - "name":"ListLayerVersions", - "http":{ - "method":"GET", - "requestUri":"/2018-10-31/layers/{LayerName}/versions", - "responseCode":200 - }, - "input":{"shape":"ListLayerVersionsRequest"}, - "output":{"shape":"ListLayerVersionsResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Lists the versions of a Lambda layer. Versions that have been deleted aren't listed. Specify a runtime identifier to list only versions that indicate they're compatible with that runtime. Specify a compatible architecture to include only layer versions that are compatible with that architecture.

", - "readonly":true - }, - "ListLayers":{ - "name":"ListLayers", - "http":{ - "method":"GET", - "requestUri":"/2018-10-31/layers", - "responseCode":200 - }, - "input":{"shape":"ListLayersRequest"}, - "output":{"shape":"ListLayersResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"} - ], - "documentation":"

Lists Lambda layers and shows information about the latest version of each. Specify a runtime identifier to list only layers that indicate they're compatible with that runtime. Specify a compatible architecture to include only layers that are compatible with that instruction set architecture.

", - "readonly":true - }, - "ListProvisionedConcurrencyConfigs":{ - "name":"ListProvisionedConcurrencyConfigs", - "http":{ - "method":"GET", - "requestUri":"/2019-09-30/functions/{FunctionName}/provisioned-concurrency?List=ALL", - "responseCode":200 - }, - "input":{"shape":"ListProvisionedConcurrencyConfigsRequest"}, - "output":{"shape":"ListProvisionedConcurrencyConfigsResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Retrieves a list of provisioned concurrency configurations for a function.

", - "readonly":true - }, - "ListTags":{ - "name":"ListTags", - "http":{ - "method":"GET", - "requestUri":"/2017-03-31/tags/{Resource}", - "responseCode":200 - }, - "input":{"shape":"ListTagsRequest"}, - "output":{"shape":"ListTagsResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns the tags for a function, event source mapping, or code signing configuration. You can also view function tags with GetFunction.

", - "readonly":true - }, - "ListVersionsByFunction":{ - "name":"ListVersionsByFunction", - "http":{ - "method":"GET", - "requestUri":"/2015-03-31/functions/{FunctionName}/versions", - "responseCode":200 - }, - "input":{"shape":"ListVersionsByFunctionRequest"}, - "output":{"shape":"ListVersionsByFunctionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns a list of versions, with the version-specific configuration of each. Lambda returns up to 50 versions per call.

", - "readonly":true - }, - "PublishLayerVersion":{ - "name":"PublishLayerVersion", - "http":{ - "method":"POST", - "requestUri":"/2018-10-31/layers/{LayerName}/versions", - "responseCode":201 - }, - "input":{"shape":"PublishLayerVersionRequest"}, - "output":{"shape":"PublishLayerVersionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"CodeStorageExceededException"} - ], - "documentation":"

Creates a Lambda layer from a ZIP archive. Each time you call PublishLayerVersion with the same layer name, a new version is created.

Add layers to your function with CreateFunction or UpdateFunctionConfiguration.
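A hedged boto3 sketch of publishing a layer version; the layer name, archive path, and runtime list are illustrative placeholders:

import boto3

client = boto3.client("lambda")
# Publish a new version of the layer from a local .zip archive.
with open("layer.zip", "rb") as f:  # hypothetical archive path
    response = client.publish_layer_version(
        LayerName="my-shared-utils",  # hypothetical layer name
        Content={"ZipFile": f.read()},
        CompatibleRuntimes=["python3.12"],
    )
print(response["LayerVersionArn"])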

" - }, - "PublishVersion":{ - "name":"PublishVersion", - "http":{ - "method":"POST", - "requestUri":"/2015-03-31/functions/{FunctionName}/versions", - "responseCode":201 - }, - "input":{"shape":"PublishVersionRequest"}, - "output":{"shape":"FunctionConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"CodeStorageExceededException"}, - {"shape":"PreconditionFailedException"} - ], - "documentation":"

Creates a version from the current code and configuration of a function. Use versions to create a snapshot of your function code and configuration that doesn't change.

Lambda doesn't publish a version if the function's configuration and code haven't changed since the last version. Use UpdateFunctionCode or UpdateFunctionConfiguration to update the function before publishing a version.

Clients can invoke versions directly or with an alias. To create an alias, use CreateAlias.
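A minimal boto3 sketch; the function name and description are placeholders:

import boto3

client = boto3.client("lambda")
# Snapshot the current code and configuration as an immutable version.
version = client.publish_version(
    FunctionName="my-function",
    Description="release candidate",
)
print(version["Version"])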

" - }, - "PutFunctionCodeSigningConfig":{ - "name":"PutFunctionCodeSigningConfig", - "http":{ - "method":"PUT", - "requestUri":"/2020-06-30/functions/{FunctionName}/code-signing-config", - "responseCode":200 - }, - "input":{"shape":"PutFunctionCodeSigningConfigRequest"}, - "output":{"shape":"PutFunctionCodeSigningConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"CodeSigningConfigNotFoundException"} - ], - "documentation":"

Update the code signing configuration for the function. Changes to the code signing configuration take effect the next time a user tries to deploy a code package to the function.

" - }, - "PutFunctionConcurrency":{ - "name":"PutFunctionConcurrency", - "http":{ - "method":"PUT", - "requestUri":"/2017-10-31/functions/{FunctionName}/concurrency", - "responseCode":200 - }, - "input":{"shape":"PutFunctionConcurrencyRequest"}, - "output":{"shape":"Concurrency"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Sets the maximum number of simultaneous executions for a function, and reserves capacity for that concurrency level.

Concurrency settings apply to the function as a whole, including all published versions and the unpublished version. Reserving concurrency both ensures that your function has capacity to process the specified number of events simultaneously, and prevents it from scaling beyond that level. Use GetFunction to see the current setting for a function.

Use GetAccountSettings to see your Regional concurrency limit. You can reserve concurrency for as many functions as you like, as long as you leave at least 100 simultaneous executions unreserved for functions that aren't configured with a per-function limit. For more information, see Lambda function scaling.
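A minimal boto3 sketch; the function name and concurrency value are placeholders:

import boto3

client = boto3.client("lambda")
# Reserve 100 concurrent executions; this also caps the function at that level.
client.put_function_concurrency(
    FunctionName="my-function",
    ReservedConcurrentExecutions=100,
)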

" - }, - "PutFunctionEventInvokeConfig":{ - "name":"PutFunctionEventInvokeConfig", - "http":{ - "method":"PUT", - "requestUri":"/2019-09-25/functions/{FunctionName}/event-invoke-config", - "responseCode":200 - }, - "input":{"shape":"PutFunctionEventInvokeConfigRequest"}, - "output":{"shape":"FunctionEventInvokeConfig"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Configures options for asynchronous invocation on a function, version, or alias. If a configuration already exists for a function, version, or alias, this operation overwrites it. If you exclude any settings, they are removed. To set one option without affecting existing settings for other options, use UpdateFunctionEventInvokeConfig.

By default, Lambda retries an asynchronous invocation twice if the function returns an error. It retains events in a queue for up to six hours. When an event fails all processing attempts or stays in the asynchronous invocation queue for too long, Lambda discards it. To retain discarded events, configure a dead-letter queue with UpdateFunctionConfiguration.

To send an invocation record to a queue, topic, S3 bucket, function, or event bus, specify a destination. You can configure separate destinations for successful invocations (on-success) and events that fail all processing attempts (on-failure). You can configure destinations in addition to or instead of a dead-letter queue.

S3 buckets are supported only for on-failure destinations. To retain records of successful invocations, use another destination type.
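A hedged boto3 sketch that overwrites the async settings and routes failed events to an SQS queue; the queue ARN is a placeholder, and the function's execution role must be allowed to send to it:

import boto3

client = boto3.client("lambda")
client.put_function_event_invoke_config(
    FunctionName="my-function",  # hypothetical function name
    MaximumRetryAttempts=1,
    MaximumEventAgeInSeconds=3600,
    DestinationConfig={
        "OnFailure": {"Destination": "arn:aws:sqs:us-east-1:123456789012:failed-events"}
    },
)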

" - }, - "PutFunctionRecursionConfig":{ - "name":"PutFunctionRecursionConfig", - "http":{ - "method":"PUT", - "requestUri":"/2024-08-31/functions/{FunctionName}/recursion-config", - "responseCode":200 - }, - "input":{"shape":"PutFunctionRecursionConfigRequest"}, - "output":{"shape":"PutFunctionRecursionConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Sets your function's recursive loop detection configuration.

When you configure a Lambda function to output to the same service or resource that invokes the function, it's possible to create an infinite recursive loop. For example, a Lambda function might write a message to an Amazon Simple Queue Service (Amazon SQS) queue, which then invokes the same function. This invocation causes the function to write another message to the queue, which in turn invokes the function again.

Lambda can detect certain types of recursive loops shortly after they occur. When Lambda detects a recursive loop and your function's recursive loop detection configuration is set to Terminate, it stops your function from being invoked and notifies you.
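A minimal boto3 sketch, assuming a boto3 release that includes this 2024-08-31 operation; the function name is a placeholder:

import boto3

client = boto3.client("lambda")
client.put_function_recursion_config(
    FunctionName="my-function",
    RecursiveLoop="Terminate",  # or "Allow"
)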

" - }, - "PutProvisionedConcurrencyConfig":{ - "name":"PutProvisionedConcurrencyConfig", - "http":{ - "method":"PUT", - "requestUri":"/2019-09-30/functions/{FunctionName}/provisioned-concurrency", - "responseCode":202 - }, - "input":{"shape":"PutProvisionedConcurrencyConfigRequest"}, - "output":{"shape":"PutProvisionedConcurrencyConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Adds a provisioned concurrency configuration to a function's alias or version.

", - "idempotent":true - }, - "PutRuntimeManagementConfig":{ - "name":"PutRuntimeManagementConfig", - "http":{ - "method":"PUT", - "requestUri":"/2021-07-20/functions/{FunctionName}/runtime-management-config", - "responseCode":200 - }, - "input":{"shape":"PutRuntimeManagementConfigRequest"}, - "output":{"shape":"PutRuntimeManagementConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Sets the runtime management configuration for a function's version. For more information, see Runtime updates.
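A minimal boto3 sketch; the function name is a placeholder:

import boto3

client = boto3.client("lambda")
# Apply runtime updates only when the function itself is updated.
client.put_runtime_management_config(
    FunctionName="my-function",
    UpdateRuntimeOn="FunctionUpdate",  # "Auto" | "FunctionUpdate" | "Manual"
)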

" - }, - "RemoveLayerVersionPermission":{ - "name":"RemoveLayerVersionPermission", - "http":{ - "method":"DELETE", - "requestUri":"/2018-10-31/layers/{LayerName}/versions/{VersionNumber}/policy/{StatementId}", - "responseCode":204 - }, - "input":{"shape":"RemoveLayerVersionPermissionRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"PreconditionFailedException"} - ], - "documentation":"

Removes a statement from the permissions policy for a version of a Lambda layer. For more information, see AddLayerVersionPermission.

" - }, - "RemovePermission":{ - "name":"RemovePermission", - "http":{ - "method":"DELETE", - "requestUri":"/2015-03-31/functions/{FunctionName}/policy/{StatementId}", - "responseCode":204 - }, - "input":{"shape":"RemovePermissionRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"PreconditionFailedException"} - ], - "documentation":"

Revokes function-use permission from an Amazon Web Services service or another Amazon Web Services account. You can get the ID of the statement from the output of GetPolicy.

" - }, - "SendDurableExecutionCallbackFailure":{ - "name":"SendDurableExecutionCallbackFailure", - "http":{ - "method":"POST", - "requestUri":"/2025-12-01/durable-execution-callbacks/{CallbackId}/fail", - "responseCode":200 - }, - "input":{"shape":"SendDurableExecutionCallbackFailureRequest"}, - "output":{"shape":"SendDurableExecutionCallbackFailureResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ServiceException"}, - {"shape":"CallbackTimeoutException"} - ] - }, - "SendDurableExecutionCallbackHeartbeat":{ - "name":"SendDurableExecutionCallbackHeartbeat", - "http":{ - "method":"POST", - "requestUri":"/2025-12-01/durable-execution-callbacks/{CallbackId}/heartbeat", - "responseCode":200 - }, - "input":{"shape":"SendDurableExecutionCallbackHeartbeatRequest"}, - "output":{"shape":"SendDurableExecutionCallbackHeartbeatResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ServiceException"}, - {"shape":"CallbackTimeoutException"} - ] - }, - "SendDurableExecutionCallbackSuccess":{ - "name":"SendDurableExecutionCallbackSuccess", - "http":{ - "method":"POST", - "requestUri":"/2025-12-01/durable-execution-callbacks/{CallbackId}/succeed", - "responseCode":200 - }, - "input":{"shape":"SendDurableExecutionCallbackSuccessRequest"}, - "output":{"shape":"SendDurableExecutionCallbackSuccessResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ServiceException"}, - {"shape":"CallbackTimeoutException"} - ] - }, - "StopDurableExecution":{ - "name":"StopDurableExecution", - "http":{ - "method":"POST", - "requestUri":"/2025-12-01/durable-executions/{DurableExecutionArn}/stop", - "responseCode":200 - }, - "input":{"shape":"StopDurableExecutionRequest"}, - "output":{"shape":"StopDurableExecutionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ServiceException"}, - {"shape":"ResourceNotFoundException"} - ] - }, - "TagResource":{ - "name":"TagResource", - "http":{ - "method":"POST", - "requestUri":"/2017-03-31/tags/{Resource}", - "responseCode":204 - }, - "input":{"shape":"TagResourceRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Adds tags to a function, event source mapping, or code signing configuration.
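A minimal boto3 sketch; the resource ARN and tag values are placeholders:

import boto3

client = boto3.client("lambda")
client.tag_resource(
    Resource="arn:aws:lambda:us-east-1:123456789012:function:my-function",
    Tags={"team": "payments", "env": "prod"},
)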

" - }, - "UntagResource":{ - "name":"UntagResource", - "http":{ - "method":"DELETE", - "requestUri":"/2017-03-31/tags/{Resource}", - "responseCode":204 - }, - "input":{"shape":"UntagResourceRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Removes tags from a function, event source mapping, or code signing configuration.

" - }, - "UpdateAlias":{ - "name":"UpdateAlias", - "http":{ - "method":"PUT", - "requestUri":"/2015-03-31/functions/{FunctionName}/aliases/{Name}", - "responseCode":200 - }, - "input":{"shape":"UpdateAliasRequest"}, - "output":{"shape":"AliasConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"PreconditionFailedException"} - ], - "documentation":"

Updates the configuration of a Lambda function alias.

" - }, - "UpdateCodeSigningConfig":{ - "name":"UpdateCodeSigningConfig", - "http":{ - "method":"PUT", - "requestUri":"/2020-04-22/code-signing-configs/{CodeSigningConfigArn}", - "responseCode":200 - }, - "input":{"shape":"UpdateCodeSigningConfigRequest"}, - "output":{"shape":"UpdateCodeSigningConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Update the code signing configuration. Changes to the code signing configuration take effect the next time a user tries to deploy a code package to the function.

" - }, - "UpdateEventSourceMapping":{ - "name":"UpdateEventSourceMapping", - "http":{ - "method":"PUT", - "requestUri":"/2015-03-31/event-source-mappings/{UUID}", - "responseCode":202 - }, - "input":{"shape":"UpdateEventSourceMappingRequest"}, - "output":{"shape":"EventSourceMappingConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceInUseException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Updates an event source mapping. You can change the function that Lambda invokes, or pause invocation and resume later from the same location.

For details about how to configure different event sources, see the following topics.

The following error handling options are available only for DynamoDB and Kinesis event sources:

For stream sources (DynamoDB, Kinesis, Amazon MSK, and self-managed Apache Kafka), the following option is also available:

For information about which configuration parameters apply to each event source, see the following topics.
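As a hedged boto3 sketch of the pause-and-resume use case described above (the mapping UUID is a placeholder):

import boto3

client = boto3.client("lambda")
mapping_uuid = "00000000-0000-0000-0000-000000000000"  # placeholder UUID
# Pause polling; calling again with Enabled=True resumes from the same position.
client.update_event_source_mapping(UUID=mapping_uuid, Enabled=False)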

" - }, - "UpdateFunctionCode":{ - "name":"UpdateFunctionCode", - "http":{ - "method":"PUT", - "requestUri":"/2015-03-31/functions/{FunctionName}/code", - "responseCode":200 - }, - "input":{"shape":"UpdateFunctionCodeRequest"}, - "output":{"shape":"FunctionConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"InvalidCodeSignatureException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"CodeVerificationFailedException"}, - {"shape":"CodeSigningConfigNotFoundException"}, - {"shape":"CodeStorageExceededException"}, - {"shape":"PreconditionFailedException"} - ], - "documentation":"

Updates a Lambda function's code. If code signing is enabled for the function, the code package must be signed by a trusted publisher. For more information, see Configuring code signing for Lambda.

If the function's package type is Image, then you must specify the code package in ImageUri as the URI of a container image in the Amazon ECR registry.

If the function's package type is Zip, then you must specify the deployment package as a .zip file archive. Enter the Amazon S3 bucket and key of the code .zip file location. You can also provide the function code inline using the ZipFile field.

The code in the deployment package must be compatible with the target instruction set architecture of the function (x86-64 or arm64).

The function's code is locked when you publish a version. You can't modify the code of a published version, only the unpublished version.

For a function defined as a container image, Lambda resolves the image tag to an image digest. In Amazon ECR, if you update the image tag to a new image, Lambda does not automatically update the function.
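A minimal boto3 sketch for a Zip package type; the bucket and key are placeholders (for an Image package type you would pass ImageUri instead):

import boto3

client = boto3.client("lambda")
client.update_function_code(
    FunctionName="my-function",
    S3Bucket="my-deploy-bucket",
    S3Key="builds/my-function.zip",
)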

" - }, - "UpdateFunctionConfiguration":{ - "name":"UpdateFunctionConfiguration", - "http":{ - "method":"PUT", - "requestUri":"/2015-03-31/functions/{FunctionName}/configuration", - "responseCode":200 - }, - "input":{"shape":"UpdateFunctionConfigurationRequest"}, - "output":{"shape":"FunctionConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"InvalidCodeSignatureException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"CodeVerificationFailedException"}, - {"shape":"CodeSigningConfigNotFoundException"}, - {"shape":"PreconditionFailedException"} - ], - "documentation":"

Modify the version-specific settings of a Lambda function.

When you update a function, Lambda provisions an instance of the function and its supporting resources. If your function connects to a VPC, this process can take a minute. During this time, you can't modify the function, but you can still invoke it. The LastUpdateStatus, LastUpdateStatusReason, and LastUpdateStatusReasonCode fields in the response from GetFunctionConfiguration indicate when the update is complete and the function is processing events with the new configuration. For more information, see Lambda function states.

These settings can vary between versions of a function and are locked when you publish a version. You can't modify the configuration of a published version, only the unpublished version.

To configure function concurrency, use PutFunctionConcurrency. To grant invoke permissions to an Amazon Web Services account or Amazon Web Services service, use AddPermission.
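A minimal boto3 sketch that updates two settings and then waits for the update to complete; names and values are placeholders:

import boto3

client = boto3.client("lambda")
client.update_function_configuration(
    FunctionName="my-function",
    MemorySize=512,
    Timeout=30,
)
# Block until LastUpdateStatus reports that the new configuration is live.
client.get_waiter("function_updated").wait(FunctionName="my-function")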

" - }, - "UpdateFunctionEventInvokeConfig":{ - "name":"UpdateFunctionEventInvokeConfig", - "http":{ - "method":"POST", - "requestUri":"/2019-09-25/functions/{FunctionName}/event-invoke-config", - "responseCode":200 - }, - "input":{"shape":"UpdateFunctionEventInvokeConfigRequest"}, - "output":{"shape":"FunctionEventInvokeConfig"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Updates the configuration for asynchronous invocation for a function, version, or alias.

To configure options for asynchronous invocation, use PutFunctionEventInvokeConfig.

" - }, - "UpdateFunctionUrlConfig":{ - "name":"UpdateFunctionUrlConfig", - "http":{ - "method":"PUT", - "requestUri":"/2021-10-31/functions/{FunctionName}/url", - "responseCode":200 - }, - "input":{"shape":"UpdateFunctionUrlConfigRequest"}, - "output":{"shape":"UpdateFunctionUrlConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Updates the configuration for a Lambda function URL.

" - } - }, - "shapes":{ - "AccountLimit":{ - "type":"structure", - "members":{ - "TotalCodeSize":{ - "shape":"Long", - "documentation":"

The amount of storage space that you can use for all deployment packages and layer archives.

" - }, - "CodeSizeUnzipped":{ - "shape":"Long", - "documentation":"

The maximum size of a function's deployment package and layers when they're extracted.

" - }, - "CodeSizeZipped":{ - "shape":"Long", - "documentation":"

The maximum size of a deployment package when it's uploaded directly to Lambda. Use Amazon S3 for larger files.

" - }, - "ConcurrentExecutions":{ - "shape":"Integer", - "documentation":"

The maximum number of simultaneous function executions.

" - }, - "UnreservedConcurrentExecutions":{ - "shape":"UnreservedConcurrentExecutions", - "documentation":"

The maximum number of simultaneous function executions, minus the capacity that's reserved for individual functions with PutFunctionConcurrency.

" - } - }, - "documentation":"

Limits that are related to concurrency and storage. All file and storage sizes are in bytes.

" - }, - "AccountUsage":{ - "type":"structure", - "members":{ - "TotalCodeSize":{ - "shape":"Long", - "documentation":"

The amount of storage space, in bytes, that's being used by deployment packages and layer archives.

" - }, - "FunctionCount":{ - "shape":"Long", - "documentation":"

The number of Lambda functions.

" - } - }, - "documentation":"

The number of functions and amount of storage in use.

" - }, - "Action":{ - "type":"string", - "pattern":"(lambda:[*]|lambda:[a-zA-Z]+|[*])" - }, - "AddLayerVersionPermissionRequest":{ - "type":"structure", - "required":[ - "LayerName", - "VersionNumber", - "StatementId", - "Action", - "Principal" - ], - "members":{ - "LayerName":{ - "shape":"LayerName", - "documentation":"

The name or Amazon Resource Name (ARN) of the layer.

", - "location":"uri", - "locationName":"LayerName" - }, - "VersionNumber":{ - "shape":"LayerVersionNumber", - "documentation":"

The version number.

", - "location":"uri", - "locationName":"VersionNumber" - }, - "StatementId":{ - "shape":"StatementId", - "documentation":"

An identifier that distinguishes the policy from others on the same layer version.

" - }, - "Action":{ - "shape":"LayerPermissionAllowedAction", - "documentation":"

The API action that grants access to the layer. For example, lambda:GetLayerVersion.

" - }, - "Principal":{ - "shape":"LayerPermissionAllowedPrincipal", - "documentation":"

An account ID, or * to grant layer usage permission to all accounts in an organization, or to all Amazon Web Services accounts (if organizationId is not specified). In the latter case, make sure that you really do want all Amazon Web Services accounts to have usage permission for this layer.

" - }, - "OrganizationId":{ - "shape":"OrganizationId", - "documentation":"

With the principal set to *, grant permission to all accounts in the specified organization.

" - }, - "RevisionId":{ - "shape":"String", - "documentation":"

Only update the policy if the revision ID matches the ID specified. Use this option to avoid modifying a policy that has changed since you last read it.
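Taken together, these parameters map onto a call like the following boto3 sketch (layer name, statement ID, and organization ID are placeholders):

import boto3

client = boto3.client("lambda")
# Grant every account in one organization permission to use version 1 of the layer.
client.add_layer_version_permission(
    LayerName="my-shared-utils",
    VersionNumber=1,
    StatementId="org-wide-access",
    Action="lambda:GetLayerVersion",
    Principal="*",
    OrganizationId="o-a1b2c3d4e5",
)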

", - "location":"querystring", - "locationName":"RevisionId" - } - } - }, - "AddLayerVersionPermissionResponse":{ - "type":"structure", - "members":{ - "Statement":{ - "shape":"String", - "documentation":"

The permission statement.

" - }, - "RevisionId":{ - "shape":"String", - "documentation":"

A unique identifier for the current revision of the policy.

" - } - } - }, - "AddPermissionRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "StatementId", - "Action", - "Principal" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function, version, or alias.

Name formats

You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "StatementId":{ - "shape":"StatementId", - "documentation":"

A statement identifier that differentiates the statement from others in the same policy.

" - }, - "Action":{ - "shape":"Action", - "documentation":"

The action that the principal can use on the function. For example, lambda:InvokeFunction or lambda:GetFunction.

" - }, - "Principal":{ - "shape":"Principal", - "documentation":"

The Amazon Web Services service, Amazon Web Services account, IAM user, or IAM role that invokes the function. If you specify a service, use SourceArn or SourceAccount to limit who can invoke the function through that service.

" - }, - "SourceArn":{ - "shape":"Arn", - "documentation":"

For Amazon Web Services services, the ARN of the Amazon Web Services resource that invokes the function. For example, an Amazon S3 bucket or Amazon SNS topic.

Note that Lambda configures the comparison using the StringLike operator.

" - }, - "SourceAccount":{ - "shape":"SourceOwner", - "documentation":"

For Amazon Web Services services, the ID of the Amazon Web Services account that owns the resource. Use this together with SourceArn to ensure that the specified account owns the resource. It is possible for an Amazon S3 bucket to be deleted by its owner and recreated by another account.

" - }, - "EventSourceToken":{ - "shape":"EventSourceToken", - "documentation":"

For Alexa Smart Home functions, a token that the invoker must supply.

" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

Specify a version or alias to add permissions to a published version of the function.

", - "location":"querystring", - "locationName":"Qualifier" - }, - "RevisionId":{ - "shape":"String", - "documentation":"

Update the policy only if the revision ID matches the ID that's specified. Use this option to avoid modifying a policy that has changed since you last read it.

" - }, - "PrincipalOrgID":{ - "shape":"PrincipalOrgID", - "documentation":"

The identifier for your organization in Organizations. Use this to grant permissions to all the Amazon Web Services accounts under this organization.

" - }, - "FunctionUrlAuthType":{ - "shape":"FunctionUrlAuthType", - "documentation":"

The type of authentication that your function URL uses. Set to AWS_IAM if you want to restrict access to authenticated users only. Set to NONE if you want to bypass IAM authentication to create a public endpoint. For more information, see Security and auth model for Lambda function URLs.
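Taken together, these parameters map onto a call like this boto3 sketch (the bucket ARN, account ID, and statement ID are placeholders):

import boto3

client = boto3.client("lambda")
# Allow an S3 bucket to invoke the function; SourceAccount pins the bucket's owner.
client.add_permission(
    FunctionName="my-function",
    StatementId="s3-invoke",
    Action="lambda:InvokeFunction",
    Principal="s3.amazonaws.com",
    SourceArn="arn:aws:s3:::my-upload-bucket",
    SourceAccount="123456789012",
)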

" - } - } - }, - "AddPermissionResponse":{ - "type":"structure", - "members":{ - "Statement":{ - "shape":"String", - "documentation":"

The permission statement that's added to the function policy.

" - } - } - }, - "AdditionalVersion":{ - "type":"string", - "max":1024, - "min":1, - "pattern":"[0-9]+" - }, - "AdditionalVersionWeights":{ - "type":"map", - "key":{"shape":"AdditionalVersion"}, - "value":{"shape":"Weight"} - }, - "Alias":{ - "type":"string", - "max":128, - "min":1, - "pattern":"(?!^[0-9]+$)([a-zA-Z0-9-_]+)" - }, - "AliasConfiguration":{ - "type":"structure", - "members":{ - "AliasArn":{ - "shape":"FunctionArn", - "documentation":"

The Amazon Resource Name (ARN) of the alias.

" - }, - "Name":{ - "shape":"Alias", - "documentation":"

The name of the alias.

" - }, - "FunctionVersion":{ - "shape":"Version", - "documentation":"

The function version that the alias invokes.

" - }, - "Description":{ - "shape":"Description", - "documentation":"

A description of the alias.

" - }, - "RoutingConfig":{ - "shape":"AliasRoutingConfiguration", - "documentation":"

The routing configuration of the alias.

" - }, - "RevisionId":{ - "shape":"String", - "documentation":"

A unique identifier that changes when you update the alias.

" - } - }, - "documentation":"

Provides configuration information about a Lambda function alias.

" - }, - "AliasList":{ - "type":"list", - "member":{"shape":"AliasConfiguration"} - }, - "AliasRoutingConfiguration":{ - "type":"structure", - "members":{ - "AdditionalVersionWeights":{ - "shape":"AdditionalVersionWeights", - "documentation":"

The second version, and the percentage of traffic that's routed to it.

" - } - }, - "documentation":"

The traffic-shifting configuration of a Lambda function alias.

" - }, - "AllowCredentials":{ - "type":"boolean", - "box":true - }, - "AllowMethodsList":{ - "type":"list", - "member":{"shape":"Method"}, - "max":6, - "min":0 - }, - "AllowOriginsList":{ - "type":"list", - "member":{"shape":"Origin"}, - "max":100, - "min":0 - }, - "AllowedPublishers":{ - "type":"structure", - "required":["SigningProfileVersionArns"], - "members":{ - "SigningProfileVersionArns":{ - "shape":"SigningProfileVersionArns", - "documentation":"

The Amazon Resource Name (ARN) for each of the signing profiles. A signing profile defines a trusted user who can sign a code package.

" - } - }, - "documentation":"

List of signing profiles that can sign a code package.

" - }, - "AmazonManagedKafkaEventSourceConfig":{ - "type":"structure", - "members":{ - "ConsumerGroupId":{ - "shape":"URI", - "documentation":"

The identifier for the Kafka consumer group to join. The consumer group ID must be unique among all your Kafka event sources. After creating a Kafka event source mapping with the consumer group ID specified, you cannot update this value. For more information, see Customizable consumer group ID.

" - }, - "SchemaRegistryConfig":{ - "shape":"KafkaSchemaRegistryConfig", - "documentation":"

Specific configuration settings for a Kafka schema registry.

" - } - }, - "documentation":"

Specific configuration settings for an Amazon Managed Streaming for Apache Kafka (Amazon MSK) event source.

" - }, - "ApplicationLogLevel":{ - "type":"string", - "enum":[ - "TRACE", - "DEBUG", - "INFO", - "WARN", - "ERROR", - "FATAL" - ] - }, - "Architecture":{ - "type":"string", - "enum":[ - "x86_64", - "arm64" - ] - }, - "ArchitecturesList":{ - "type":"list", - "member":{"shape":"Architecture"}, - "max":1, - "min":1 - }, - "Arn":{ - "type":"string", - "pattern":"arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\\-])+:([a-z]{2}(-gov)?-[a-z]+-\\d{1})?:(\\d{12})?:(.*)" - }, - "AttemptCount":{ - "type":"integer", - "min":0 - }, - "BatchSize":{ - "type":"integer", - "box":true, - "max":10000, - "min":1 - }, - "BinaryOperationPayload":{ - "type":"blob", - "max":262144, - "min":0, - "sensitive":true - }, - "BisectBatchOnFunctionError":{ - "type":"boolean", - "box":true - }, - "Blob":{ - "type":"blob", - "sensitive":true - }, - "BlobStream":{ - "type":"blob", - "streaming":true - }, - "Boolean":{"type":"boolean"}, - "CallbackDetails":{ - "type":"structure", - "members":{ - "CallbackId":{"shape":"CallbackId"}, - "Result":{"shape":"OperationPayload"}, - "Error":{"shape":"ErrorObject"} - } - }, - "CallbackFailedDetails":{ - "type":"structure", - "required":["Error"], - "members":{ - "Error":{"shape":"EventError"} - } - }, - "CallbackId":{ - "type":"string", - "max":1024, - "min":1, - "pattern":"[A-Za-z0-9+/]+={0,2}" - }, - "CallbackOptions":{ - "type":"structure", - "members":{ - "TimeoutSeconds":{"shape":"DurationSeconds"}, - "HeartbeatTimeoutSeconds":{"shape":"DurationSeconds"} - } - }, - "CallbackStartedDetails":{ - "type":"structure", - "required":["CallbackId"], - "members":{ - "CallbackId":{"shape":"CallbackId"}, - "HeartbeatTimeout":{"shape":"DurationSeconds"}, - "Timeout":{"shape":"DurationSeconds"} - } - }, - "CallbackSucceededDetails":{ - "type":"structure", - "required":["Result"], - "members":{ - "Result":{"shape":"EventResult"} - } - }, - "CallbackTimedOutDetails":{ - "type":"structure", - "required":["Error"], - "members":{ - "Error":{"shape":"EventError"} - } - }, - "CallbackTimeoutException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "ChainedInvokeFailedDetails":{ - "type":"structure", - "required":["Error"], - "members":{ - "Error":{"shape":"EventError"} - } - }, - "ChainedInvokeOptions":{ - "type":"structure", - "members":{ - "FunctionName":{"shape":"FunctionName"} - } - }, - "ChainedInvokePendingDetails":{ - "type":"structure", - "required":[ - "Input", - "FunctionName" - ], - "members":{ - "Input":{"shape":"EventInput"}, - "FunctionName":{"shape":"FunctionName"} - } - }, - "ChainedInvokeStartedDetails":{ - "type":"structure", - "members":{ - "DurableExecutionArn":{"shape":"DurableExecutionArn"} - } - }, - "ChainedInvokeStoppedDetails":{ - "type":"structure", - "required":["Error"], - "members":{ - "Error":{"shape":"EventError"} - } - }, - "ChainedInvokeSucceededDetails":{ - "type":"structure", - "required":["Result"], - "members":{ - "Result":{"shape":"EventResult"} - } - }, - "ChainedInvokeTimedOutDetails":{ - "type":"structure", - "required":["Error"], - "members":{ - "Error":{"shape":"EventError"} - } - }, - "CheckpointDurableExecutionRequest":{ - "type":"structure", - "required":[ - "DurableExecutionArn", - "CheckpointToken" - ], - "members":{ - "DurableExecutionArn":{ - "shape":"DurableExecutionArn", - "location":"uri", - "locationName":"DurableExecutionArn" - }, - "CheckpointToken":{"shape":"CheckpointToken"}, - 
"Updates":{"shape":"OperationUpdates"}, - "ClientToken":{"shape":"ClientToken"} - } - }, - "CheckpointDurableExecutionResponse":{ - "type":"structure", - "required":["NewExecutionState"], - "members":{ - "CheckpointToken":{"shape":"CheckpointToken"}, - "NewExecutionState":{"shape":"CheckpointUpdatedExecutionState"} - } - }, - "CheckpointToken":{ - "type":"string", - "max":2048, - "min":1, - "pattern":"[A-Za-z0-9+/]+={0,2}" - }, - "CheckpointUpdatedExecutionState":{ - "type":"structure", - "members":{ - "Operations":{"shape":"Operations"}, - "NextMarker":{"shape":"String"} - } - }, - "ClientToken":{ - "type":"string", - "max":64, - "min":1, - "pattern":"[\\x21-\\x7E]+" - }, - "CodeSigningConfig":{ - "type":"structure", - "required":[ - "CodeSigningConfigId", - "CodeSigningConfigArn", - "AllowedPublishers", - "CodeSigningPolicies", - "LastModified" - ], - "members":{ - "CodeSigningConfigId":{ - "shape":"CodeSigningConfigId", - "documentation":"

Unique identifier for the code signing configuration.

" - }, - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"

The Amazon Resource Name (ARN) of the code signing configuration.

" - }, - "Description":{ - "shape":"Description", - "documentation":"

Description of the code signing configuration.

" - }, - "AllowedPublishers":{ - "shape":"AllowedPublishers", - "documentation":"

List of allowed publishers.

" - }, - "CodeSigningPolicies":{ - "shape":"CodeSigningPolicies", - "documentation":"

The code signing policy controls the validation failure action for signature mismatch or expiry.

" - }, - "LastModified":{ - "shape":"Timestamp", - "documentation":"

The date and time that the code signing configuration was last modified, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" - } - }, - "documentation":"

Details about a code signing configuration.

" - }, - "CodeSigningConfigArn":{ - "type":"string", - "max":200, - "min":0, - "pattern":"arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:code-signing-config:csc-[a-z0-9]{17}" - }, - "CodeSigningConfigId":{ - "type":"string", - "pattern":"csc-[a-zA-Z0-9-_\\.]{17}" - }, - "CodeSigningConfigList":{ - "type":"list", - "member":{"shape":"CodeSigningConfig"} - }, - "CodeSigningConfigNotFoundException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

The specified code signing configuration does not exist.

", - "error":{ - "httpStatusCode":404, - "senderFault":true - }, - "exception":true - }, - "CodeSigningPolicies":{ - "type":"structure", - "members":{ - "UntrustedArtifactOnDeployment":{ - "shape":"CodeSigningPolicy", - "documentation":"

Code signing configuration policy for deployment validation failure. If you set the policy to Enforce, Lambda blocks the deployment request if signature validation checks fail. If you set the policy to Warn, Lambda allows the deployment and creates a CloudWatch log.

Default value: Warn

" - } - }, - "documentation":"

Code signing configuration policies specify the validation failure action for signature mismatch or expiry.

" - }, - "CodeSigningPolicy":{ - "type":"string", - "enum":[ - "Warn", - "Enforce" - ] - }, - "CodeStorageExceededException":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"String", - "documentation":"

The exception type.

" - }, - "message":{"shape":"String"} - }, - "documentation":"

Your Amazon Web Services account has exceeded its maximum total code size. For more information, see Lambda quotas.

", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "CodeVerificationFailedException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

The code signature failed one or more of the validation checks for signature mismatch or expiry, and the code signing policy is set to ENFORCE. Lambda blocks the deployment.

", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "CollectionName":{ - "type":"string", - "max":57, - "min":1, - "pattern":"(^(?!(system\\x2e)))(^[_a-zA-Z0-9])([^$]*)" - }, - "CompatibleArchitectures":{ - "type":"list", - "member":{"shape":"Architecture"}, - "max":2, - "min":0 - }, - "CompatibleRuntimes":{ - "type":"list", - "member":{"shape":"Runtime"}, - "max":15, - "min":0 - }, - "Concurrency":{ - "type":"structure", - "members":{ - "ReservedConcurrentExecutions":{ - "shape":"ReservedConcurrentExecutions", - "documentation":"

The number of concurrent executions that are reserved for this function. For more information, see Managing Lambda reserved concurrency.

" - } - } - }, - "ContextDetails":{ - "type":"structure", - "members":{ - "ReplayChildren":{"shape":"ReplayChildren"}, - "Result":{"shape":"OperationPayload"}, - "Error":{"shape":"ErrorObject"} - } - }, - "ContextFailedDetails":{ - "type":"structure", - "required":["Error"], - "members":{ - "Error":{"shape":"EventError"} - } - }, - "ContextOptions":{ - "type":"structure", - "members":{ - "ReplayChildren":{"shape":"ReplayChildren"} - } - }, - "ContextStartedDetails":{ - "type":"structure", - "members":{} - }, - "ContextSucceededDetails":{ - "type":"structure", - "required":["Result"], - "members":{ - "Result":{"shape":"EventResult"} - } - }, - "Cors":{ - "type":"structure", - "members":{ - "AllowCredentials":{ - "shape":"AllowCredentials", - "documentation":"

Whether to allow cookies or other credentials in requests to your function URL. The default is false.

" - }, - "AllowHeaders":{ - "shape":"HeadersList", - "documentation":"

The HTTP headers that origins can include in requests to your function URL. For example: Date, Keep-Alive, X-Custom-Header.

" - }, - "AllowMethods":{ - "shape":"AllowMethodsList", - "documentation":"

The HTTP methods that are allowed when calling your function URL. For example: GET, POST, DELETE, or the wildcard character (*).

" - }, - "AllowOrigins":{ - "shape":"AllowOriginsList", - "documentation":"

The origins that can access your function URL. You can list any number of specific origins, separated by a comma. For example: https://www.example.com, http://localhost:60905.

Alternatively, you can grant access to all origins using the wildcard character (*).

" - }, - "ExposeHeaders":{ - "shape":"HeadersList", - "documentation":"

The HTTP headers in your function response that you want to expose to origins that call your function URL. For example: Date, Keep-Alive, X-Custom-Header.

" - }, - "MaxAge":{ - "shape":"MaxAge", - "documentation":"

The maximum amount of time, in seconds, that web browsers can cache results of a preflight request. By default, this is set to 0, which means that the browser doesn't cache results.

" - } - }, - "documentation":"

The cross-origin resource sharing (CORS) settings for your Lambda function URL. Use CORS to grant access to your function URL from any origin. You can also use CORS to control access for specific HTTP headers and methods in requests to your function URL.
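A hedged boto3 sketch of passing these CORS settings when creating a function URL (the origins, headers, and function name are placeholders):

import boto3

client = boto3.client("lambda")
client.create_function_url_config(
    FunctionName="my-function",
    AuthType="AWS_IAM",
    Cors={
        "AllowOrigins": ["https://www.example.com"],
        "AllowMethods": ["GET", "POST"],
        "AllowHeaders": ["x-custom-header"],
        "MaxAge": 300,  # cache preflight results for five minutes
    },
)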

" - }, - "CreateAliasRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "Name", - "FunctionVersion" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Name":{ - "shape":"Alias", - "documentation":"

The name of the alias.

" - }, - "FunctionVersion":{ - "shape":"Version", - "documentation":"

The function version that the alias invokes.

" - }, - "Description":{ - "shape":"Description", - "documentation":"

A description of the alias.

" - }, - "RoutingConfig":{ - "shape":"AliasRoutingConfiguration", - "documentation":"

The routing configuration of the alias.

" - } - } - }, - "CreateCodeSigningConfigRequest":{ - "type":"structure", - "required":["AllowedPublishers"], - "members":{ - "Description":{ - "shape":"Description", - "documentation":"

Descriptive name for this code signing configuration.

" - }, - "AllowedPublishers":{ - "shape":"AllowedPublishers", - "documentation":"

Signing profiles for this code signing configuration.

" - }, - "CodeSigningPolicies":{ - "shape":"CodeSigningPolicies", - "documentation":"

The code signing policies define the actions to take if the validation checks fail.

" - }, - "Tags":{ - "shape":"Tags", - "documentation":"

A list of tags to add to the code signing configuration.

" - } - } - }, - "CreateCodeSigningConfigResponse":{ - "type":"structure", - "required":["CodeSigningConfig"], - "members":{ - "CodeSigningConfig":{ - "shape":"CodeSigningConfig", - "documentation":"

The code signing configuration.

" - } - } - }, - "CreateEventSourceMappingRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "EventSourceArn":{ - "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the event source.

" - }, - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it's limited to 64 characters in length.

" - }, - "Enabled":{ - "shape":"Enabled", - "documentation":"

When true, the event source mapping is active. When false, Lambda pauses polling and invocation.

Default: True

" - }, - "BatchSize":{ - "shape":"BatchSize", - "documentation":"

The maximum number of records in each batch that Lambda pulls from your stream or queue and sends to your function. Lambda passes all of the records in the batch to the function in a single call, up to the payload limit for synchronous invocation (6 MB).

" - }, - "FilterCriteria":{ - "shape":"FilterCriteria", - "documentation":"

An object that defines the filter criteria that determine whether Lambda should process an event. For more information, see Lambda event filtering.

" - }, - "MaximumBatchingWindowInSeconds":{ - "shape":"MaximumBatchingWindowInSeconds", - "documentation":"

The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function. You can configure MaximumBatchingWindowInSeconds to any value from 0 seconds to 300 seconds in one-second increments.

For Kinesis, DynamoDB, and Amazon SQS event sources, the default batching window is 0 seconds. For Amazon MSK, self-managed Apache Kafka, Amazon MQ, and DocumentDB event sources, the default batching window is 500 ms. Note that because you can only change MaximumBatchingWindowInSeconds in one-second increments, you cannot revert to the 500 ms default batching window after you have changed it. To restore the default batching window, you must create a new event source mapping.

Related setting: For Kinesis, DynamoDB, and Amazon SQS event sources, when you set BatchSize to a value greater than 10, you must set MaximumBatchingWindowInSeconds to at least 1.

" - }, - "ParallelizationFactor":{ - "shape":"ParallelizationFactor", - "documentation":"

(Kinesis and DynamoDB Streams only) The number of batches to process from each shard concurrently.

" - }, - "StartingPosition":{ - "shape":"EventSourcePosition", - "documentation":"

The position in a stream from which to start reading. Required for Amazon Kinesis and Amazon DynamoDB Stream event sources. AT_TIMESTAMP is supported only for Amazon Kinesis streams, Amazon DocumentDB, Amazon MSK, and self-managed Apache Kafka.

" - }, - "StartingPositionTimestamp":{ - "shape":"Date", - "documentation":"

With StartingPosition set to AT_TIMESTAMP, the time from which to start reading. StartingPositionTimestamp cannot be in the future.

" - }, - "DestinationConfig":{ - "shape":"DestinationConfig", - "documentation":"

(Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Kafka only) A configuration object that specifies the destination of an event after Lambda processes it.

" - }, - "MaximumRecordAgeInSeconds":{ - "shape":"MaximumRecordAgeInSeconds", - "documentation":"

(Kinesis and DynamoDB Streams only) Discard records older than the specified age. The default value is infinite (-1).

" - }, - "BisectBatchOnFunctionError":{ - "shape":"BisectBatchOnFunctionError", - "documentation":"

(Kinesis and DynamoDB Streams only) If the function returns an error, split the batch in two and retry.

" - }, - "MaximumRetryAttempts":{ - "shape":"MaximumRetryAttemptsEventSourceMapping", - "documentation":"

(Kinesis and DynamoDB Streams only) Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.

" - }, - "Tags":{ - "shape":"Tags", - "documentation":"

A list of tags to apply to the event source mapping.

" - }, - "TumblingWindowInSeconds":{ - "shape":"TumblingWindowInSeconds", - "documentation":"

(Kinesis and DynamoDB Streams only) The duration in seconds of a processing window for DynamoDB and Kinesis Streams event sources. A value of 0 seconds indicates no tumbling window.

" - }, - "Topics":{ - "shape":"Topics", - "documentation":"

The name of the Kafka topic.

" - }, - "Queues":{ - "shape":"Queues", - "documentation":"

(MQ) The name of the Amazon MQ broker destination queue to consume.

" - }, - "SourceAccessConfigurations":{ - "shape":"SourceAccessConfigurations", - "documentation":"

An array of authentication protocols or VPC components required to secure your event source.

" - }, - "SelfManagedEventSource":{ - "shape":"SelfManagedEventSource", - "documentation":"

The self-managed Apache Kafka cluster to receive records from.

" - }, - "FunctionResponseTypes":{ - "shape":"FunctionResponseTypeList", - "documentation":"

(Kinesis, DynamoDB Streams, and Amazon SQS) A list of current response type enums applied to the event source mapping.

" - }, - "AmazonManagedKafkaEventSourceConfig":{ - "shape":"AmazonManagedKafkaEventSourceConfig", - "documentation":"

Specific configuration settings for an Amazon Managed Streaming for Apache Kafka (Amazon MSK) event source.

" - }, - "SelfManagedKafkaEventSourceConfig":{ - "shape":"SelfManagedKafkaEventSourceConfig", - "documentation":"

Specific configuration settings for a self-managed Apache Kafka event source.

" - }, - "ScalingConfig":{ - "shape":"ScalingConfig", - "documentation":"

(Amazon SQS only) The scaling configuration for the event source. For more information, see Configuring maximum concurrency for Amazon SQS event sources.

" - }, - "DocumentDBEventSourceConfig":{ - "shape":"DocumentDBEventSourceConfig", - "documentation":"

Specific configuration settings for a DocumentDB event source.

" - }, - "KMSKeyArn":{ - "shape":"KMSKeyArn", - "documentation":"

The ARN of the Key Management Service (KMS) customer managed key that Lambda uses to encrypt your function's filter criteria. By default, Lambda does not encrypt your filter criteria object. Specify this property to encrypt data using your own customer managed key.

" - }, - "MetricsConfig":{ - "shape":"EventSourceMappingMetricsConfig", - "documentation":"

The metrics configuration for your event source. For more information, see Event source mapping metrics.

" - }, - "ProvisionedPollerConfig":{ - "shape":"ProvisionedPollerConfig", - "documentation":"

(Amazon MSK and self-managed Apache Kafka only) The provisioned mode configuration for the event source. For more information, see provisioned mode.
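Taken together, a typical Kinesis mapping built from these parameters looks like this boto3 sketch (the stream ARN and function name are placeholders):

import boto3

client = boto3.client("lambda")
client.create_event_source_mapping(
    EventSourceArn="arn:aws:kinesis:us-east-1:123456789012:stream/orders",
    FunctionName="my-function",
    StartingPosition="LATEST",
    BatchSize=100,
    MaximumBatchingWindowInSeconds=5,
)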

" - } - } - }, - "CreateFunctionRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "Role", - "Code" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

" - }, - "Runtime":{ - "shape":"Runtime", - "documentation":"

The identifier of the function's runtime. Runtime is required if the deployment package is a .zip file archive. Specifying a runtime results in an error if you're deploying a function using a container image.

The following list includes deprecated runtimes. Lambda blocks creating new functions and updating existing functions shortly after each runtime is deprecated. For more information, see Runtime use after deprecation.

For a list of all currently supported runtimes, see Supported runtimes.

" - }, - "Role":{ - "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the function's execution role.

" - }, - "Handler":{ - "shape":"Handler", - "documentation":"

The name of the method within your code that Lambda calls to run your function. Handler is required if the deployment package is a .zip file archive. The format includes the file name. It can also include namespaces and other qualifiers, depending on the runtime. For more information, see Lambda programming model.

" - }, - "Code":{ - "shape":"FunctionCode", - "documentation":"

The code for the function.

" - }, - "Description":{ - "shape":"Description", - "documentation":"

A description of the function.

" - }, - "Timeout":{ - "shape":"Timeout", - "documentation":"

The amount of time (in seconds) that Lambda allows a function to run before stopping it. The default is 3 seconds. The maximum allowed value is 900 seconds. For more information, see Lambda execution environment.

" - }, - "MemorySize":{ - "shape":"MemorySize", - "documentation":"

The amount of memory available to the function at runtime. Increasing the function memory also increases its CPU allocation. The default value is 128 MB. The value can be any multiple of 1 MB.

" - }, - "Publish":{ - "shape":"Boolean", - "documentation":"

Set to true to publish the first version of the function during creation.

" - }, - "VpcConfig":{ - "shape":"VpcConfig", - "documentation":"

For network connectivity to Amazon Web Services resources in a VPC, specify a list of security groups and subnets in the VPC. When you connect a function to a VPC, it can access resources and the internet only through that VPC. For more information, see Configuring a Lambda function to access resources in a VPC.

" - }, - "PackageType":{ - "shape":"PackageType", - "documentation":"

The type of deployment package. Set to Image for container image and set to Zip for .zip file archive.

" - }, - "DeadLetterConfig":{ - "shape":"DeadLetterConfig", - "documentation":"

A dead-letter queue configuration that specifies the queue or topic where Lambda sends asynchronous events when they fail processing. For more information, see Dead-letter queues.

" - }, - "Environment":{ - "shape":"Environment", - "documentation":"

Environment variables that are accessible from function code during execution.

" - }, - "KMSKeyArn":{ - "shape":"KMSKeyArn", - "documentation":"

The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt the following resources:

If you don't provide a customer managed key, Lambda uses an Amazon Web Services owned key or an Amazon Web Services managed key.

" - }, - "TracingConfig":{ - "shape":"TracingConfig", - "documentation":"

Set Mode to Active to sample and trace a subset of incoming requests with X-Ray.

" - }, - "Tags":{ - "shape":"Tags", - "documentation":"

A list of tags to apply to the function.

" - }, - "Layers":{ - "shape":"LayerList", - "documentation":"

A list of function layers to add to the function's execution environment. Specify each layer by its ARN, including the version.

" - }, - "FileSystemConfigs":{ - "shape":"FileSystemConfigList", - "documentation":"

Connection settings for an Amazon EFS file system.

" - }, - "ImageConfig":{ - "shape":"ImageConfig", - "documentation":"

Container image configuration values that override the values in the container image Dockerfile.

" - }, - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"

To enable code signing for this function, specify the ARN of a code-signing configuration. A code-signing configuration includes a set of signing profiles, which define the trusted publishers for this function.

" - }, - "Architectures":{ - "shape":"ArchitecturesList", - "documentation":"

The instruction set architecture that the function supports. Enter a string array with one of the valid values (arm64 or x86_64). The default value is x86_64.

" - }, - "EphemeralStorage":{ - "shape":"EphemeralStorage", - "documentation":"

The size of the function's /tmp directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).

" - }, - "SnapStart":{ - "shape":"SnapStart", - "documentation":"

The function's SnapStart setting.

" - }, - "LoggingConfig":{ - "shape":"LoggingConfig", - "documentation":"

The function's Amazon CloudWatch Logs configuration settings.

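Taken together, the CreateFunctionRequest members above map onto keyword arguments of the boto3 Lambda client. A minimal sketch of a .zip-package deployment, assuming a standard boto3 client; every name, ARN, and bucket below is an illustrative placeholder:

```python
import boto3

lambda_client = boto3.client("lambda")

# Minimal .zip-package CreateFunction call. Runtime and Handler are
# required because the deployment package is a .zip archive; all
# identifiers are placeholders, not values from this model.
response = lambda_client.create_function(
    FunctionName="my-function",
    Runtime="python3.12",
    Role="arn:aws:iam::123456789012:role/lambda-execution-role",
    Handler="app.handler",
    Code={"S3Bucket": "my-deployment-bucket", "S3Key": "my-function.zip"},
    Timeout=30,      # seconds; default 3, maximum 900
    MemorySize=256,  # MB; default 128
    Publish=True,    # publish version 1 during creation
)
print(response["FunctionArn"])
```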
" - }, - "DurableConfig":{"shape":"DurableConfig"} - } - }, - "CreateFunctionUrlConfigRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "AuthType" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"FunctionUrlQualifier", - "documentation":"

The alias name.

", - "location":"querystring", - "locationName":"Qualifier" - }, - "AuthType":{ - "shape":"FunctionUrlAuthType", - "documentation":"

The type of authentication that your function URL uses. Set to AWS_IAM if you want to restrict access to authenticated users only. Set to NONE if you want to bypass IAM authentication to create a public endpoint. For more information, see Security and auth model for Lambda function URLs.

" - }, - "Cors":{ - "shape":"Cors", - "documentation":"

The cross-origin resource sharing (CORS) settings for your function URL.

" - }, - "InvokeMode":{ - "shape":"InvokeMode", - "documentation":"

Use one of the following options:

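A minimal sketch of the corresponding boto3 call; the function name and allowed origin are placeholders:

```python
import boto3

lambda_client = boto3.client("lambda")

# Create a public endpoint (AuthType=NONE) with a narrow CORS policy.
url_config = lambda_client.create_function_url_config(
    FunctionName="my-function",  # placeholder
    AuthType="NONE",
    Cors={
        "AllowOrigins": ["https://example.com"],
        "AllowMethods": ["GET", "POST"],
    },
)
print(url_config["FunctionUrl"])
```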
" - } - } - }, - "CreateFunctionUrlConfigResponse":{ - "type":"structure", - "required":[ - "FunctionUrl", - "FunctionArn", - "AuthType", - "CreationTime" - ], - "members":{ - "FunctionUrl":{ - "shape":"FunctionUrl", - "documentation":"

The HTTP URL endpoint for your function.

" - }, - "FunctionArn":{ - "shape":"FunctionArn", - "documentation":"

The Amazon Resource Name (ARN) of your function.

" - }, - "AuthType":{ - "shape":"FunctionUrlAuthType", - "documentation":"

The type of authentication that your function URL uses. Set to AWS_IAM if you want to restrict access to authenticated users only. Set to NONE if you want to bypass IAM authentication to create a public endpoint. For more information, see Security and auth model for Lambda function URLs.

" - }, - "Cors":{ - "shape":"Cors", - "documentation":"

The cross-origin resource sharing (CORS) settings for your function URL.

" - }, - "CreationTime":{ - "shape":"Timestamp", - "documentation":"

When the function URL was created, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" - }, - "InvokeMode":{ - "shape":"InvokeMode", - "documentation":"

Use one of the following options:

" - } - } - }, - "DatabaseName":{ - "type":"string", - "max":63, - "min":1, - "pattern":"[^ /\\.$\\x22]*" - }, - "Date":{"type":"timestamp"}, - "DeadLetterConfig":{ - "type":"structure", - "members":{ - "TargetArn":{ - "shape":"ResourceArn", - "documentation":"

The Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic.

" - } - }, - "documentation":"

The dead-letter queue for failed asynchronous invocations.

" - }, - "DeleteAliasRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "Name" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Name":{ - "shape":"Alias", - "documentation":"

The name of the alias.

", - "location":"uri", - "locationName":"Name" - } - } - }, - "DeleteCodeSigningConfigRequest":{ - "type":"structure", - "required":["CodeSigningConfigArn"], - "members":{ - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"

The Amazon Resource Name (ARN) of the code signing configuration.

", - "location":"uri", - "locationName":"CodeSigningConfigArn" - } - } - }, - "DeleteCodeSigningConfigResponse":{ - "type":"structure", - "members":{} - }, - "DeleteEventSourceMappingRequest":{ - "type":"structure", - "required":["UUID"], - "members":{ - "UUID":{ - "shape":"String", - "documentation":"

The identifier of the event source mapping.

", - "location":"uri", - "locationName":"UUID" - } - } - }, - "DeleteFunctionCodeSigningConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - } - } - }, - "DeleteFunctionConcurrencyRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - } - } - }, - "DeleteFunctionEventInvokeConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function, version, or alias.

Name formats

You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

A version number or alias name.

", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "DeleteFunctionRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function or version.

Name formats

You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

Specify a version to delete. You can't delete a version that an alias references.

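A short sketch of deleting a single published version with boto3; omitting Qualifier deletes the function itself (names are placeholders):

```python
import boto3

lambda_client = boto3.client("lambda")

# Delete version 3 only; fails if an alias still references it.
lambda_client.delete_function(
    FunctionName="my-function",  # placeholder
    Qualifier="3",
)
```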
", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "DeleteFunctionUrlConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"FunctionUrlQualifier", - "documentation":"

The alias name.

", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "DeleteLayerVersionRequest":{ - "type":"structure", - "required":[ - "LayerName", - "VersionNumber" - ], - "members":{ - "LayerName":{ - "shape":"LayerName", - "documentation":"

The name or Amazon Resource Name (ARN) of the layer.

", - "location":"uri", - "locationName":"LayerName" - }, - "VersionNumber":{ - "shape":"LayerVersionNumber", - "documentation":"

The version number.

", - "location":"uri", - "locationName":"VersionNumber" - } - } - }, - "DeleteProvisionedConcurrencyConfigRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "Qualifier" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

The version number or alias name.

", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "Description":{ - "type":"string", - "max":256, - "min":0 - }, - "DestinationArn":{ - "type":"string", - "max":350, - "min":0, - "pattern":"$|arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\\-])+:([a-z]{2}(-gov)?-[a-z]+-\\d{1})?:(\\d{12})?:(.*)" - }, - "DestinationConfig":{ - "type":"structure", - "members":{ - "OnSuccess":{ - "shape":"OnSuccess", - "documentation":"

The destination configuration for successful invocations. Not supported in CreateEventSourceMapping or UpdateEventSourceMapping.

" - }, - "OnFailure":{ - "shape":"OnFailure", - "documentation":"

The destination configuration for failed invocations.

" - } - }, - "documentation":"

A configuration object that specifies the destination of an event after Lambda processes it. For more information, see Adding a destination.

" - }, - "DocumentDBEventSourceConfig":{ - "type":"structure", - "members":{ - "DatabaseName":{ - "shape":"DatabaseName", - "documentation":"

The name of the database to consume within the DocumentDB cluster.

" - }, - "CollectionName":{ - "shape":"CollectionName", - "documentation":"

The name of the collection to consume within the database. If you do not specify a collection, Lambda consumes all collections.

" - }, - "FullDocument":{ - "shape":"FullDocument", - "documentation":"

Determines what DocumentDB sends to your event stream during document update operations. If set to UpdateLookup, DocumentDB sends a delta describing the changes, along with a copy of the entire document. Otherwise, DocumentDB sends only a partial document that contains the changes.

" - } - }, - "documentation":"

Specific configuration settings for a DocumentDB event source.

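The chunk that follows introduces the new DurableConfig shape (RetentionPeriodInDays, ExecutionTimeout) that CreateFunctionRequest and FunctionConfiguration reference. Assuming boto3's usual mapping of model members to keyword arguments, wiring it into function creation might look like the sketch below; DurableConfig is new in this model and may not exist in released boto3 clients yet:

```python
import boto3

lambda_client = boto3.client("lambda")

# Hypothetical: DurableConfig comes from this service model and may not
# be recognized by released boto3 versions. Member names follow the
# DurableConfig shape; all other identifiers are placeholders.
lambda_client.create_function(
    FunctionName="my-durable-function",
    Runtime="python3.12",
    Role="arn:aws:iam::123456789012:role/lambda-execution-role",
    Handler="app.handler",
    Code={"S3Bucket": "my-deployment-bucket", "S3Key": "my-function.zip"},
    DurableConfig={
        "RetentionPeriodInDays": 7,  # history retention
        "ExecutionTimeout": 86400,   # seconds; the model allows 1 to 31622400
    },
)
```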
" - }, - "DurableConfig":{ - "type":"structure", - "members":{ - "RetentionPeriodInDays":{"shape":"RetentionPeriodInDays"}, - "ExecutionTimeout":{"shape":"ExecutionTimeout"} - } - }, - "DurableExecutionAlreadyStartedException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "error":{ - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - "DurableExecutionArn":{ - "type":"string", - "max":1024, - "min":1, - "pattern":"arn:([a-zA-Z0-9-]+):lambda:([a-zA-Z0-9-]+):(\\d{12}):function:([a-zA-Z0-9_-]+):(\\$LATEST(?:\\.PUBLISHED)?|[0-9]+)/durable-execution/([a-zA-Z0-9_-]+)/([a-zA-Z0-9_-]+)" - }, - "DurableExecutionName":{ - "type":"string", - "max":64, - "min":1, - "pattern":"[a-zA-Z0-9-_]+" - }, - "DurableExecutions":{ - "type":"list", - "member":{"shape":"Execution"} - }, - "DurationSeconds":{ - "type":"integer", - "box":true, - "min":0 - }, - "EC2AccessDeniedException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

Need additional permissions to configure VPC settings.

", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "EC2ThrottledException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

Amazon EC2 throttled Lambda during Lambda function initialization using the execution role provided for the function.

", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "EC2UnexpectedException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"}, - "EC2ErrorCode":{"shape":"String"} - }, - "documentation":"

Lambda received an unexpected Amazon EC2 client exception while setting up for the Lambda function.

", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "EFSIOException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

An error occurred when reading from or writing to a connected file system.

", - "error":{ - "httpStatusCode":410, - "senderFault":true - }, - "exception":true - }, - "EFSMountConnectivityException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

The Lambda function couldn't make a network connection to the configured file system.

", - "error":{ - "httpStatusCode":408, - "senderFault":true - }, - "exception":true - }, - "EFSMountFailureException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

The Lambda function couldn't mount the configured file system due to a permission or configuration issue.

", - "error":{ - "httpStatusCode":403, - "senderFault":true - }, - "exception":true - }, - "EFSMountTimeoutException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

The Lambda function made a network connection to the configured file system, but the mount operation timed out.

", - "error":{ - "httpStatusCode":408, - "senderFault":true - }, - "exception":true - }, - "ENILimitReachedException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

Lambda couldn't create an elastic network interface in the VPC, specified as part of Lambda function configuration, because the limit for network interfaces has been reached. For more information, see Lambda quotas.

", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "Enabled":{ - "type":"boolean", - "box":true - }, - "EndPointType":{ - "type":"string", - "enum":["KAFKA_BOOTSTRAP_SERVERS"] - }, - "Endpoint":{ - "type":"string", - "max":300, - "min":1, - "pattern":"(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\\-]*[A-Za-z0-9]):[0-9]{1,5}" - }, - "EndpointLists":{ - "type":"list", - "member":{"shape":"Endpoint"}, - "max":10, - "min":1 - }, - "Endpoints":{ - "type":"map", - "key":{"shape":"EndPointType"}, - "value":{"shape":"EndpointLists"}, - "max":2, - "min":1 - }, - "Environment":{ - "type":"structure", - "members":{ - "Variables":{ - "shape":"EnvironmentVariables", - "documentation":"

Environment variable key-value pairs. For more information, see Using Lambda environment variables.

" - } - }, - "documentation":"

A function's environment variable settings. You can use environment variables to adjust your function's behavior without updating code. An environment variable is a pair of strings that are stored in a function's version-specific configuration.

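A minimal sketch of setting the Variables map through boto3; keys must match the EnvironmentVariableName pattern, and the values here are placeholders:

```python
import boto3

lambda_client = boto3.client("lambda")

# Environment variables are part of the version-specific configuration,
# so updating them creates a new $LATEST configuration revision.
lambda_client.update_function_configuration(
    FunctionName="my-function",  # placeholder
    Environment={
        "Variables": {
            "LOG_LEVEL": "INFO",
            "TABLE_NAME": "my-table",
        }
    },
)
```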
" - }, - "EnvironmentError":{ - "type":"structure", - "members":{ - "ErrorCode":{ - "shape":"String", - "documentation":"

The error code.

" - }, - "Message":{ - "shape":"SensitiveString", - "documentation":"

The error message.

" - } - }, - "documentation":"

Error messages for environment variables that couldn't be applied.

" - }, - "EnvironmentResponse":{ - "type":"structure", - "members":{ - "Variables":{ - "shape":"EnvironmentVariables", - "documentation":"

Environment variable key-value pairs. Omitted from CloudTrail logs.

" - }, - "Error":{ - "shape":"EnvironmentError", - "documentation":"

Error messages for environment variables that couldn't be applied.

" - } - }, - "documentation":"

The results of an operation to update or read environment variables. If the operation succeeds, the response contains the environment variables. If it fails, the response contains details about the error.

" - }, - "EnvironmentVariableName":{ - "type":"string", - "pattern":"[a-zA-Z]([a-zA-Z0-9_])+", - "sensitive":true - }, - "EnvironmentVariableValue":{ - "type":"string", - "sensitive":true - }, - "EnvironmentVariables":{ - "type":"map", - "key":{"shape":"EnvironmentVariableName"}, - "value":{"shape":"EnvironmentVariableValue"}, - "sensitive":true - }, - "EphemeralStorage":{ - "type":"structure", - "required":["Size"], - "members":{ - "Size":{ - "shape":"EphemeralStorageSize", - "documentation":"

The size of the function's /tmp directory.

" - } - }, - "documentation":"

The size of the function's /tmp directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).

" - }, - "EphemeralStorageSize":{ - "type":"integer", - "box":true, - "max":10240, - "min":512 - }, - "ErrorData":{ - "type":"string", - "sensitive":true - }, - "ErrorMessage":{ - "type":"string", - "sensitive":true - }, - "ErrorObject":{ - "type":"structure", - "members":{ - "ErrorMessage":{"shape":"ErrorMessage"}, - "ErrorType":{"shape":"ErrorType"}, - "ErrorData":{"shape":"ErrorData"}, - "StackTrace":{"shape":"StackTraceEntries"} - } - }, - "ErrorType":{ - "type":"string", - "sensitive":true - }, - "Event":{ - "type":"structure", - "members":{ - "EventType":{"shape":"EventType"}, - "SubType":{"shape":"OperationSubType"}, - "EventId":{"shape":"EventId"}, - "Id":{"shape":"OperationId"}, - "Name":{"shape":"OperationName"}, - "EventTimestamp":{"shape":"ExecutionTimestamp"}, - "ParentId":{"shape":"OperationId"}, - "ExecutionStartedDetails":{"shape":"ExecutionStartedDetails"}, - "ExecutionSucceededDetails":{"shape":"ExecutionSucceededDetails"}, - "ExecutionFailedDetails":{"shape":"ExecutionFailedDetails"}, - "ExecutionTimedOutDetails":{"shape":"ExecutionTimedOutDetails"}, - "ExecutionStoppedDetails":{"shape":"ExecutionStoppedDetails"}, - "ContextStartedDetails":{"shape":"ContextStartedDetails"}, - "ContextSucceededDetails":{"shape":"ContextSucceededDetails"}, - "ContextFailedDetails":{"shape":"ContextFailedDetails"}, - "WaitStartedDetails":{"shape":"WaitStartedDetails"}, - "WaitSucceededDetails":{"shape":"WaitSucceededDetails"}, - "WaitCancelledDetails":{"shape":"WaitCancelledDetails"}, - "StepStartedDetails":{"shape":"StepStartedDetails"}, - "StepSucceededDetails":{"shape":"StepSucceededDetails"}, - "StepFailedDetails":{"shape":"StepFailedDetails"}, - "ChainedInvokePendingDetails":{"shape":"ChainedInvokePendingDetails"}, - "ChainedInvokeStartedDetails":{"shape":"ChainedInvokeStartedDetails"}, - "ChainedInvokeSucceededDetails":{"shape":"ChainedInvokeSucceededDetails"}, - "ChainedInvokeFailedDetails":{"shape":"ChainedInvokeFailedDetails"}, - "ChainedInvokeTimedOutDetails":{"shape":"ChainedInvokeTimedOutDetails"}, - "ChainedInvokeStoppedDetails":{"shape":"ChainedInvokeStoppedDetails"}, - "CallbackStartedDetails":{"shape":"CallbackStartedDetails"}, - "CallbackSucceededDetails":{"shape":"CallbackSucceededDetails"}, - "CallbackFailedDetails":{"shape":"CallbackFailedDetails"}, - "CallbackTimedOutDetails":{"shape":"CallbackTimedOutDetails"}, - "InvocationCompletedDetails":{"shape":"InvocationCompletedDetails"} - } - }, - "EventError":{ - "type":"structure", - "members":{ - "Payload":{"shape":"ErrorObject"}, - "Truncated":{"shape":"Truncated"} - } - }, - "EventId":{ - "type":"integer", - "box":true, - "min":1 - }, - "EventInput":{ - "type":"structure", - "members":{ - "Payload":{"shape":"InputPayload"}, - "Truncated":{"shape":"Truncated"} - } - }, - "EventResult":{ - "type":"structure", - "members":{ - "Payload":{"shape":"OperationPayload"}, - "Truncated":{"shape":"Truncated"} - } - }, - "EventSourceMappingArn":{ - "type":"string", - "max":120, - "min":85, - "pattern":"arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}((-gov)|(-iso([a-z]?)))?-[a-z]+-\\d{1}:\\d{12}:event-source-mapping:[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}" - }, - "EventSourceMappingConfiguration":{ - "type":"structure", - "members":{ - "UUID":{ - "shape":"String", - "documentation":"

The identifier of the event source mapping.

" - }, - "StartingPosition":{ - "shape":"EventSourcePosition", - "documentation":"

The position in a stream from which to start reading. Required for Amazon Kinesis and Amazon DynamoDB Stream event sources. AT_TIMESTAMP is supported only for Amazon Kinesis streams, Amazon DocumentDB, Amazon MSK, and self-managed Apache Kafka.

" - }, - "StartingPositionTimestamp":{ - "shape":"Date", - "documentation":"

With StartingPosition set to AT_TIMESTAMP, the time from which to start reading. StartingPositionTimestamp cannot be in the future.

" - }, - "BatchSize":{ - "shape":"BatchSize", - "documentation":"

The maximum number of records in each batch that Lambda pulls from your stream or queue and sends to your function. Lambda passes all of the records in the batch to the function in a single call, up to the payload limit for synchronous invocation (6 MB).

Default value: Varies by service. For Amazon SQS, the default is 10. For all other services, the default is 100.

Related setting: When you set BatchSize to a value greater than 10, you must set MaximumBatchingWindowInSeconds to at least 1.

" - }, - "MaximumBatchingWindowInSeconds":{ - "shape":"MaximumBatchingWindowInSeconds", - "documentation":"

The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function. You can configure MaximumBatchingWindowInSeconds to any value from 0 seconds to 300 seconds in increments of seconds.

For streams and Amazon SQS event sources, the default batching window is 0 seconds. For Amazon MSK, self-managed Apache Kafka, Amazon MQ, and DocumentDB event sources, the default batching window is 500 ms. Note that because you can only change MaximumBatchingWindowInSeconds in increments of seconds, you cannot revert to the 500 ms default batching window after you have changed it. To restore the default batching window, you must create a new event source mapping.

Related setting: For streams and Amazon SQS event sources, when you set BatchSize to a value greater than 10, you must set MaximumBatchingWindowInSeconds to at least 1.

" - }, - "ParallelizationFactor":{ - "shape":"ParallelizationFactor", - "documentation":"

(Kinesis and DynamoDB Streams only) The number of batches to process concurrently from each shard. The default value is 1.

" - }, - "EventSourceArn":{ - "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the event source.

" - }, - "FilterCriteria":{ - "shape":"FilterCriteria", - "documentation":"

An object that defines the filter criteria that determine whether Lambda should process an event. For more information, see Lambda event filtering.

If filter criteria is encrypted, this field shows up as null in the response of ListEventSourceMapping API calls. You can view this field in plaintext in the response of GetEventSourceMapping and DeleteEventSourceMapping calls if you have kms:Decrypt permissions for the correct KMS key.

" - }, - "FunctionArn":{ - "shape":"FunctionArn", - "documentation":"

The ARN of the Lambda function.

" - }, - "LastModified":{ - "shape":"Date", - "documentation":"

The date that the event source mapping was last updated or that its state changed.

" - }, - "LastProcessingResult":{ - "shape":"String", - "documentation":"

The result of the event source mapping's last processing attempt.

" - }, - "State":{ - "shape":"String", - "documentation":"

The state of the event source mapping. It can be one of the following: Creating, Enabling, Enabled, Disabling, Disabled, Updating, or Deleting.

" - }, - "StateTransitionReason":{ - "shape":"String", - "documentation":"

Indicates whether a user or Lambda made the last change to the event source mapping.

" - }, - "DestinationConfig":{ - "shape":"DestinationConfig", - "documentation":"

(Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Apache Kafka event sources only) A configuration object that specifies the destination of an event after Lambda processes it.

" - }, - "Topics":{ - "shape":"Topics", - "documentation":"

The name of the Kafka topic.

" - }, - "Queues":{ - "shape":"Queues", - "documentation":"

(Amazon MQ) The name of the Amazon MQ broker destination queue to consume.

" - }, - "SourceAccessConfigurations":{ - "shape":"SourceAccessConfigurations", - "documentation":"

An array of the authentication protocol, VPC components, or virtual host to secure and define your event source.

" - }, - "SelfManagedEventSource":{ - "shape":"SelfManagedEventSource", - "documentation":"

The self-managed Apache Kafka cluster for your event source.

" - }, - "MaximumRecordAgeInSeconds":{ - "shape":"MaximumRecordAgeInSeconds", - "documentation":"

(Kinesis and DynamoDB Streams only) Discard records older than the specified age. The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, Lambda never discards old records.

The minimum valid value for maximum record age is 60 seconds. Although values less than 60 and greater than -1 fall within the parameter's absolute range, they are not allowed.

" - }, - "BisectBatchOnFunctionError":{ - "shape":"BisectBatchOnFunctionError", - "documentation":"

(Kinesis and DynamoDB Streams only) If the function returns an error, split the batch in two and retry. The default value is false.

" - }, - "MaximumRetryAttempts":{ - "shape":"MaximumRetryAttemptsEventSourceMapping", - "documentation":"

(Kinesis and DynamoDB Streams only) Discard records after the specified number of retries. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, Lambda retries failed records until the record expires in the event source.

" - }, - "TumblingWindowInSeconds":{ - "shape":"TumblingWindowInSeconds", - "documentation":"

(Kinesis and DynamoDB Streams only) The duration in seconds of a processing window for DynamoDB and Kinesis Streams event sources. A value of 0 seconds indicates no tumbling window.

" - }, - "FunctionResponseTypes":{ - "shape":"FunctionResponseTypeList", - "documentation":"

(Kinesis, DynamoDB Streams, and Amazon SQS) A list of current response type enums applied to the event source mapping.

" - }, - "AmazonManagedKafkaEventSourceConfig":{ - "shape":"AmazonManagedKafkaEventSourceConfig", - "documentation":"

Specific configuration settings for an Amazon Managed Streaming for Apache Kafka (Amazon MSK) event source.

" - }, - "SelfManagedKafkaEventSourceConfig":{ - "shape":"SelfManagedKafkaEventSourceConfig", - "documentation":"

Specific configuration settings for a self-managed Apache Kafka event source.

" - }, - "ScalingConfig":{ - "shape":"ScalingConfig", - "documentation":"

(Amazon SQS only) The scaling configuration for the event source. For more information, see Configuring maximum concurrency for Amazon SQS event sources.

" - }, - "DocumentDBEventSourceConfig":{ - "shape":"DocumentDBEventSourceConfig", - "documentation":"

Specific configuration settings for a DocumentDB event source.

" - }, - "KMSKeyArn":{ - "shape":"KMSKeyArn", - "documentation":"

The ARN of the Key Management Service (KMS) customer managed key that Lambda uses to encrypt your function's filter criteria.

" - }, - "FilterCriteriaError":{ - "shape":"FilterCriteriaError", - "documentation":"

An object that contains details about an error related to filter criteria encryption.

" - }, - "EventSourceMappingArn":{ - "shape":"EventSourceMappingArn", - "documentation":"

The Amazon Resource Name (ARN) of the event source mapping.

" - }, - "MetricsConfig":{ - "shape":"EventSourceMappingMetricsConfig", - "documentation":"

The metrics configuration for your event source. For more information, see Event source mapping metrics.

" - }, - "ProvisionedPollerConfig":{ - "shape":"ProvisionedPollerConfig", - "documentation":"

(Amazon MSK and self-managed Apache Kafka only) The provisioned mode configuration for the event source. For more information, see provisioned mode.

" - } - }, - "documentation":"

A mapping between an Amazon Web Services resource and a Lambda function. For details, see CreateEventSourceMapping.

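A sketch of creating such a mapping with boto3, illustrating the documented rule that a BatchSize above 10 requires MaximumBatchingWindowInSeconds of at least 1 (ARNs are placeholders):

```python
import boto3

lambda_client = boto3.client("lambda")

# SQS mapping: BatchSize > 10, so a batching window >= 1 is required.
mapping = lambda_client.create_event_source_mapping(
    EventSourceArn="arn:aws:sqs:us-east-1:123456789012:my-queue",
    FunctionName="my-function",
    BatchSize=25,
    MaximumBatchingWindowInSeconds=5,
)
print(mapping["UUID"], mapping["State"])  # e.g. "... Creating"
```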
" - }, - "EventSourceMappingMetric":{ - "type":"string", - "enum":["EventCount"] - }, - "EventSourceMappingMetricList":{ - "type":"list", - "member":{"shape":"EventSourceMappingMetric"}, - "max":1, - "min":0 - }, - "EventSourceMappingMetricsConfig":{ - "type":"structure", - "members":{ - "Metrics":{ - "shape":"EventSourceMappingMetricList", - "documentation":"

The metrics you want your event source mapping to produce. Include EventCount to receive event source mapping metrics related to the number of events processed by your event source mapping. For more information about these metrics, see Event source mapping metrics.

" - } - }, - "documentation":"

The metrics configuration for your event source. Use this configuration object to define which metrics you want your event source mapping to produce.

" - }, - "EventSourceMappingsList":{ - "type":"list", - "member":{"shape":"EventSourceMappingConfiguration"} - }, - "EventSourcePosition":{ - "type":"string", - "enum":[ - "TRIM_HORIZON", - "LATEST", - "AT_TIMESTAMP" - ] - }, - "EventSourceToken":{ - "type":"string", - "max":256, - "min":0, - "pattern":"[a-zA-Z0-9._\\-]+" - }, - "EventType":{ - "type":"string", - "enum":[ - "ExecutionStarted", - "ExecutionSucceeded", - "ExecutionFailed", - "ExecutionTimedOut", - "ExecutionStopped", - "ContextStarted", - "ContextSucceeded", - "ContextFailed", - "WaitStarted", - "WaitSucceeded", - "WaitCancelled", - "StepStarted", - "StepSucceeded", - "StepFailed", - "ChainedInvokePending", - "ChainedInvokeStarted", - "ChainedInvokeSucceeded", - "ChainedInvokeFailed", - "ChainedInvokeTimedOut", - "ChainedInvokeCancelled", - "CallbackStarted", - "CallbackSucceeded", - "CallbackFailed", - "CallbackTimedOut", - "InvocationCompleted" - ] - }, - "Events":{ - "type":"list", - "member":{"shape":"Event"} - }, - "Execution":{ - "type":"structure", - "required":[ - "DurableExecutionArn", - "DurableExecutionName", - "FunctionArn", - "Status", - "StartTimestamp" - ], - "members":{ - "DurableExecutionArn":{"shape":"DurableExecutionArn"}, - "DurableExecutionName":{"shape":"DurableExecutionName"}, - "FunctionArn":{"shape":"FunctionArn"}, - "Status":{"shape":"ExecutionStatus"}, - "StartTimestamp":{"shape":"ExecutionTimestamp"}, - "EndTimestamp":{"shape":"ExecutionTimestamp"} - } - }, - "ExecutionDetails":{ - "type":"structure", - "members":{ - "InputPayload":{"shape":"InputPayload"} - } - }, - "ExecutionFailedDetails":{ - "type":"structure", - "required":["Error"], - "members":{ - "Error":{"shape":"EventError"} - } - }, - "ExecutionStartedDetails":{ - "type":"structure", - "required":[ - "Input", - "ExecutionTimeout" - ], - "members":{ - "Input":{"shape":"EventInput"}, - "ExecutionTimeout":{"shape":"DurationSeconds"} - } - }, - "ExecutionStatus":{ - "type":"string", - "enum":[ - "RUNNING", - "SUCCEEDED", - "FAILED", - "TIMED_OUT", - "STOPPED" - ] - }, - "ExecutionStatusList":{ - "type":"list", - "member":{"shape":"ExecutionStatus"} - }, - "ExecutionStoppedDetails":{ - "type":"structure", - "required":["Error"], - "members":{ - "Error":{"shape":"EventError"} - } - }, - "ExecutionSucceededDetails":{ - "type":"structure", - "required":["Result"], - "members":{ - "Result":{"shape":"EventResult"} - } - }, - "ExecutionTimedOutDetails":{ - "type":"structure", - "members":{ - "Error":{"shape":"EventError"} - } - }, - "ExecutionTimeout":{ - "type":"integer", - "box":true, - "max":31622400, - "min":1 - }, - "ExecutionTimestamp":{"type":"timestamp"}, - "FileSystemArn":{ - "type":"string", - "max":200, - "min":0, - "pattern":"arn:aws[a-zA-Z-]*:elasticfilesystem:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:access-point/fsap-[a-f0-9]{17}" - }, - "FileSystemConfig":{ - "type":"structure", - "required":[ - "Arn", - "LocalMountPath" - ], - "members":{ - "Arn":{ - "shape":"FileSystemArn", - "documentation":"

The Amazon Resource Name (ARN) of the Amazon EFS access point that provides access to the file system.

" - }, - "LocalMountPath":{ - "shape":"LocalMountPath", - "documentation":"

The path where the function can access the file system, starting with /mnt/.

" - } - }, - "documentation":"

Details about the connection between a Lambda function and an Amazon EFS file system.

" - }, - "FileSystemConfigList":{ - "type":"list", - "member":{"shape":"FileSystemConfig"}, - "max":1, - "min":0 - }, - "Filter":{ - "type":"structure", - "members":{ - "Pattern":{ - "shape":"Pattern", - "documentation":"

A filter pattern. For more information on the syntax of a filter pattern, see Filter rule syntax.

" - } - }, - "documentation":"

A structure within a FilterCriteria object that defines an event filtering pattern.

" - }, - "FilterCriteria":{ - "type":"structure", - "members":{ - "Filters":{ - "shape":"FilterList", - "documentation":"

A list of filters.

" - } - }, - "documentation":"

An object that contains the filters for an event source.

" - }, - "FilterCriteriaError":{ - "type":"structure", - "members":{ - "ErrorCode":{ - "shape":"FilterCriteriaErrorCode", - "documentation":"

The KMS exception that resulted from filter criteria encryption or decryption.

" - }, - "Message":{ - "shape":"FilterCriteriaErrorMessage", - "documentation":"

The error message.

" - } - }, - "documentation":"

An object that contains details about an error related to filter criteria encryption.

" - }, - "FilterCriteriaErrorCode":{ - "type":"string", - "max":50, - "min":10, - "pattern":"[A-Za-z]+Exception" - }, - "FilterCriteriaErrorMessage":{ - "type":"string", - "max":2048, - "min":10, - "pattern":".*" - }, - "FilterList":{ - "type":"list", - "member":{"shape":"Filter"} - }, - "FullDocument":{ - "type":"string", - "enum":[ - "UpdateLookup", - "Default" - ] - }, - "FunctionArn":{ - "type":"string", - "max":10000, - "min":0, - "pattern":"arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?" - }, - "FunctionArnList":{ - "type":"list", - "member":{"shape":"FunctionArn"} - }, - "FunctionCode":{ - "type":"structure", - "members":{ - "ZipFile":{ - "shape":"Blob", - "documentation":"

The base64-encoded contents of the deployment package. Amazon Web Services SDK and CLI clients handle the encoding for you.

" - }, - "S3Bucket":{ - "shape":"S3Bucket", - "documentation":"

An Amazon S3 bucket in the same Amazon Web Services Region as your function. The bucket can be in a different Amazon Web Services account.

" - }, - "S3Key":{ - "shape":"S3Key", - "documentation":"

The Amazon S3 key of the deployment package.

" - }, - "S3ObjectVersion":{ - "shape":"S3ObjectVersion", - "documentation":"

For versioned objects, the version of the deployment package object to use.

" - }, - "ImageUri":{ - "shape":"String", - "documentation":"

URI of a container image in the Amazon ECR registry.

" - }, - "SourceKMSKeyArn":{ - "shape":"KMSKeyArn", - "documentation":"

The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt your function's .zip deployment package. If you don't provide a customer managed key, Lambda uses an Amazon Web Services owned key.

" - } - }, - "documentation":"

The code for the Lambda function. You can either specify an object in Amazon S3, upload a .zip file archive deployment package directly, or specify the URI of a container image.

" - }, - "FunctionCodeLocation":{ - "type":"structure", - "members":{ - "RepositoryType":{ - "shape":"String", - "documentation":"

The service that's hosting the file.

" - }, - "Location":{ - "shape":"String", - "documentation":"

A presigned URL that you can use to download the deployment package.

" - }, - "ImageUri":{ - "shape":"String", - "documentation":"

URI of a container image in the Amazon ECR registry.

" - }, - "ResolvedImageUri":{ - "shape":"String", - "documentation":"

The resolved URI for the image.

" - }, - "SourceKMSKeyArn":{ - "shape":"String", - "documentation":"

The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt your function's .zip deployment package. If you don't provide a customer managed key, Lambda uses an Amazon Web Services owned key.

" - } - }, - "documentation":"

Details about a function's deployment package.

" - }, - "FunctionConfiguration":{ - "type":"structure", - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"

The name of the function.

" - }, - "FunctionArn":{ - "shape":"NameSpacedFunctionArn", - "documentation":"

The function's Amazon Resource Name (ARN).

" - }, - "Runtime":{ - "shape":"Runtime", - "documentation":"

The identifier of the function's runtime. Runtime is required if the deployment package is a .zip file archive. Specifying a runtime results in an error if you're deploying a function using a container image.

The following list includes deprecated runtimes. Lambda blocks creating new functions and updating existing functions shortly after each runtime is deprecated. For more information, see Runtime use after deprecation.

For a list of all currently supported runtimes, see Supported runtimes.

" - }, - "Role":{ - "shape":"RoleArn", - "documentation":"

The function's execution role.

" - }, - "Handler":{ - "shape":"Handler", - "documentation":"

The handler method that Lambda calls to begin running your function.

" - }, - "CodeSize":{ - "shape":"Long", - "documentation":"

The size of the function's deployment package, in bytes.

" - }, - "Description":{ - "shape":"Description", - "documentation":"

The function's description.

" - }, - "Timeout":{ - "shape":"Timeout", - "documentation":"

The amount of time in seconds that Lambda allows a function to run before stopping it.

" - }, - "MemorySize":{ - "shape":"MemorySize", - "documentation":"

The amount of memory available to the function at runtime.

" - }, - "LastModified":{ - "shape":"Timestamp", - "documentation":"

The date and time that the function was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" - }, - "CodeSha256":{ - "shape":"String", - "documentation":"

The SHA256 hash of the function's deployment package.

" - }, - "Version":{ - "shape":"Version", - "documentation":"

The version of the Lambda function.

" - }, - "VpcConfig":{ - "shape":"VpcConfigResponse", - "documentation":"

The function's networking configuration.

" - }, - "DeadLetterConfig":{ - "shape":"DeadLetterConfig", - "documentation":"

The function's dead letter queue.

" - }, - "Environment":{ - "shape":"EnvironmentResponse", - "documentation":"

The function's environment variables. Omitted from CloudTrail logs.

" - }, - "KMSKeyArn":{ - "shape":"KMSKeyArn", - "documentation":"

The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt the following resources:

If you don't provide a customer managed key, Lambda uses an Amazon Web Services owned key or an Amazon Web Services managed key.

" - }, - "TracingConfig":{ - "shape":"TracingConfigResponse", - "documentation":"

The function's X-Ray tracing configuration.

" - }, - "MasterArn":{ - "shape":"FunctionArn", - "documentation":"

For Lambda@Edge functions, the ARN of the main function.

" - }, - "RevisionId":{ - "shape":"String", - "documentation":"

The latest updated revision of the function or alias.

" - }, - "Layers":{ - "shape":"LayersReferenceList", - "documentation":"

The function's layers.

" - }, - "State":{ - "shape":"State", - "documentation":"

The current state of the function. When the state is Inactive, you can reactivate the function by invoking it.

" - }, - "StateReason":{ - "shape":"StateReason", - "documentation":"

The reason for the function's current state.

" - }, - "StateReasonCode":{ - "shape":"StateReasonCode", - "documentation":"

The reason code for the function's current state. When the code is Creating, you can't invoke or modify the function.

" - }, - "LastUpdateStatus":{ - "shape":"LastUpdateStatus", - "documentation":"

The status of the last update that was performed on the function. This is first set to Successful after function creation completes.

" - }, - "LastUpdateStatusReason":{ - "shape":"LastUpdateStatusReason", - "documentation":"

The reason for the last update that was performed on the function.

" - }, - "LastUpdateStatusReasonCode":{ - "shape":"LastUpdateStatusReasonCode", - "documentation":"

The reason code for the last update that was performed on the function.

" - }, - "FileSystemConfigs":{ - "shape":"FileSystemConfigList", - "documentation":"

Connection settings for an Amazon EFS file system.

" - }, - "PackageType":{ - "shape":"PackageType", - "documentation":"

The type of deployment package. Set to Image for container image and set to Zip for .zip file archive.

" - }, - "ImageConfigResponse":{ - "shape":"ImageConfigResponse", - "documentation":"

The function's image configuration values.

" - }, - "SigningProfileVersionArn":{ - "shape":"Arn", - "documentation":"

The ARN of the signing profile version.

" - }, - "SigningJobArn":{ - "shape":"Arn", - "documentation":"

The ARN of the signing job.

" - }, - "Architectures":{ - "shape":"ArchitecturesList", - "documentation":"

The instruction set architecture that the function supports. Architecture is a string array with one of the valid values. The default architecture value is x86_64.

" - }, - "EphemeralStorage":{ - "shape":"EphemeralStorage", - "documentation":"

The size of the function's /tmp directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).

" - }, - "SnapStart":{ - "shape":"SnapStartResponse", - "documentation":"

Set ApplyOn to PublishedVersions to create a snapshot of the initialized execution environment when you publish a function version. For more information, see Improving startup performance with Lambda SnapStart.

" - }, - "RuntimeVersionConfig":{ - "shape":"RuntimeVersionConfig", - "documentation":"

The ARN of the runtime and any errors that occurred.

" - }, - "LoggingConfig":{ - "shape":"LoggingConfig", - "documentation":"

The function's Amazon CloudWatch Logs configuration settings.

" - }, - "DurableConfig":{"shape":"DurableConfig"} - }, - "documentation":"

Details about a function's configuration.

" - }, - "FunctionEventInvokeConfig":{ - "type":"structure", - "members":{ - "LastModified":{ - "shape":"Date", - "documentation":"

The date and time that the configuration was last updated.

" - }, - "FunctionArn":{ - "shape":"FunctionArn", - "documentation":"

The Amazon Resource Name (ARN) of the function.

" - }, - "MaximumRetryAttempts":{ - "shape":"MaximumRetryAttempts", - "documentation":"

The maximum number of times to retry when the function returns an error.

" - }, - "MaximumEventAgeInSeconds":{ - "shape":"MaximumEventAgeInSeconds", - "documentation":"

The maximum age of a request that Lambda sends to a function for processing.

" - }, - "DestinationConfig":{ - "shape":"DestinationConfig", - "documentation":"

A destination for events after they have been sent to a function for processing.

Destinations

S3 buckets are supported only for on-failure destinations. To retain records of successful invocations, use another destination type.

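A sketch of writing these settings with boto3's put_function_event_invoke_config; the destination queue ARN is a placeholder:

```python
import boto3

lambda_client = boto3.client("lambda")

# Cap async retries and event age, and route failed events to an SQS queue.
lambda_client.put_function_event_invoke_config(
    FunctionName="my-function",     # placeholder
    MaximumRetryAttempts=1,         # 0-2
    MaximumEventAgeInSeconds=3600,  # 60-21600
    DestinationConfig={
        "OnFailure": {"Destination": "arn:aws:sqs:us-east-1:123456789012:my-dlq"}
    },
)
```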
" - } - } - }, - "FunctionEventInvokeConfigList":{ - "type":"list", - "member":{"shape":"FunctionEventInvokeConfig"} - }, - "FunctionList":{ - "type":"list", - "member":{"shape":"FunctionConfiguration"} - }, - "FunctionName":{ - "type":"string", - "max":140, - "min":1, - "pattern":"(arn:(aws[a-zA-Z-]*)?:lambda:)?([a-z]{2}(-gov)?-[a-z]+-\\d{1}:)?(\\d{12}:)?(function:)?([a-zA-Z0-9-_]+)(:(\\$LATEST|[a-zA-Z0-9-_]+))?" - }, - "FunctionResponseType":{ - "type":"string", - "enum":["ReportBatchItemFailures"] - }, - "FunctionResponseTypeList":{ - "type":"list", - "member":{"shape":"FunctionResponseType"}, - "max":1, - "min":0 - }, - "FunctionUrl":{ - "type":"string", - "max":100, - "min":40 - }, - "FunctionUrlAuthType":{ - "type":"string", - "enum":[ - "NONE", - "AWS_IAM" - ] - }, - "FunctionUrlConfig":{ - "type":"structure", - "required":[ - "FunctionUrl", - "FunctionArn", - "CreationTime", - "LastModifiedTime", - "AuthType" - ], - "members":{ - "FunctionUrl":{ - "shape":"FunctionUrl", - "documentation":"

The HTTP URL endpoint for your function.

" - }, - "FunctionArn":{ - "shape":"FunctionArn", - "documentation":"

The Amazon Resource Name (ARN) of your function.

" - }, - "CreationTime":{ - "shape":"Timestamp", - "documentation":"

When the function URL was created, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" - }, - "LastModifiedTime":{ - "shape":"Timestamp", - "documentation":"

When the function URL configuration was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" - }, - "Cors":{ - "shape":"Cors", - "documentation":"

The cross-origin resource sharing (CORS) settings for your function URL.

" - }, - "AuthType":{ - "shape":"FunctionUrlAuthType", - "documentation":"

The type of authentication that your function URL uses. Set to AWS_IAM if you want to restrict access to authenticated users only. Set to NONE if you want to bypass IAM authentication to create a public endpoint. For more information, see Security and auth model for Lambda function URLs.

" - }, - "InvokeMode":{ - "shape":"InvokeMode", - "documentation":"

Use one of the following options:

" - } - }, - "documentation":"

Details about a Lambda function URL.

" - }, - "FunctionUrlConfigList":{ - "type":"list", - "member":{"shape":"FunctionUrlConfig"} - }, - "FunctionUrlQualifier":{ - "type":"string", - "max":128, - "min":1, - "pattern":"(^\\$LATEST$)|((?!^[0-9]+$)([a-zA-Z0-9-_]+))" - }, - "FunctionVersion":{ - "type":"string", - "enum":["ALL"] - }, - "GetAccountSettingsRequest":{ - "type":"structure", - "members":{} - }, - "GetAccountSettingsResponse":{ - "type":"structure", - "members":{ - "AccountLimit":{ - "shape":"AccountLimit", - "documentation":"

Limits that are related to concurrency and code storage.

" - }, - "AccountUsage":{ - "shape":"AccountUsage", - "documentation":"

The number of functions and amount of storage in use.

" - } - } - }, - "GetAliasRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "Name" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Name":{ - "shape":"Alias", - "documentation":"

The name of the alias.

", - "location":"uri", - "locationName":"Name" - } - } - }, - "GetCodeSigningConfigRequest":{ - "type":"structure", - "required":["CodeSigningConfigArn"], - "members":{ - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"

The Amazon Resource Name (ARN) of the code signing configuration.

", - "location":"uri", - "locationName":"CodeSigningConfigArn" - } - } - }, - "GetCodeSigningConfigResponse":{ - "type":"structure", - "required":["CodeSigningConfig"], - "members":{ - "CodeSigningConfig":{ - "shape":"CodeSigningConfig", - "documentation":"

The code signing configuration.

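The chunk that follows defines the request and response shapes for the new durable execution read operations (GetDurableExecutionHistory, GetDurableExecution, GetDurableExecutionState). Assuming boto3's standard operation-name mangling, paging through an execution's event history might look like the sketch below; these operations come from this model and may not exist in released boto3 clients:

```python
import boto3

lambda_client = boto3.client("lambda")

# Hypothetical: get_durable_execution_history is the expected boto3 name
# for GetDurableExecutionHistory in this model. The ARN is a placeholder
# shaped like the DurableExecutionArn pattern.
arn = (
    "arn:aws:lambda:us-east-1:123456789012:function:my-durable-function"
    ":$LATEST/durable-execution/my-namespace/my-execution"
)
page = lambda_client.get_durable_execution_history(
    DurableExecutionArn=arn,
    IncludeExecutionData=True,
    MaxItems=100,
)
for event in page["Events"]:
    print(event["EventId"], event["EventType"])
```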
" - } - } - }, - "GetDurableExecutionHistoryRequest":{ - "type":"structure", - "required":["DurableExecutionArn"], - "members":{ - "DurableExecutionArn":{ - "shape":"DurableExecutionArn", - "location":"uri", - "locationName":"DurableExecutionArn" - }, - "IncludeExecutionData":{ - "shape":"IncludeExecutionData", - "location":"querystring", - "locationName":"IncludeExecutionData" - }, - "MaxItems":{ - "shape":"ItemCount", - "location":"querystring", - "locationName":"MaxItems" - }, - "Marker":{ - "shape":"String", - "location":"querystring", - "locationName":"Marker" - }, - "ReverseOrder":{ - "shape":"ReverseOrder", - "location":"querystring", - "locationName":"ReverseOrder" - } - } - }, - "GetDurableExecutionHistoryResponse":{ - "type":"structure", - "required":["Events"], - "members":{ - "Events":{"shape":"Events"}, - "NextMarker":{"shape":"String"} - } - }, - "GetDurableExecutionRequest":{ - "type":"structure", - "required":["DurableExecutionArn"], - "members":{ - "DurableExecutionArn":{ - "shape":"DurableExecutionArn", - "location":"uri", - "locationName":"DurableExecutionArn" - } - } - }, - "GetDurableExecutionResponse":{ - "type":"structure", - "required":[ - "DurableExecutionArn", - "DurableExecutionName", - "FunctionArn", - "StartTimestamp", - "Status" - ], - "members":{ - "DurableExecutionArn":{"shape":"DurableExecutionArn"}, - "DurableExecutionName":{"shape":"DurableExecutionName"}, - "FunctionArn":{"shape":"FunctionArn"}, - "InputPayload":{"shape":"InputPayload"}, - "Result":{"shape":"OutputPayload"}, - "Error":{"shape":"ErrorObject"}, - "StartTimestamp":{"shape":"ExecutionTimestamp"}, - "Status":{"shape":"ExecutionStatus"}, - "EndTimestamp":{"shape":"ExecutionTimestamp"}, - "Version":{"shape":"Version"} - } - }, - "GetDurableExecutionStateRequest":{ - "type":"structure", - "required":[ - "DurableExecutionArn", - "CheckpointToken" - ], - "members":{ - "DurableExecutionArn":{ - "shape":"DurableExecutionArn", - "location":"uri", - "locationName":"DurableExecutionArn" - }, - "CheckpointToken":{ - "shape":"CheckpointToken", - "location":"querystring", - "locationName":"CheckpointToken" - }, - "Marker":{ - "shape":"String", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"ItemCount", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "GetDurableExecutionStateResponse":{ - "type":"structure", - "required":["Operations"], - "members":{ - "Operations":{"shape":"Operations"}, - "NextMarker":{"shape":"String"} - } - }, - "GetEventSourceMappingRequest":{ - "type":"structure", - "required":["UUID"], - "members":{ - "UUID":{ - "shape":"String", - "documentation":"

The identifier of the event source mapping.

", - "location":"uri", - "locationName":"UUID" - } - } - }, - "GetFunctionCodeSigningConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - } - } - }, - "GetFunctionCodeSigningConfigResponse":{ - "type":"structure", - "required":[ - "CodeSigningConfigArn", - "FunctionName" - ], - "members":{ - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"

The Amazon Resource Name (ARN) of the code signing configuration.

" - }, - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

" - } - } - }, - "GetFunctionConcurrencyRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - } - } - }, - "GetFunctionConcurrencyResponse":{ - "type":"structure", - "members":{ - "ReservedConcurrentExecutions":{ - "shape":"ReservedConcurrentExecutions", - "documentation":"

The number of simultaneous executions that are reserved for the function.

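A short sketch pairing this read with its PutFunctionConcurrency counterpart in boto3 (the function name is a placeholder):

```python
import boto3

lambda_client = boto3.client("lambda")

# Reserve 50 concurrent executions, then read the setting back.
lambda_client.put_function_concurrency(
    FunctionName="my-function",
    ReservedConcurrentExecutions=50,
)
resp = lambda_client.get_function_concurrency(FunctionName="my-function")
print(resp.get("ReservedConcurrentExecutions"))  # 50
```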
" - } - } - }, - "GetFunctionConfigurationRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"

The name or ARN of the Lambda function, version, or alias.

Name formats

You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

Specify a version or alias to get details about a published version of the function.

", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "GetFunctionEventInvokeConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function, version, or alias.

Name formats

You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

A version number or alias name.

", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "GetFunctionRecursionConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"UnqualifiedFunctionName", - "documentation":"

", - "location":"uri", - "locationName":"FunctionName" - } - } - }, - "GetFunctionRecursionConfigResponse":{ - "type":"structure", - "members":{ - "RecursiveLoop":{ - "shape":"RecursiveLoop", - "documentation":"

If your function's recursive loop detection configuration is Allow, Lambda doesn't take any action when it detects your function being invoked as part of a recursive loop.

If your function's recursive loop detection configuration is Terminate, Lambda stops invoking your function and notifies you when it detects your function being invoked as part of a recursive loop.

By default, Lambda sets your function's configuration to Terminate. You can update this configuration using the PutFunctionRecursionConfig action.

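A sketch of reading and updating this setting through boto3's recursion-config operations, which recent boto3 releases expose (the function name is a placeholder):

```python
import boto3

lambda_client = boto3.client("lambda")

# Terminate is the default; Allow opts the function out of
# recursive-loop termination.
lambda_client.put_function_recursion_config(
    FunctionName="my-function",
    RecursiveLoop="Allow",
)
resp = lambda_client.get_function_recursion_config(FunctionName="my-function")
print(resp["RecursiveLoop"])  # "Allow"
```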
" - } - } - }, - "GetFunctionRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"

The name or ARN of the Lambda function, version, or alias.

Name formats

You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

Specify a version or alias to get details about a published version of the function.

", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "GetFunctionResponse":{ - "type":"structure", - "members":{ - "Configuration":{ - "shape":"FunctionConfiguration", - "documentation":"

The configuration of the function or version.

" - }, - "Code":{ - "shape":"FunctionCodeLocation", - "documentation":"

The deployment package of the function or version.

" - }, - "Tags":{ - "shape":"Tags", - "documentation":"

The function's tags. Lambda returns tag data only if you have explicit allow permissions for lambda:ListTags.

" - }, - "TagsError":{ - "shape":"TagsError", - "documentation":"

An object that contains details about an error related to retrieving tags.

" - }, - "Concurrency":{ - "shape":"Concurrency", - "documentation":"

The function's reserved concurrency.

" - } - } - }, - "GetFunctionUrlConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"FunctionUrlQualifier", - "documentation":"

The alias name.

", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "GetFunctionUrlConfigResponse":{ - "type":"structure", - "required":[ - "FunctionUrl", - "FunctionArn", - "AuthType", - "CreationTime", - "LastModifiedTime" - ], - "members":{ - "FunctionUrl":{ - "shape":"FunctionUrl", - "documentation":"

The HTTP URL endpoint for your function.

" - }, - "FunctionArn":{ - "shape":"FunctionArn", - "documentation":"

The Amazon Resource Name (ARN) of your function.

" - }, - "AuthType":{ - "shape":"FunctionUrlAuthType", - "documentation":"

The type of authentication that your function URL uses. Set to AWS_IAM if you want to restrict access to authenticated users only. Set to NONE if you want to bypass IAM authentication to create a public endpoint. For more information, see Security and auth model for Lambda function URLs.
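
For example, a quick boto3 check of a function URL's endpoint and auth type might look like the following sketch (the function name is a placeholder):

```python
import boto3

lambda_client = boto3.client("lambda")

url_config = lambda_client.get_function_url_config(FunctionName="my-function")
print(url_config["FunctionUrl"])   # the HTTPS endpoint
print(url_config["AuthType"])      # "AWS_IAM" or "NONE"
print(url_config.get("InvokeMode", "BUFFERED"))  # an absent value implies buffered
```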

" - }, - "Cors":{ - "shape":"Cors", - "documentation":"

The cross-origin resource sharing (CORS) settings for your function URL.

" - }, - "CreationTime":{ - "shape":"Timestamp", - "documentation":"

When the function URL was created, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" - }, - "LastModifiedTime":{ - "shape":"Timestamp", - "documentation":"

When the function URL configuration was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" - }, - "InvokeMode":{ - "shape":"InvokeMode", - "documentation":"

Use one of the following options:

" - } - } - }, - "GetLayerVersionByArnRequest":{ - "type":"structure", - "required":["Arn"], - "members":{ - "Arn":{ - "shape":"LayerVersionArn", - "documentation":"

The ARN of the layer version.

", - "location":"querystring", - "locationName":"Arn" - } - } - }, - "GetLayerVersionPolicyRequest":{ - "type":"structure", - "required":[ - "LayerName", - "VersionNumber" - ], - "members":{ - "LayerName":{ - "shape":"LayerName", - "documentation":"

The name or Amazon Resource Name (ARN) of the layer.

", - "location":"uri", - "locationName":"LayerName" - }, - "VersionNumber":{ - "shape":"LayerVersionNumber", - "documentation":"

The version number.

", - "location":"uri", - "locationName":"VersionNumber" - } - } - }, - "GetLayerVersionPolicyResponse":{ - "type":"structure", - "members":{ - "Policy":{ - "shape":"String", - "documentation":"

The policy document.

" - }, - "RevisionId":{ - "shape":"String", - "documentation":"

A unique identifier for the current revision of the policy.

" - } - } - }, - "GetLayerVersionRequest":{ - "type":"structure", - "required":[ - "LayerName", - "VersionNumber" - ], - "members":{ - "LayerName":{ - "shape":"LayerName", - "documentation":"

The name or Amazon Resource Name (ARN) of the layer.

", - "location":"uri", - "locationName":"LayerName" - }, - "VersionNumber":{ - "shape":"LayerVersionNumber", - "documentation":"

The version number.

", - "location":"uri", - "locationName":"VersionNumber" - } - } - }, - "GetLayerVersionResponse":{ - "type":"structure", - "members":{ - "Content":{ - "shape":"LayerVersionContentOutput", - "documentation":"

Details about the layer version.

" - }, - "LayerArn":{ - "shape":"LayerArn", - "documentation":"

The ARN of the layer.

" - }, - "LayerVersionArn":{ - "shape":"LayerVersionArn", - "documentation":"

The ARN of the layer version.

" - }, - "Description":{ - "shape":"Description", - "documentation":"

The description of the version.

" - }, - "CreatedDate":{ - "shape":"Timestamp", - "documentation":"

The date that the layer version was created, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" - }, - "Version":{ - "shape":"LayerVersionNumber", - "documentation":"

The version number.

" - }, - "CompatibleRuntimes":{ - "shape":"CompatibleRuntimes", - "documentation":"

The layer's compatible runtimes.

The following list includes deprecated runtimes. For more information, see Runtime use after deprecation.

For a list of all currently supported runtimes, see Supported runtimes.

" - }, - "LicenseInfo":{ - "shape":"LicenseInfo", - "documentation":"

The layer's software license.

" - }, - "CompatibleArchitectures":{ - "shape":"CompatibleArchitectures", - "documentation":"

A list of compatible instruction set architectures.

" - } - } - }, - "GetPolicyRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"

The name or ARN of the Lambda function, version, or alias.

Name formats

You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

Specify a version or alias to get the policy for that resource.

", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "GetPolicyResponse":{ - "type":"structure", - "members":{ - "Policy":{ - "shape":"String", - "documentation":"

The resource-based policy.

" - }, - "RevisionId":{ - "shape":"String", - "documentation":"

A unique identifier for the current revision of the policy.
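
The RevisionId is what enables optimistic locking: read the policy, then pass the same RevisionId to a mutating call such as RemovePermission so the change applies only if the policy hasn't changed underneath you. A hedged boto3 sketch (the function name and statement ID are placeholders):

```python
import json

import boto3

lambda_client = boto3.client("lambda")

resp = lambda_client.get_policy(FunctionName="my-function")
policy = json.loads(resp["Policy"])  # the policy is returned as a JSON string
print([s["Sid"] for s in policy["Statement"]])

# Conditional delete: raises PreconditionFailedException if the policy is stale.
lambda_client.remove_permission(
    FunctionName="my-function",
    StatementId="example-sid",
    RevisionId=resp["RevisionId"],
)
```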

" - } - } - }, - "GetProvisionedConcurrencyConfigRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "Qualifier" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

The version number or alias name.

", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "GetProvisionedConcurrencyConfigResponse":{ - "type":"structure", - "members":{ - "RequestedProvisionedConcurrentExecutions":{ - "shape":"PositiveInteger", - "documentation":"

The amount of provisioned concurrency requested.

" - }, - "AvailableProvisionedConcurrentExecutions":{ - "shape":"NonNegativeInteger", - "documentation":"

The amount of provisioned concurrency available.

" - }, - "AllocatedProvisionedConcurrentExecutions":{ - "shape":"NonNegativeInteger", - "documentation":"

The amount of provisioned concurrency allocated. When a weighted alias is used during linear and canary deployments, this value fluctuates depending on the amount of concurrency that is provisioned for the function versions.

" - }, - "Status":{ - "shape":"ProvisionedConcurrencyStatusEnum", - "documentation":"

The status of the allocation process.

" - }, - "StatusReason":{ - "shape":"String", - "documentation":"

For failed allocations, the reason that provisioned concurrency could not be allocated.

" - }, - "LastModified":{ - "shape":"Timestamp", - "documentation":"

The date and time that a user last updated the configuration, in ISO 8601 format.
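
Requested vs. available vs. allocated is clearer with a concrete read. A minimal boto3 sketch (the function and alias names are placeholders):

```python
import boto3

lambda_client = boto3.client("lambda")

cfg = lambda_client.get_provisioned_concurrency_config(
    FunctionName="my-function",
    Qualifier="live",  # provisioned concurrency always targets a version or alias
)
print(cfg["RequestedProvisionedConcurrentExecutions"])
print(cfg["AvailableProvisionedConcurrentExecutions"])
print(cfg["AllocatedProvisionedConcurrentExecutions"])
print(cfg["Status"])  # IN_PROGRESS, READY, or FAILED
```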

" - } - } - }, - "GetRuntimeManagementConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

Specify a version of the function. This can be $LATEST or a published version number. If no value is specified, the configuration for the $LATEST version is returned.

", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "GetRuntimeManagementConfigResponse":{ - "type":"structure", - "members":{ - "UpdateRuntimeOn":{ - "shape":"UpdateRuntimeOn", - "documentation":"

The current runtime update mode of the function.

" - }, - "RuntimeVersionArn":{ - "shape":"RuntimeVersionArn", - "documentation":"

The ARN of the runtime the function is configured to use. If the runtime update mode is Manual, the ARN is returned; otherwise, null is returned.
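
That null-unless-Manual behavior surfaces in boto3 as a missing key, so read it with .get(). A short sketch (the function name is a placeholder):

```python
import boto3

lambda_client = boto3.client("lambda")

rt = lambda_client.get_runtime_management_config(FunctionName="my-function")
print(rt["UpdateRuntimeOn"])        # "Auto", "FunctionUpdate", or "Manual"
print(rt.get("RuntimeVersionArn"))  # only populated in Manual mode
```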

" - }, - "FunctionArn":{ - "shape":"NameSpacedFunctionArn", - "documentation":"

The Amazon Resource Name (ARN) of your function.

" - } - } - }, - "Handler":{ - "type":"string", - "max":128, - "min":0, - "pattern":"[^\\s]+" - }, - "Header":{ - "type":"string", - "max":1024, - "min":0, - "pattern":".*" - }, - "HeadersList":{ - "type":"list", - "member":{"shape":"Header"}, - "max":100, - "min":0 - }, - "HttpStatus":{"type":"integer"}, - "ImageConfig":{ - "type":"structure", - "members":{ - "EntryPoint":{ - "shape":"StringList", - "documentation":"

Specifies the entry point to your application, which is typically the location of the runtime executable.

" - }, - "Command":{ - "shape":"StringList", - "documentation":"

Specifies parameters that you want to pass in with ENTRYPOINT.

" - }, - "WorkingDirectory":{ - "shape":"WorkingDirectory", - "documentation":"

Specifies the working directory.

" - } - }, - "documentation":"

Configuration values that override the container image Dockerfile settings. For more information, see Container image settings.

" - }, - "ImageConfigError":{ - "type":"structure", - "members":{ - "ErrorCode":{ - "shape":"String", - "documentation":"

Error code.

" - }, - "Message":{ - "shape":"SensitiveString", - "documentation":"

Error message.

" - } - }, - "documentation":"

Error response to GetFunctionConfiguration.

" - }, - "ImageConfigResponse":{ - "type":"structure", - "members":{ - "ImageConfig":{ - "shape":"ImageConfig", - "documentation":"

Configuration values that override the container image Dockerfile.

" - }, - "Error":{ - "shape":"ImageConfigError", - "documentation":"

Error response to GetFunctionConfiguration.

" - } - }, - "documentation":"

Response to a GetFunctionConfiguration request.

" - }, - "IncludeExecutionData":{ - "type":"boolean", - "box":true - }, - "InputPayload":{ - "type":"string", - "max":6291456, - "min":0, - "sensitive":true - }, - "Integer":{"type":"integer"}, - "InvalidCodeSignatureException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

The code signature failed the integrity check. If the integrity check fails, then Lambda blocks deployment, even if the code signing policy is set to WARN.

", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "InvalidParameterValueException":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"String", - "documentation":"

The exception type.

" - }, - "message":{ - "shape":"String", - "documentation":"

The exception message.

" - } - }, - "documentation":"

One of the parameters in the request is not valid.

", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "InvalidRequestContentException":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"String", - "documentation":"

The exception type.

" - }, - "message":{ - "shape":"String", - "documentation":"

The exception message.

" - } - }, - "documentation":"

The request body could not be parsed as JSON, or a request header is invalid. For example, the 'x-amzn-RequestId' header is not a valid UUID string.

", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "InvalidRuntimeException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

The runtime or runtime version specified is not supported.

", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "InvalidSecurityGroupIDException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

The security group ID provided in the Lambda function VPC configuration is not valid.

", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "InvalidSubnetIDException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

The subnet ID provided in the Lambda function VPC configuration is not valid.

", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "InvalidZipFileException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

Lambda could not unzip the deployment package.

", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "InvocationCompletedDetails":{ - "type":"structure", - "required":[ - "StartTimestamp", - "EndTimestamp", - "RequestId" - ], - "members":{ - "StartTimestamp":{"shape":"ExecutionTimestamp"}, - "EndTimestamp":{"shape":"ExecutionTimestamp"}, - "RequestId":{"shape":"String"}, - "Error":{"shape":"EventError"} - } - }, - "InvocationRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"

The name or ARN of the Lambda function, version, or alias.

Name formats

You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "InvocationType":{ - "shape":"InvocationType", - "documentation":"

Choose from the following options.

", - "location":"header", - "locationName":"X-Amz-Invocation-Type" - }, - "LogType":{ - "shape":"LogType", - "documentation":"

Set to Tail to include the execution log in the response. Applies to synchronously invoked functions only.

", - "location":"header", - "locationName":"X-Amz-Log-Type" - }, - "ClientContext":{ - "shape":"String", - "documentation":"

Up to 3,583 bytes of base64-encoded data about the invoking client to pass to the function in the context object. Lambda passes the ClientContext object to your function for synchronous invocations only.

", - "location":"header", - "locationName":"X-Amz-Client-Context" - }, - "DurableExecutionName":{ - "shape":"DurableExecutionName", - "location":"header", - "locationName":"X-Amz-Durable-Execution-Name" - }, - "Payload":{ - "shape":"Blob", - "documentation":"

The JSON that you want to provide to your Lambda function as input.

You can enter the JSON directly. For example, --payload '{ \"key\": \"value\" }'. You can also specify a file path. For example, --payload file://payload.json.
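
Outside the CLI, the same request maps directly onto boto3's invoke, where Payload is bytes or a JSON string and the response payload is a streaming body. A minimal sketch (the function name is a placeholder):

```python
import base64
import json

import boto3

lambda_client = boto3.client("lambda")

resp = lambda_client.invoke(
    FunctionName="my-function",
    InvocationType="RequestResponse",   # synchronous
    LogType="Tail",                     # return the last 4 KB of logs
    Payload=json.dumps({"key": "value"}),
)
print(resp["StatusCode"])                  # 200 for RequestResponse
print(json.loads(resp["Payload"].read()))  # function result, or an error object
if "FunctionError" in resp:
    print("function raised:", resp["FunctionError"])
if "LogResult" in resp:
    print(base64.b64decode(resp["LogResult"]).decode())
```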

" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

Specify a version or alias to invoke a published version of the function.

", - "location":"querystring", - "locationName":"Qualifier" - } - }, - "payload":"Payload" - }, - "InvocationResponse":{ - "type":"structure", - "members":{ - "StatusCode":{ - "shape":"Integer", - "documentation":"

The HTTP status code is in the 200 range for a successful request. For the RequestResponse invocation type, this status code is 200. For the Event invocation type, this status code is 202. For the DryRun invocation type, the status code is 204.

", - "location":"statusCode" - }, - "FunctionError":{ - "shape":"String", - "documentation":"

If present, indicates that an error occurred during function execution. Details about the error are included in the response payload.

", - "location":"header", - "locationName":"X-Amz-Function-Error" - }, - "LogResult":{ - "shape":"String", - "documentation":"

The last 4 KB of the execution log, which is base64-encoded.

", - "location":"header", - "locationName":"X-Amz-Log-Result" - }, - "Payload":{ - "shape":"Blob", - "documentation":"

The response from the function, or an error object.

" - }, - "ExecutedVersion":{ - "shape":"Version", - "documentation":"

The version of the function that executed. When you invoke a function with an alias, this indicates which version the alias resolved to.

", - "location":"header", - "locationName":"X-Amz-Executed-Version" - }, - "DurableExecutionArn":{ - "shape":"DurableExecutionArn", - "location":"header", - "locationName":"X-Amz-Durable-Execution-Arn" - } - }, - "payload":"Payload" - }, - "InvocationType":{ - "type":"string", - "enum":[ - "Event", - "RequestResponse", - "DryRun" - ] - }, - "InvokeAsyncRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "InvokeArgs" - ], - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "InvokeArgs":{ - "shape":"BlobStream", - "documentation":"

The JSON that you want to provide to your Lambda function as input.

" - } - }, - "deprecated":true, - "payload":"InvokeArgs" - }, - "InvokeAsyncResponse":{ - "type":"structure", - "members":{ - "Status":{ - "shape":"HttpStatus", - "documentation":"

The status code.

", - "location":"statusCode" - } - }, - "deprecated":true, - "payload":"Body", - "documentation":"

A success response (202 Accepted) indicates that the request is queued for invocation.

" - }, - "InvokeCancelledDetails":{ - "type":"structure", - "members":{ - "Error":{"shape":"EventError"} - } - }, - "ChainedInvokeDetails":{ - "type":"structure", - "members":{ - "Result":{"shape":"OperationPayload"}, - "Error":{"shape":"ErrorObject"} - } - }, - "InvokeFailedDetails":{ - "type":"structure", - "members":{ - "Error":{"shape":"EventError"}, - "RetryDetails":{"shape":"RetryDetails"} - } - }, - "InvokeMode":{ - "type":"string", - "enum":[ - "BUFFERED", - "RESPONSE_STREAM" - ] - }, - "InvokeResponseStreamUpdate":{ - "type":"structure", - "members":{ - "Payload":{ - "shape":"Blob", - "documentation":"

Data returned by your Lambda function.

", - "eventpayload":true - } - }, - "documentation":"

A chunk of the streamed response payload.

", - "event":true - }, - "InvokeWithResponseStreamCompleteEvent":{ - "type":"structure", - "members":{ - "ErrorCode":{ - "shape":"String", - "documentation":"

An error code.

" - }, - "ErrorDetails":{ - "shape":"String", - "documentation":"

The details of any returned error.

" - }, - "LogResult":{ - "shape":"String", - "documentation":"

The last 4 KB of the execution log, which is base64-encoded.

" - } - }, - "documentation":"

A response confirming that the event stream is complete.

", - "event":true - }, - "InvokeWithResponseStreamRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "InvocationType":{ - "shape":"ResponseStreamingInvocationType", - "documentation":"

Use one of the following options:

", - "location":"header", - "locationName":"X-Amz-Invocation-Type" - }, - "LogType":{ - "shape":"LogType", - "documentation":"

Set to Tail to include the execution log in the response. Applies to synchronously invoked functions only.

", - "location":"header", - "locationName":"X-Amz-Log-Type" - }, - "ClientContext":{ - "shape":"String", - "documentation":"

Up to 3,583 bytes of base64-encoded data about the invoking client to pass to the function in the context object.

", - "location":"header", - "locationName":"X-Amz-Client-Context" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

The alias name.

", - "location":"querystring", - "locationName":"Qualifier" - }, - "Payload":{ - "shape":"Blob", - "documentation":"

The JSON that you want to provide to your Lambda function as input.

You can enter the JSON directly. For example, --payload '{ \"key\": \"value\" }'. You can also specify a file path. For example, --payload file://payload.json.
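
With boto3, the streaming variant returns an EventStream to iterate: each event carries either a PayloadChunk or the final InvokeComplete. A sketch, assuming a function configured with the RESPONSE_STREAM invoke mode (the name is a placeholder):

```python
import json

import boto3

lambda_client = boto3.client("lambda")

resp = lambda_client.invoke_with_response_stream(
    FunctionName="my-function",
    Payload=json.dumps({"key": "value"}),
)
for event in resp["EventStream"]:
    if "PayloadChunk" in event:
        # Bytes of the streamed response, in order.
        print(event["PayloadChunk"]["Payload"].decode(), end="")
    elif "InvokeComplete" in event:
        done = event["InvokeComplete"]
        if "ErrorCode" in done:
            print("stream failed:", done["ErrorCode"])
```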

" - } - }, - "payload":"Payload" - }, - "InvokeWithResponseStreamResponse":{ - "type":"structure", - "members":{ - "StatusCode":{ - "shape":"Integer", - "documentation":"

For a successful request, the HTTP status code is in the 200 range. For the RequestResponse invocation type, this status code is 200. For the DryRun invocation type, this status code is 204.

", - "location":"statusCode" - }, - "ExecutedVersion":{ - "shape":"Version", - "documentation":"

The version of the function that executed. When you invoke a function with an alias, this indicates which version the alias resolved to.

", - "location":"header", - "locationName":"X-Amz-Executed-Version" - }, - "EventStream":{ - "shape":"InvokeWithResponseStreamResponseEvent", - "documentation":"

The stream of response payloads.

" - }, - "ResponseStreamContentType":{ - "shape":"String", - "documentation":"

The type of data the stream is returning.

", - "location":"header", - "locationName":"Content-Type" - } - }, - "payload":"EventStream" - }, - "InvokeWithResponseStreamResponseEvent":{ - "type":"structure", - "members":{ - "PayloadChunk":{ - "shape":"InvokeResponseStreamUpdate", - "documentation":"

A chunk of the streamed response payload.

" - }, - "InvokeComplete":{ - "shape":"InvokeWithResponseStreamCompleteEvent", - "documentation":"

An object that's returned when the stream has ended and all the payload chunks have been returned.

" - } - }, - "documentation":"

An object that includes a chunk of the response payload. When the stream has ended, Lambda includes an InvokeComplete object.

", - "eventstream":true - }, - "ItemCount":{ - "type":"integer", - "max":1000, - "min":0 - }, - "KMSAccessDeniedException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

Lambda couldn't decrypt the environment variables because KMS access was denied. Check the Lambda function's KMS permissions.

", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "KMSDisabledException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

Lambda couldn't decrypt the environment variables because the KMS key used is disabled. Check the Lambda function's KMS key settings.

", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "KMSInvalidStateException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

Lambda couldn't decrypt the environment variables because the state of the KMS key used is not valid for Decrypt. Check the function's KMS key settings.

", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "KMSKeyArn":{ - "type":"string", - "pattern":"(arn:(aws[a-zA-Z-]*)?:[a-z0-9-.]+:.*)|()" - }, - "KMSNotFoundException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

Lambda couldn't decrypt the environment variables because the KMS key was not found. Check the function's KMS key settings.

", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "KafkaSchemaRegistryAccessConfig":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"KafkaSchemaRegistryAuthType", - "documentation":"

The type of authentication Lambda uses to access your schema registry.

" - }, - "URI":{ - "shape":"Arn", - "documentation":"

The URI of the secret (Secrets Manager secret ARN) to authenticate with your schema registry.

" - } - }, - "documentation":"

Specific access configuration settings that tell Lambda how to authenticate with your schema registry.

If you're working with a Glue schema registry, don't provide authentication details in this object. Instead, ensure that your execution role has the required permissions for Lambda to access your cluster.

If you're working with a Confluent schema registry, choose the authentication method in the Type field, and provide the Secrets Manager secret ARN in the URI field.
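
Putting those rules together, the Confluent case carries explicit auth while the Glue case relies on the execution role alone. A sketch of the Confluent shape as a Python dict, ready to pass as the schema registry config of a Kafka event source mapping (the exact parameter placement on CreateEventSourceMapping is an assumption here; the URI and ARN are placeholders):

```python
# Hypothetical wiring for a Confluent registry with basic auth.
schema_registry_config = {
    "SchemaRegistryURI": "https://registry.example.com",  # placeholder
    "EventRecordFormat": "JSON",
    "AccessConfigs": [
        {
            "Type": "BASIC_AUTH",  # credentials held in Secrets Manager
            "URI": "arn:aws:secretsmanager:us-east-1:123456789012:secret:registry-creds",
        }
    ],
    "SchemaValidationConfigs": [{"Attribute": "VALUE"}],
}
```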

" - }, - "KafkaSchemaRegistryAccessConfigList":{ - "type":"list", - "member":{"shape":"KafkaSchemaRegistryAccessConfig"} - }, - "KafkaSchemaRegistryAuthType":{ - "type":"string", - "enum":[ - "BASIC_AUTH", - "CLIENT_CERTIFICATE_TLS_AUTH", - "SERVER_ROOT_CA_CERTIFICATE" - ] - }, - "KafkaSchemaRegistryConfig":{ - "type":"structure", - "members":{ - "SchemaRegistryURI":{ - "shape":"SchemaRegistryUri", - "documentation":"

The URI for your schema registry. The correct URI format depends on the type of schema registry you're using.

" - }, - "EventRecordFormat":{ - "shape":"SchemaRegistryEventRecordFormat", - "documentation":"

The record format that Lambda delivers to your function after schema validation.

" - }, - "AccessConfigs":{ - "shape":"KafkaSchemaRegistryAccessConfigList", - "documentation":"

An array of access configuration objects that tell Lambda how to authenticate with your schema registry.

" - }, - "SchemaValidationConfigs":{ - "shape":"KafkaSchemaValidationConfigList", - "documentation":"

An array of schema validation configuration objects, which tell Lambda the message attributes you want to validate and filter using your schema registry.

" - } - }, - "documentation":"

Specific configuration settings for a Kafka schema registry.

" - }, - "KafkaSchemaValidationAttribute":{ - "type":"string", - "enum":[ - "KEY", - "VALUE" - ] - }, - "KafkaSchemaValidationConfig":{ - "type":"structure", - "members":{ - "Attribute":{ - "shape":"KafkaSchemaValidationAttribute", - "documentation":"

The attributes you want your schema registry to validate and filter for. If you selected JSON as the EventRecordFormat, Lambda also deserializes the selected message attributes.

" - } - }, - "documentation":"

Specific schema validation configuration settings that tell Lambda the message attributes you want to validate and filter using your schema registry.

" - }, - "KafkaSchemaValidationConfigList":{ - "type":"list", - "member":{"shape":"KafkaSchemaValidationConfig"} - }, - "LastUpdateStatus":{ - "type":"string", - "enum":[ - "Successful", - "Failed", - "InProgress" - ] - }, - "LastUpdateStatusReason":{"type":"string"}, - "LastUpdateStatusReasonCode":{ - "type":"string", - "enum":[ - "EniLimitExceeded", - "InsufficientRolePermissions", - "InvalidConfiguration", - "InternalError", - "SubnetOutOfIPAddresses", - "InvalidSubnet", - "InvalidSecurityGroup", - "ImageDeleted", - "ImageAccessDenied", - "InvalidImage", - "KMSKeyAccessDenied", - "KMSKeyNotFound", - "InvalidStateKMSKey", - "DisabledKMSKey", - "EFSIOError", - "EFSMountConnectivityError", - "EFSMountFailure", - "EFSMountTimeout", - "InvalidRuntime", - "InvalidZipFileException", - "FunctionError" - ] - }, - "Layer":{ - "type":"structure", - "members":{ - "Arn":{ - "shape":"LayerVersionArn", - "documentation":"

The Amazon Resource Name (ARN) of the function layer.

" - }, - "CodeSize":{ - "shape":"Long", - "documentation":"

The size of the layer archive in bytes.

" - }, - "SigningProfileVersionArn":{ - "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) for a signing profile version.

" - }, - "SigningJobArn":{ - "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of a signing job.

" - } - }, - "documentation":"

A Lambda layer.

" - }, - "LayerArn":{ - "type":"string", - "max":140, - "min":1, - "pattern":"arn:[a-zA-Z0-9-]+:lambda:[a-zA-Z0-9-]+:\\d{12}:layer:[a-zA-Z0-9-_]+" - }, - "LayerList":{ - "type":"list", - "member":{"shape":"LayerVersionArn"} - }, - "LayerName":{ - "type":"string", - "max":140, - "min":1, - "pattern":"(arn:[a-zA-Z0-9-]+:lambda:[a-zA-Z0-9-]+:\\d{12}:layer:[a-zA-Z0-9-_]+)|[a-zA-Z0-9-_]+" - }, - "LayerPermissionAllowedAction":{ - "type":"string", - "max":22, - "min":0, - "pattern":"lambda:GetLayerVersion" - }, - "LayerPermissionAllowedPrincipal":{ - "type":"string", - "pattern":"\\d{12}|\\*|arn:(aws[a-zA-Z-]*):iam::\\d{12}:root" - }, - "LayerVersionArn":{ - "type":"string", - "max":140, - "min":1, - "pattern":"arn:[a-zA-Z0-9-]+:lambda:[a-zA-Z0-9-]+:\\d{12}:layer:[a-zA-Z0-9-_]+:[0-9]+" - }, - "LayerVersionContentInput":{ - "type":"structure", - "members":{ - "S3Bucket":{ - "shape":"S3Bucket", - "documentation":"

The Amazon S3 bucket of the layer archive.

" - }, - "S3Key":{ - "shape":"S3Key", - "documentation":"

The Amazon S3 key of the layer archive.

" - }, - "S3ObjectVersion":{ - "shape":"S3ObjectVersion", - "documentation":"

For versioned objects, the version of the layer archive object to use.

" - }, - "ZipFile":{ - "shape":"Blob", - "documentation":"

The base64-encoded contents of the layer archive. Amazon Web Services SDK and Amazon Web Services CLI clients handle the encoding for you.

" - } - }, - "documentation":"

A ZIP archive that contains the contents of a Lambda layer. You can specify either an Amazon S3 location or upload a layer archive directly.
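
For instance, publishing a layer from an S3 object with boto3 looks like the following sketch (the bucket, key, and layer name are placeholders):

```python
import boto3

lambda_client = boto3.client("lambda")

resp = lambda_client.publish_layer_version(
    LayerName="my-layer",
    Description="shared dependencies",
    Content={"S3Bucket": "my-bucket", "S3Key": "layers/deps.zip"},
    CompatibleRuntimes=["python3.12"],
    CompatibleArchitectures=["arm64"],
)
print(resp["LayerVersionArn"], resp["Version"])
```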

" - }, - "LayerVersionContentOutput":{ - "type":"structure", - "members":{ - "Location":{ - "shape":"String", - "documentation":"

A link to the layer archive in Amazon S3 that is valid for 10 minutes.

" - }, - "CodeSha256":{ - "shape":"String", - "documentation":"

The SHA-256 hash of the layer archive.

" - }, - "CodeSize":{ - "shape":"Long", - "documentation":"

The size of the layer archive in bytes.

" - }, - "SigningProfileVersionArn":{ - "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) for a signing profile version.

" - }, - "SigningJobArn":{ - "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of a signing job.

" - } - }, - "documentation":"

Details about a version of a Lambda layer.

" - }, - "LayerVersionNumber":{"type":"long"}, - "LayerVersionsList":{ - "type":"list", - "member":{"shape":"LayerVersionsListItem"} - }, - "LayerVersionsListItem":{ - "type":"structure", - "members":{ - "LayerVersionArn":{ - "shape":"LayerVersionArn", - "documentation":"

The ARN of the layer version.

" - }, - "Version":{ - "shape":"LayerVersionNumber", - "documentation":"

The version number.

" - }, - "Description":{ - "shape":"Description", - "documentation":"

The description of the version.

" - }, - "CreatedDate":{ - "shape":"Timestamp", - "documentation":"

The date that the version was created, in ISO 8601 format. For example, 2018-11-27T15:10:45.123+0000.

" - }, - "CompatibleRuntimes":{ - "shape":"CompatibleRuntimes", - "documentation":"

The layer's compatible runtimes.

The following list includes deprecated runtimes. For more information, see Runtime use after deprecation.

For a list of all currently supported runtimes, see Supported runtimes.

" - }, - "LicenseInfo":{ - "shape":"LicenseInfo", - "documentation":"

The layer's open-source license.

" - }, - "CompatibleArchitectures":{ - "shape":"CompatibleArchitectures", - "documentation":"

A list of compatible instruction set architectures.

" - } - }, - "documentation":"

Details about a version of a Lambda layer.

" - }, - "LayersList":{ - "type":"list", - "member":{"shape":"LayersListItem"} - }, - "LayersListItem":{ - "type":"structure", - "members":{ - "LayerName":{ - "shape":"LayerName", - "documentation":"

The name of the layer.

" - }, - "LayerArn":{ - "shape":"LayerArn", - "documentation":"

The Amazon Resource Name (ARN) of the function layer.

" - }, - "LatestMatchingVersion":{ - "shape":"LayerVersionsListItem", - "documentation":"

The newest version of the layer.

" - } - }, - "documentation":"

Details about a Lambda layer.

" - }, - "LayersReferenceList":{ - "type":"list", - "member":{"shape":"Layer"} - }, - "LicenseInfo":{ - "type":"string", - "max":512, - "min":0 - }, - "ListAliasesRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "FunctionVersion":{ - "shape":"Version", - "documentation":"

Specify a function version to only list aliases that invoke that version.

", - "location":"querystring", - "locationName":"FunctionVersion" - }, - "Marker":{ - "shape":"String", - "documentation":"

Specify the pagination token that's returned by a previous request to retrieve the next page of results.

", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxListItems", - "documentation":"

Limit the number of aliases returned.

", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListAliasesResponse":{ - "type":"structure", - "members":{ - "NextMarker":{ - "shape":"String", - "documentation":"

The pagination token that's included if more results are available.

" - }, - "Aliases":{ - "shape":"AliasList", - "documentation":"

A list of aliases.

" - } - } - }, - "ListCodeSigningConfigsRequest":{ - "type":"structure", - "members":{ - "Marker":{ - "shape":"String", - "documentation":"

Specify the pagination token that's returned by a previous request to retrieve the next page of results.

", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxListItems", - "documentation":"

Maximum number of items to return.

", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListCodeSigningConfigsResponse":{ - "type":"structure", - "members":{ - "NextMarker":{ - "shape":"String", - "documentation":"

The pagination token that's included if more results are available.

" - }, - "CodeSigningConfigs":{ - "shape":"CodeSigningConfigList", - "documentation":"

The code signing configurations.

" - } - } - }, - "ListDurableExecutionsByFunctionRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "location":"querystring", - "locationName":"Qualifier" - }, - "DurableExecutionName":{ - "shape":"DurableExecutionName", - "location":"querystring", - "locationName":"DurableExecutionName" - }, - "Statuses":{ - "shape":"ExecutionStatusList", - "location":"querystring", - "locationName":"Statuses" - }, - "StartedAfter":{ - "shape":"ExecutionTimestamp", - "location":"querystring", - "locationName":"StartedAfter" - }, - "StartedBefore":{ - "shape":"ExecutionTimestamp", - "location":"querystring", - "locationName":"StartedBefore" - }, - "ReverseOrder":{ - "shape":"ReverseOrder", - "location":"querystring", - "locationName":"ReverseOrder" - }, - "Marker":{ - "shape":"String", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"ItemCount", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListDurableExecutionsByFunctionResponse":{ - "type":"structure", - "members":{ - "DurableExecutions":{"shape":"DurableExecutions"}, - "NextMarker":{"shape":"String"} - } - }, - "ListEventSourceMappingsRequest":{ - "type":"structure", - "members":{ - "EventSourceArn":{ - "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the event source.

", - "location":"querystring", - "locationName":"EventSourceArn" - }, - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it's limited to 64 characters in length.

", - "location":"querystring", - "locationName":"FunctionName" - }, - "Marker":{ - "shape":"String", - "documentation":"

A pagination token returned by a previous call.

", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxListItems", - "documentation":"

The maximum number of event source mappings to return. Note that ListEventSourceMappings returns a maximum of 100 items in each response, even if you set the number higher.

", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListEventSourceMappingsResponse":{ - "type":"structure", - "members":{ - "NextMarker":{ - "shape":"String", - "documentation":"

A pagination token that's returned when the response doesn't contain all event source mappings.

" - }, - "EventSourceMappings":{ - "shape":"EventSourceMappingsList", - "documentation":"

A list of event source mappings.

" - } - } - }, - "ListFunctionEventInvokeConfigsRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Marker":{ - "shape":"String", - "documentation":"

Specify the pagination token that's returned by a previous request to retrieve the next page of results.

", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxFunctionEventInvokeConfigListItems", - "documentation":"

The maximum number of configurations to return.

", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListFunctionEventInvokeConfigsResponse":{ - "type":"structure", - "members":{ - "FunctionEventInvokeConfigs":{ - "shape":"FunctionEventInvokeConfigList", - "documentation":"

A list of configurations.

" - }, - "NextMarker":{ - "shape":"String", - "documentation":"

The pagination token that's included if more results are available.

" - } - } - }, - "ListFunctionUrlConfigsRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Marker":{ - "shape":"String", - "documentation":"

Specify the pagination token that's returned by a previous request to retrieve the next page of results.

", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxItems", - "documentation":"

The maximum number of function URLs to return in the response. Note that ListFunctionUrlConfigs returns a maximum of 50 items in each response, even if you set the number higher.

", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListFunctionUrlConfigsResponse":{ - "type":"structure", - "required":["FunctionUrlConfigs"], - "members":{ - "FunctionUrlConfigs":{ - "shape":"FunctionUrlConfigList", - "documentation":"

A list of function URL configurations.

" - }, - "NextMarker":{ - "shape":"String", - "documentation":"

The pagination token that's included if more results are available.

" - } - } - }, - "ListFunctionsByCodeSigningConfigRequest":{ - "type":"structure", - "required":["CodeSigningConfigArn"], - "members":{ - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"

The Amazon Resource Name (ARN) of the code signing configuration.

", - "location":"uri", - "locationName":"CodeSigningConfigArn" - }, - "Marker":{ - "shape":"String", - "documentation":"

Specify the pagination token that's returned by a previous request to retrieve the next page of results.

", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxListItems", - "documentation":"

Maximum number of items to return.

", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListFunctionsByCodeSigningConfigResponse":{ - "type":"structure", - "members":{ - "NextMarker":{ - "shape":"String", - "documentation":"

The pagination token that's included if more results are available.

" - }, - "FunctionArns":{ - "shape":"FunctionArnList", - "documentation":"

The function ARNs.

" - } - } - }, - "ListFunctionsRequest":{ - "type":"structure", - "members":{ - "MasterRegion":{ - "shape":"MasterRegion", - "documentation":"

For Lambda@Edge functions, the Amazon Web Services Region of the master function. For example, us-east-1 filters the list of functions to include only Lambda@Edge functions replicated from a master function in US East (N. Virginia). If specified, you must set FunctionVersion to ALL.

", - "location":"querystring", - "locationName":"MasterRegion" - }, - "FunctionVersion":{ - "shape":"FunctionVersion", - "documentation":"

Set to ALL to include entries for all published versions of each function.

", - "location":"querystring", - "locationName":"FunctionVersion" - }, - "Marker":{ - "shape":"String", - "documentation":"

Specify the pagination token that's returned by a previous request to retrieve the next page of results.

", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxListItems", - "documentation":"

The maximum number of functions to return in the response. Note that ListFunctions returns a maximum of 50 items in each response, even if you set the number higher.

", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListFunctionsResponse":{ - "type":"structure", - "members":{ - "NextMarker":{ - "shape":"String", - "documentation":"

The pagination token that's included if more results are available.

" - }, - "Functions":{ - "shape":"FunctionList", - "documentation":"

A list of Lambda functions.

" - } - }, - "documentation":"

A list of Lambda functions.
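
The Marker/NextMarker pairs that recur throughout these list operations are exactly what boto3 paginators hide. A sketch that walks every function, including published versions:

```python
import boto3

lambda_client = boto3.client("lambda")

paginator = lambda_client.get_paginator("list_functions")
for page in paginator.paginate(FunctionVersion="ALL"):  # Marker handled for you
    for fn in page["Functions"]:
        print(fn["FunctionName"], fn["Version"])
```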

" - }, - "ListLayerVersionsRequest":{ - "type":"structure", - "required":["LayerName"], - "members":{ - "CompatibleRuntime":{ - "shape":"Runtime", - "documentation":"

A runtime identifier.

The following list includes deprecated runtimes. For more information, see Runtime use after deprecation.

For a list of all currently supported runtimes, see Supported runtimes.

", - "location":"querystring", - "locationName":"CompatibleRuntime" - }, - "LayerName":{ - "shape":"LayerName", - "documentation":"

The name or Amazon Resource Name (ARN) of the layer.

", - "location":"uri", - "locationName":"LayerName" - }, - "Marker":{ - "shape":"String", - "documentation":"

A pagination token returned by a previous call.

", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxLayerListItems", - "documentation":"

The maximum number of versions to return.

", - "location":"querystring", - "locationName":"MaxItems" - }, - "CompatibleArchitecture":{ - "shape":"Architecture", - "documentation":"

The compatible instruction set architecture.

", - "location":"querystring", - "locationName":"CompatibleArchitecture" - } - } - }, - "ListLayerVersionsResponse":{ - "type":"structure", - "members":{ - "NextMarker":{ - "shape":"String", - "documentation":"

A pagination token returned when the response doesn't contain all versions.

" - }, - "LayerVersions":{ - "shape":"LayerVersionsList", - "documentation":"

A list of versions.

" - } - } - }, - "ListLayersRequest":{ - "type":"structure", - "members":{ - "CompatibleRuntime":{ - "shape":"Runtime", - "documentation":"

A runtime identifier.

The following list includes deprecated runtimes. For more information, see Runtime use after deprecation.

For a list of all currently supported runtimes, see Supported runtimes.

", - "location":"querystring", - "locationName":"CompatibleRuntime" - }, - "Marker":{ - "shape":"String", - "documentation":"

A pagination token returned by a previous call.

", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxLayerListItems", - "documentation":"

The maximum number of layers to return.

", - "location":"querystring", - "locationName":"MaxItems" - }, - "CompatibleArchitecture":{ - "shape":"Architecture", - "documentation":"

The compatible instruction set architecture.

", - "location":"querystring", - "locationName":"CompatibleArchitecture" - } - } - }, - "ListLayersResponse":{ - "type":"structure", - "members":{ - "NextMarker":{ - "shape":"String", - "documentation":"

A pagination token returned when the response doesn't contain all layers.

" - }, - "Layers":{ - "shape":"LayersList", - "documentation":"

A list of function layers.

" - } - } - }, - "ListProvisionedConcurrencyConfigsRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Marker":{ - "shape":"String", - "documentation":"

Specify the pagination token that's returned by a previous request to retrieve the next page of results.

", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxProvisionedConcurrencyConfigListItems", - "documentation":"

Specify a number to limit the number of configurations returned.

", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListProvisionedConcurrencyConfigsResponse":{ - "type":"structure", - "members":{ - "ProvisionedConcurrencyConfigs":{ - "shape":"ProvisionedConcurrencyConfigList", - "documentation":"

A list of provisioned concurrency configurations.

" - }, - "NextMarker":{ - "shape":"String", - "documentation":"

The pagination token that's included if more results are available.

" - } - } - }, - "ListTagsRequest":{ - "type":"structure", - "required":["Resource"], - "members":{ - "Resource":{ - "shape":"TaggableResource", - "documentation":"

The resource's Amazon Resource Name (ARN). Note: Lambda does not support adding tags to function aliases or versions.

", - "location":"uri", - "locationName":"Resource" - } - } - }, - "ListTagsResponse":{ - "type":"structure", - "members":{ - "Tags":{ - "shape":"Tags", - "documentation":"

The function's tags.

" - } - } - }, - "ListVersionsByFunctionRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Marker":{ - "shape":"String", - "documentation":"

Specify the pagination token that's returned by a previous request to retrieve the next page of results.

", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxListItems", - "documentation":"

The maximum number of versions to return. Note that ListVersionsByFunction returns a maximum of 50 items in each response, even if you set the number higher.

", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListVersionsByFunctionResponse":{ - "type":"structure", - "members":{ - "NextMarker":{ - "shape":"String", - "documentation":"

The pagination token that's included if more results are available.

" - }, - "Versions":{ - "shape":"FunctionList", - "documentation":"

A list of Lambda function versions.

" - } - } - }, - "LocalMountPath":{ - "type":"string", - "max":160, - "min":0, - "pattern":"/mnt/[a-zA-Z0-9-_.]+" - }, - "LogFormat":{ - "type":"string", - "enum":[ - "JSON", - "Text" - ] - }, - "LogGroup":{ - "type":"string", - "max":512, - "min":1, - "pattern":"[\\.\\-_/#A-Za-z0-9]+" - }, - "LogType":{ - "type":"string", - "enum":[ - "None", - "Tail" - ] - }, - "LoggingConfig":{ - "type":"structure", - "members":{ - "LogFormat":{ - "shape":"LogFormat", - "documentation":"

The format in which Lambda sends your function's application and system logs to CloudWatch. Select between plain text and structured JSON.

" - }, - "ApplicationLogLevel":{ - "shape":"ApplicationLogLevel", - "documentation":"

Set this property to filter the application logs for your function that Lambda sends to CloudWatch. Lambda only sends application logs at the selected level of detail and lower, where TRACE is the highest level and FATAL is the lowest.

" - }, - "SystemLogLevel":{ - "shape":"SystemLogLevel", - "documentation":"

Set this property to filter the system logs for your function that Lambda sends to CloudWatch. Lambda only sends system logs at the selected level of detail and lower, where DEBUG is the highest level and WARN is the lowest.

" - }, - "LogGroup":{ - "shape":"LogGroup", - "documentation":"

The name of the Amazon CloudWatch log group the function sends logs to. By default, Lambda functions send logs to a default log group named /aws/lambda/<function name>. To use a different log group, enter an existing log group or enter a new log group name.

" - } - }, - "documentation":"

The function's Amazon CloudWatch Logs configuration settings.
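
Note that the log-level filters only take effect with the JSON format, so a typical configuration sets both together. A boto3 sketch (the function name and log group are placeholders):

```python
import boto3

lambda_client = boto3.client("lambda")

lambda_client.update_function_configuration(
    FunctionName="my-function",
    LoggingConfig={
        "LogFormat": "JSON",            # required for level filtering
        "ApplicationLogLevel": "INFO",  # drop TRACE/DEBUG application logs
        "SystemLogLevel": "WARN",       # keep only WARN-level system logs
        "LogGroup": "/custom/my-function",
    },
)
```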

" - }, - "Long":{"type":"long"}, - "MasterRegion":{ - "type":"string", - "pattern":"ALL|[a-z]{2}(-gov)?-[a-z]+-\\d{1}" - }, - "MaxAge":{ - "type":"integer", - "box":true, - "max":86400, - "min":0 - }, - "MaxFunctionEventInvokeConfigListItems":{ - "type":"integer", - "box":true, - "max":50, - "min":1 - }, - "MaxItems":{ - "type":"integer", - "box":true, - "max":50, - "min":1 - }, - "MaxLayerListItems":{ - "type":"integer", - "box":true, - "max":50, - "min":1 - }, - "MaxListItems":{ - "type":"integer", - "box":true, - "max":10000, - "min":1 - }, - "MaxProvisionedConcurrencyConfigListItems":{ - "type":"integer", - "box":true, - "max":50, - "min":1 - }, - "MaximumBatchingWindowInSeconds":{ - "type":"integer", - "box":true, - "max":300, - "min":0 - }, - "MaximumConcurrency":{ - "type":"integer", - "box":true, - "max":1000, - "min":2 - }, - "MaximumEventAgeInSeconds":{ - "type":"integer", - "box":true, - "max":21600, - "min":60 - }, - "MaximumNumberOfPollers":{ - "type":"integer", - "box":true, - "max":2000, - "min":1 - }, - "MaximumRecordAgeInSeconds":{ - "type":"integer", - "box":true, - "max":604800, - "min":-1 - }, - "MaximumRetryAttempts":{ - "type":"integer", - "box":true, - "max":2, - "min":0 - }, - "MaximumRetryAttemptsEventSourceMapping":{ - "type":"integer", - "box":true, - "max":10000, - "min":-1 - }, - "MemorySize":{ - "type":"integer", - "box":true, - "max":10240, - "min":128 - }, - "Method":{ - "type":"string", - "max":6, - "min":0, - "pattern":".*" - }, - "MinimumNumberOfPollers":{ - "type":"integer", - "box":true, - "max":200, - "min":1 - }, - "NameSpacedFunctionArn":{ - "type":"string", - "pattern":"arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_\\.]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?" - }, - "NamespacedFunctionName":{ - "type":"string", - "max":170, - "min":1, - "pattern":"(arn:(aws[a-zA-Z-]*)?:lambda:)?([a-z]{2}(-gov)?-[a-z]+-\\d{1}:)?(\\d{12}:)?(function:)?([a-zA-Z0-9-_\\.]+)(:(\\$LATEST|[a-zA-Z0-9-_]+))?" - }, - "NamespacedStatementId":{ - "type":"string", - "max":100, - "min":1, - "pattern":"([a-zA-Z0-9-_.]+)" - }, - "NonNegativeInteger":{ - "type":"integer", - "box":true, - "min":0 - }, - "NullableBoolean":{ - "type":"boolean", - "box":true - }, - "OnFailure":{ - "type":"structure", - "members":{ - "Destination":{ - "shape":"DestinationArn", - "documentation":"

The Amazon Resource Name (ARN) of the destination resource.

To retain records of unsuccessful asynchronous invocations, you can configure an Amazon SNS topic, Amazon SQS queue, Amazon S3 bucket, Lambda function, or Amazon EventBridge event bus as the destination.

To retain records of failed invocations from Kinesis, DynamoDB, self-managed Kafka, or Amazon MSK, you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination.

" - } - }, - "documentation":"

A destination for events that failed processing. For more information, see Adding a destination.

" - }, - "OnSuccess":{ - "type":"structure", - "members":{ - "Destination":{ - "shape":"DestinationArn", - "documentation":"

The Amazon Resource Name (ARN) of the destination resource.

" - } - }, - "documentation":"

A destination for events that were processed successfully.

To retain records of successful asynchronous invocations, you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.

OnSuccess is not supported in CreateEventSourceMapping or UpdateEventSourceMapping requests.
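
Both destinations are configured through PutFunctionEventInvokeConfig; a hedged boto3 sketch with placeholder ARNs and function name:

```python
import boto3

lambda_client = boto3.client("lambda")

lambda_client.put_function_event_invoke_config(
    FunctionName="my-function",
    MaximumRetryAttempts=1,          # 0-2 retries for asynchronous invocations
    MaximumEventAgeInSeconds=3600,
    DestinationConfig={
        "OnSuccess": {"Destination": "arn:aws:sqs:us-east-1:123456789012:successes"},
        "OnFailure": {"Destination": "arn:aws:sqs:us-east-1:123456789012:failures"},
    },
)
```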

" - }, - "Operation":{ - "type":"structure", - "required":[ - "Id", - "Type", - "StartTimestamp", - "Status" - ], - "members":{ - "Id":{"shape":"OperationId"}, - "ParentId":{"shape":"OperationId"}, - "Name":{"shape":"OperationName"}, - "Type":{"shape":"OperationType"}, - "SubType":{"shape":"OperationSubType"}, - "StartTimestamp":{"shape":"ExecutionTimestamp"}, - "EndTimestamp":{"shape":"ExecutionTimestamp"}, - "Status":{"shape":"OperationStatus"}, - "ExecutionDetails":{"shape":"ExecutionDetails"}, - "ContextDetails":{"shape":"ContextDetails"}, - "StepDetails":{"shape":"StepDetails"}, - "WaitDetails":{"shape":"WaitDetails"}, - "CallbackDetails":{"shape":"CallbackDetails"}, - "ChainedInvokeDetails":{"shape":"ChainedInvokeDetails"} - } - }, - "OperationAction":{ - "type":"string", - "enum":[ - "START", - "SUCCEED", - "FAIL", - "RETRY", - "CANCEL" - ] - }, - "OperationId":{ - "type":"string", - "max":64, - "min":1, - "pattern":"[a-zA-Z0-9-_]+" - }, - "OperationName":{ - "type":"string", - "max":256, - "min":1, - "pattern":"[a-zA-Z0-9-_]+" - }, - "OperationPayload":{ - "type":"string", - "max":6291456, - "min":0, - "sensitive":true - }, - "OperationStatus":{ - "type":"string", - "enum":[ - "STARTED", - "PENDING", - "READY", - "SUCCEEDED", - "FAILED", - "CANCELLED", - "TIMED_OUT", - "STOPPED" - ] - }, - "OperationSubType":{ - "type":"string", - "max":32, - "min":1, - "pattern":"[a-zA-Z-_]+" - }, - "OperationType":{ - "type":"string", - "enum":[ - "EXECUTION", - "CONTEXT", - "STEP", - "WAIT", - "CALLBACK", - "CHAINED_INVOKE" - ] - }, - "OperationUpdate":{ - "type":"structure", - "required":[ - "Id", - "Type", - "Action" - ], - "members":{ - "Id":{"shape":"OperationId"}, - "ParentId":{"shape":"OperationId"}, - "Name":{"shape":"OperationName"}, - "Type":{"shape":"OperationType"}, - "SubType":{"shape":"OperationSubType"}, - "Action":{"shape":"OperationAction"}, - "Payload":{"shape":"OperationPayload"}, - "Error":{"shape":"ErrorObject"}, - "ContextOptions":{"shape":"ContextOptions"}, - "StepOptions":{"shape":"StepOptions"}, - "WaitOptions":{"shape":"WaitOptions"}, - "CallbackOptions":{"shape":"CallbackOptions"}, - "ChainedInvokeOptions":{"shape":"ChainedInvokeOptions"} - } - }, - "OperationUpdates":{ - "type":"list", - "member":{"shape":"OperationUpdate"} - }, - "Operations":{ - "type":"list", - "member":{"shape":"Operation"} - }, - "OrganizationId":{ - "type":"string", - "max":34, - "min":0, - "pattern":"o-[a-z0-9]{10,32}" - }, - "Origin":{ - "type":"string", - "max":253, - "min":1, - "pattern":".*" - }, - "OutputPayload":{ - "type":"string", - "max":6291456, - "min":0, - "sensitive":true - }, - "PackageType":{ - "type":"string", - "enum":[ - "Zip", - "Image" - ] - }, - "ParallelizationFactor":{ - "type":"integer", - "box":true, - "max":10, - "min":1 - }, - "Pattern":{ - "type":"string", - "max":4096, - "min":0, - "pattern":".*" - }, - "PolicyLengthExceededException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "message":{"shape":"String"} - }, - "documentation":"

The permissions policy for the resource is too large. For more information, see Lambda quotas.

", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "PositiveInteger":{ - "type":"integer", - "box":true, - "min":1 - }, - "PreconditionFailedException":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"String", - "documentation":"

The exception type.

" - }, - "message":{ - "shape":"String", - "documentation":"

The exception message.

" - } - }, - "documentation":"

The RevisionId provided does not match the latest RevisionId for the Lambda function or alias.

", - "error":{ - "httpStatusCode":412, - "senderFault":true - }, - "exception":true - }, - "Principal":{ - "type":"string", - "pattern":"[^\\s]+" - }, - "PrincipalOrgID":{ - "type":"string", - "max":34, - "min":12, - "pattern":"o-[a-z0-9]{10,32}" - }, - "ProvisionedConcurrencyConfigList":{ - "type":"list", - "member":{"shape":"ProvisionedConcurrencyConfigListItem"} - }, - "ProvisionedConcurrencyConfigListItem":{ - "type":"structure", - "members":{ - "FunctionArn":{ - "shape":"FunctionArn", - "documentation":"

The Amazon Resource Name (ARN) of the alias or version.

" - }, - "RequestedProvisionedConcurrentExecutions":{ - "shape":"PositiveInteger", - "documentation":"

The amount of provisioned concurrency requested.

" - }, - "AvailableProvisionedConcurrentExecutions":{ - "shape":"NonNegativeInteger", - "documentation":"

The amount of provisioned concurrency available.

" - }, - "AllocatedProvisionedConcurrentExecutions":{ - "shape":"NonNegativeInteger", - "documentation":"

The amount of provisioned concurrency allocated. When a weighted alias is used during linear and canary deployments, this value fluctuates depending on the amount of concurrency that is provisioned for the function versions.

" - }, - "Status":{ - "shape":"ProvisionedConcurrencyStatusEnum", - "documentation":"

The status of the allocation process.

" - }, - "StatusReason":{ - "shape":"String", - "documentation":"

For failed allocations, the reason that provisioned concurrency could not be allocated.

" - }, - "LastModified":{ - "shape":"Timestamp", - "documentation":"

The date and time that a user last updated the configuration, in ISO 8601 format.

" - } - }, - "documentation":"

Details about the provisioned concurrency configuration for a function alias or version.
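
For orientation, a minimal boto3 sketch that reads these list items through the public ListProvisionedConcurrencyConfigs operation; the function name is hypothetical:

```python
import boto3

lam = boto3.client("lambda")

# Each item carries the Requested/Available/Allocated counters and Status
# fields described by this shape.
resp = lam.list_provisioned_concurrency_configs(FunctionName="my-function")
for cfg in resp["ProvisionedConcurrencyConfigs"]:
    print(cfg["FunctionArn"], cfg["Status"],
          cfg.get("AllocatedProvisionedConcurrentExecutions"))
```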

" - }, - "ProvisionedConcurrencyConfigNotFoundException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "message":{"shape":"String"} - }, - "documentation":"

The specified configuration does not exist.

", - "error":{ - "httpStatusCode":404, - "senderFault":true - }, - "exception":true - }, - "ProvisionedConcurrencyStatusEnum":{ - "type":"string", - "enum":[ - "IN_PROGRESS", - "READY", - "FAILED" - ] - }, - "ProvisionedPollerConfig":{ - "type":"structure", - "members":{ - "MinimumPollers":{ - "shape":"MinimumNumberOfPollers", - "documentation":"

The minimum number of event pollers this event source can scale down to.

" - }, - "MaximumPollers":{ - "shape":"MaximumNumberOfPollers", - "documentation":"

The maximum number of event pollers this event source can scale up to.

" - } - }, - "documentation":"

The provisioned mode configuration for the event source. Use Provisioned Mode to customize the minimum and maximum number of event pollers for your event source. An event poller is a compute unit that provides approximately 5 MBps of throughput.
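
A hedged sketch of setting this configuration, assuming a boto3 release recent enough to expose ProvisionedPollerConfig; the mapping UUID is hypothetical:

```python
import boto3

lam = boto3.client("lambda")

# Pin the poller fleet between 1 and 10 pollers (~5 MBps of throughput each).
lam.update_event_source_mapping(
    UUID="14e0db71-xxxx-xxxx-xxxx-xxxxxxxxxxxx",  # hypothetical mapping ID
    ProvisionedPollerConfig={"MinimumPollers": 1, "MaximumPollers": 10},
)
```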

" - }, - "PublishLayerVersionRequest":{ - "type":"structure", - "required":[ - "LayerName", - "Content" - ], - "members":{ - "LayerName":{ - "shape":"LayerName", - "documentation":"

The name or Amazon Resource Name (ARN) of the layer.

", - "location":"uri", - "locationName":"LayerName" - }, - "Description":{ - "shape":"Description", - "documentation":"

The description of the version.

" - }, - "Content":{ - "shape":"LayerVersionContentInput", - "documentation":"

The function layer archive.

" - }, - "CompatibleRuntimes":{ - "shape":"CompatibleRuntimes", - "documentation":"

A list of compatible function runtimes. Used for filtering with ListLayers and ListLayerVersions.

The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.

" - }, - "LicenseInfo":{ - "shape":"LicenseInfo", - "documentation":"

The layer's software license. It can be any of the following:

" - }, - "CompatibleArchitectures":{ - "shape":"CompatibleArchitectures", - "documentation":"

A list of compatible instruction set architectures.

" - } - } - }, - "PublishLayerVersionResponse":{ - "type":"structure", - "members":{ - "Content":{ - "shape":"LayerVersionContentOutput", - "documentation":"

Details about the layer version.

" - }, - "LayerArn":{ - "shape":"LayerArn", - "documentation":"

The ARN of the layer.

" - }, - "LayerVersionArn":{ - "shape":"LayerVersionArn", - "documentation":"

The ARN of the layer version.

" - }, - "Description":{ - "shape":"Description", - "documentation":"

The description of the version.

" - }, - "CreatedDate":{ - "shape":"Timestamp", - "documentation":"

The date that the layer version was created, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" - }, - "Version":{ - "shape":"LayerVersionNumber", - "documentation":"

The version number.

" - }, - "CompatibleRuntimes":{ - "shape":"CompatibleRuntimes", - "documentation":"

The layer's compatible runtimes.

The following list includes deprecated runtimes. For more information, see Runtime use after deprecation.

For a list of all currently supported runtimes, see Supported runtimes.

" - }, - "LicenseInfo":{ - "shape":"LicenseInfo", - "documentation":"

The layer's software license.

" - }, - "CompatibleArchitectures":{ - "shape":"CompatibleArchitectures", - "documentation":"

A list of compatible instruction set architectures.

" - } - } - }, - "PublishVersionRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "CodeSha256":{ - "shape":"String", - "documentation":"

Only publish a version if the hash value matches the value that's specified. Use this option to avoid publishing a version if the function code has changed since you last updated it. You can get the hash for the version that you uploaded from the output of UpdateFunctionCode.

" - }, - "Description":{ - "shape":"Description", - "documentation":"

A description for the version to override the description in the function configuration.

" - }, - "RevisionId":{ - "shape":"String", - "documentation":"

Only update the function if the revision ID matches the ID that's specified. Use this option to avoid publishing a version if the function configuration has changed since you last updated it.
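
A minimal boto3 sketch of the guarded publish these fields enable; CodeSha256 and RevisionId are read back from the function configuration, and the function name is hypothetical:

```python
import boto3

lam = boto3.client("lambda")

cfg = lam.get_function_configuration(FunctionName="my-function")

# Publish only if neither the code nor the configuration changed since we read it.
lam.publish_version(
    FunctionName="my-function",
    CodeSha256=cfg["CodeSha256"],
    RevisionId=cfg["RevisionId"],
    Description="release 2024-06-01",
)
```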

" - } - } - }, - "PutFunctionCodeSigningConfigRequest":{ - "type":"structure", - "required":[ - "CodeSigningConfigArn", - "FunctionName" - ], - "members":{ - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"

The Amazon Resource Name (ARN) of the code signing configuration.

" - }, - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - } - } - }, - "PutFunctionCodeSigningConfigResponse":{ - "type":"structure", - "required":[ - "CodeSigningConfigArn", - "FunctionName" - ], - "members":{ - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"

The Amazon Resource Name (ARN) of the code signing configuration.

" - }, - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

" - } - } - }, - "PutFunctionConcurrencyRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "ReservedConcurrentExecutions" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "ReservedConcurrentExecutions":{ - "shape":"ReservedConcurrentExecutions", - "documentation":"

The number of simultaneous executions to reserve for the function.
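
A minimal boto3 sketch (the function name is hypothetical):

```python
import boto3

lam = boto3.client("lambda")

# Reserve 100 concurrent executions for this function; the rest of the
# account's concurrency pool stays available to other functions.
lam.put_function_concurrency(
    FunctionName="my-function",
    ReservedConcurrentExecutions=100,
)
```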

" - } - } - }, - "PutFunctionEventInvokeConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function, version, or alias.

Name formats

You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

A version number or alias name.

", - "location":"querystring", - "locationName":"Qualifier" - }, - "MaximumRetryAttempts":{ - "shape":"MaximumRetryAttempts", - "documentation":"

The maximum number of times to retry when the function returns an error.

" - }, - "MaximumEventAgeInSeconds":{ - "shape":"MaximumEventAgeInSeconds", - "documentation":"

The maximum age of a request that Lambda sends to a function for processing.

" - }, - "DestinationConfig":{ - "shape":"DestinationConfig", - "documentation":"

A destination for events after they have been sent to a function for processing.

Destinations

S3 buckets are supported only for on-failure destinations. To retain records of successful invocations, use another destination type.
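
A minimal boto3 sketch combining these fields; the function name and queue ARN are hypothetical:

```python
import boto3

lam = boto3.client("lambda")

# Retry failed async invokes once, drop events older than an hour, and send
# failures to a (hypothetical) SQS queue.
lam.put_function_event_invoke_config(
    FunctionName="my-function",
    Qualifier="live",
    MaximumRetryAttempts=1,
    MaximumEventAgeInSeconds=3600,
    DestinationConfig={
        "OnFailure": {"Destination": "arn:aws:sqs:us-east-1:123456789012:my-dlq"}
    },
)
```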

" - } - } - }, - "PutFunctionRecursionConfigRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "RecursiveLoop" - ], - "members":{ - "FunctionName":{ - "shape":"UnqualifiedFunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "RecursiveLoop":{ - "shape":"RecursiveLoop", - "documentation":"

If you set your function's recursive loop detection configuration to Allow, Lambda doesn't take any action when it detects your function being invoked as part of a recursive loop. We recommend that you only use this setting if your design intentionally uses a Lambda function to write data back to the same Amazon Web Services resource that invokes it.

If you set your function's recursive loop detection configuration to Terminate, Lambda stops your function being invoked and notifies you when it detects your function being invoked as part of a recursive loop.

By default, Lambda sets your function's configuration to Terminate.

If your design intentionally uses a Lambda function to write data back to the same Amazon Web Services resource that invokes the function, then use caution and implement suitable guard rails to prevent unexpected charges being billed to your Amazon Web Services account. To learn more about best practices for using recursive invocation patterns, see Recursive patterns that cause run-away Lambda functions in Serverless Land.
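
A minimal boto3 sketch, assuming a boto3 release that includes PutFunctionRecursionConfig; the function name is hypothetical:

```python
import boto3

lam = boto3.client("lambda")

# Opt this function out of loop termination because it intentionally writes
# back to the resource that invokes it. "Terminate" is the default.
lam.put_function_recursion_config(
    FunctionName="my-function",
    RecursiveLoop="Allow",
)
```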

" - } - } - }, - "PutFunctionRecursionConfigResponse":{ - "type":"structure", - "members":{ - "RecursiveLoop":{ - "shape":"RecursiveLoop", - "documentation":"

The status of your function's recursive loop detection configuration.

When this value is set to Allow and Lambda detects your function being invoked as part of a recursive loop, it doesn't take any action.

When this value is set to Terminate and Lambda detects your function being invoked as part of a recursive loop, it stops your function being invoked and notifies you.

" - } - } - }, - "PutProvisionedConcurrencyConfigRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "Qualifier", - "ProvisionedConcurrentExecutions" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

The version number or alias name.

", - "location":"querystring", - "locationName":"Qualifier" - }, - "ProvisionedConcurrentExecutions":{ - "shape":"PositiveInteger", - "documentation":"

The amount of provisioned concurrency to allocate for the version or alias.
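
A minimal boto3 sketch (function name and alias are hypothetical); allocation completes asynchronously, so the Status fields below start at IN_PROGRESS:

```python
import boto3

lam = boto3.client("lambda")

resp = lam.put_provisioned_concurrency_config(
    FunctionName="my-function",
    Qualifier="live",                    # version number or alias name
    ProvisionedConcurrentExecutions=50,
)
print(resp["Status"])                    # IN_PROGRESS until READY (or FAILED)
```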

" - } - } - }, - "PutProvisionedConcurrencyConfigResponse":{ - "type":"structure", - "members":{ - "RequestedProvisionedConcurrentExecutions":{ - "shape":"PositiveInteger", - "documentation":"

The amount of provisioned concurrency requested.

" - }, - "AvailableProvisionedConcurrentExecutions":{ - "shape":"NonNegativeInteger", - "documentation":"

The amount of provisioned concurrency available.

" - }, - "AllocatedProvisionedConcurrentExecutions":{ - "shape":"NonNegativeInteger", - "documentation":"

The amount of provisioned concurrency allocated. When a weighted alias is used during linear and canary deployments, this value fluctuates depending on the amount of concurrency that is provisioned for the function versions.

" - }, - "Status":{ - "shape":"ProvisionedConcurrencyStatusEnum", - "documentation":"

The status of the allocation process.

" - }, - "StatusReason":{ - "shape":"String", - "documentation":"

For failed allocations, the reason that provisioned concurrency could not be allocated.

" - }, - "LastModified":{ - "shape":"Timestamp", - "documentation":"

The date and time that a user last updated the configuration, in ISO 8601 format.

" - } - } - }, - "PutRuntimeManagementConfigRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "UpdateRuntimeOn" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

Specify a version of the function. This can be $LATEST or a published version number. If no value is specified, the configuration for the $LATEST version is returned.

", - "location":"querystring", - "locationName":"Qualifier" - }, - "UpdateRuntimeOn":{ - "shape":"UpdateRuntimeOn", - "documentation":"

Specify the runtime update mode.

" - }, - "RuntimeVersionArn":{ - "shape":"RuntimeVersionArn", - "documentation":"

The ARN of the runtime version you want the function to use.

This is only required if you're using the Manual runtime update mode.
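
A minimal boto3 sketch of pinning a runtime version; the function name and runtime version ARN are hypothetical:

```python
import boto3

lam = boto3.client("lambda")

# "Auto" needs no ARN; pinning with "Manual" requires the runtime version ARN,
# typically copied from the function's INIT_START log line.
lam.put_runtime_management_config(
    FunctionName="my-function",
    UpdateRuntimeOn="Manual",
    RuntimeVersionArn=(
        "arn:aws:lambda:us-east-1::runtime:"
        "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
    ),
)
```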

" - } - } - }, - "PutRuntimeManagementConfigResponse":{ - "type":"structure", - "required":[ - "UpdateRuntimeOn", - "FunctionArn" - ], - "members":{ - "UpdateRuntimeOn":{ - "shape":"UpdateRuntimeOn", - "documentation":"

The runtime update mode.

" - }, - "FunctionArn":{ - "shape":"FunctionArn", - "documentation":"

The ARN of the function.

" - }, - "RuntimeVersionArn":{ - "shape":"RuntimeVersionArn", - "documentation":"

The ARN of the runtime the function is configured to use. If the runtime update mode is manual, the ARN is returned, otherwise null is returned.

" - } - } - }, - "Qualifier":{ - "type":"string", - "max":128, - "min":1, - "pattern":"(|[a-zA-Z0-9$_-]+)" - }, - "Queue":{ - "type":"string", - "max":1000, - "min":1, - "pattern":"[\\s\\S]*" - }, - "Queues":{ - "type":"list", - "member":{"shape":"Queue"}, - "max":1, - "min":1 - }, - "RecursiveInvocationException":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"String", - "documentation":"

The exception type.

" - }, - "Message":{ - "shape":"String", - "documentation":"

The exception message.

" - } - }, - "documentation":"

Lambda has detected your function being invoked in a recursive loop with other Amazon Web Services resources and stopped your function's invocation.

", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "RecursiveLoop":{ - "type":"string", - "enum":[ - "Allow", - "Terminate" - ] - }, - "RemoveLayerVersionPermissionRequest":{ - "type":"structure", - "required":[ - "LayerName", - "VersionNumber", - "StatementId" - ], - "members":{ - "LayerName":{ - "shape":"LayerName", - "documentation":"

The name or Amazon Resource Name (ARN) of the layer.

", - "location":"uri", - "locationName":"LayerName" - }, - "VersionNumber":{ - "shape":"LayerVersionNumber", - "documentation":"

The version number.

", - "location":"uri", - "locationName":"VersionNumber" - }, - "StatementId":{ - "shape":"StatementId", - "documentation":"

The identifier that was specified when the statement was added.

", - "location":"uri", - "locationName":"StatementId" - }, - "RevisionId":{ - "shape":"String", - "documentation":"

Only update the policy if the revision ID matches the ID specified. Use this option to avoid modifying a policy that has changed since you last read it.

", - "location":"querystring", - "locationName":"RevisionId" - } - } - }, - "RemovePermissionRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "StatementId" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function, version, or alias.

Name formats

You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "StatementId":{ - "shape":"NamespacedStatementId", - "documentation":"

Statement ID of the permission to remove.

", - "location":"uri", - "locationName":"StatementId" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

Specify a version or alias to remove permissions from a published version of the function.

", - "location":"querystring", - "locationName":"Qualifier" - }, - "RevisionId":{ - "shape":"String", - "documentation":"

Update the policy only if the revision ID matches the ID that's specified. Use this option to avoid modifying a policy that has changed since you last read it.
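
A minimal boto3 sketch of the read-modify-write pattern this field supports; the statement ID and function name are hypothetical:

```python
import boto3

lam = boto3.client("lambda")

# Fetch the current policy's RevisionId, then remove the statement only if
# the policy hasn't changed in between.
policy = lam.get_policy(FunctionName="my-function")
lam.remove_permission(
    FunctionName="my-function",
    StatementId="sns-invoke",            # hypothetical statement ID
    RevisionId=policy["RevisionId"],
)
```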

", - "location":"querystring", - "locationName":"RevisionId" - } - } - }, - "ReplayChildren":{ - "type":"boolean", - "box":true - }, - "RequestTooLargeException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "message":{"shape":"String"} - }, - "documentation":"

The request payload exceeded the Invoke request body JSON input quota. For more information, see Lambda quotas.

", - "error":{ - "httpStatusCode":413, - "senderFault":true - }, - "exception":true - }, - "ReservedConcurrentExecutions":{ - "type":"integer", - "box":true, - "min":0 - }, - "ResourceArn":{ - "type":"string", - "pattern":"(arn:(aws[a-zA-Z-]*)?:[a-z0-9-.]+:.*)|()" - }, - "ResourceConflictException":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"String", - "documentation":"

The exception type.

" - }, - "message":{ - "shape":"String", - "documentation":"

The exception message.

" - } - }, - "documentation":"

The resource already exists, or another operation is in progress.

", - "error":{ - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - "ResourceInUseException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

The operation conflicts with the resource's availability. For example, you tried to update an event source mapping in the CREATING state, or you tried to delete an event source mapping currently UPDATING.

", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "ResourceNotFoundException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

The resource specified in the request does not exist.

", - "error":{ - "httpStatusCode":404, - "senderFault":true - }, - "exception":true - }, - "ResourceNotReadyException":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"String", - "documentation":"

The exception type.

" - }, - "message":{ - "shape":"String", - "documentation":"

The exception message.

" - } - }, - "documentation":"

The function is inactive and its VPC connection is no longer available. Wait for the VPC connection to reestablish and try again.

", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "ResponseStreamingInvocationType":{ - "type":"string", - "enum":[ - "RequestResponse", - "DryRun" - ] - }, - "RetentionPeriodInDays":{ - "type":"integer", - "box":true, - "max":90, - "min":1 - }, - "RetryDetails":{ - "type":"structure", - "members":{ - "CurrentAttempt":{"shape":"AttemptCount"}, - "NextAttemptDelaySeconds":{"shape":"DurationSeconds"} - } - }, - "ReverseOrder":{ - "type":"boolean", - "box":true - }, - "RoleArn":{ - "type":"string", - "pattern":"arn:(aws[a-zA-Z-]*)?:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+" - }, - "Runtime":{ - "type":"string", - "enum":[ - "nodejs", - "nodejs4.3", - "nodejs6.10", - "nodejs8.10", - "nodejs10.x", - "nodejs12.x", - "nodejs14.x", - "nodejs16.x", - "java8", - "java8.al2", - "java11", - "python2.7", - "python3.6", - "python3.7", - "python3.8", - "python3.9", - "dotnetcore1.0", - "dotnetcore2.0", - "dotnetcore2.1", - "dotnetcore3.1", - "dotnet6", - "dotnet8", - "nodejs4.3-edge", - "go1.x", - "ruby2.5", - "ruby2.7", - "provided", - "provided.al2", - "nodejs18.x", - "python3.10", - "java17", - "ruby3.2", - "ruby3.3", - "ruby3.4", - "python3.11", - "nodejs20.x", - "provided.al2023", - "python3.12", - "java21", - "python3.13", - "nodejs22.x" - ] - }, - "RuntimeVersionArn":{ - "type":"string", - "max":2048, - "min":26, - "pattern":"arn:(aws[a-zA-Z-]*):lambda:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}::runtime:.+" - }, - "RuntimeVersionConfig":{ - "type":"structure", - "members":{ - "RuntimeVersionArn":{ - "shape":"RuntimeVersionArn", - "documentation":"

The ARN of the runtime version you want the function to use.

" - }, - "Error":{ - "shape":"RuntimeVersionError", - "documentation":"

Error response when Lambda is unable to retrieve the runtime version for a function.

" - } - }, - "documentation":"

The ARN of the runtime and any errors that occurred.

" - }, - "RuntimeVersionError":{ - "type":"structure", - "members":{ - "ErrorCode":{ - "shape":"String", - "documentation":"

The error code.

" - }, - "Message":{ - "shape":"SensitiveString", - "documentation":"

The error message.

" - } - }, - "documentation":"

Any error returned when the runtime version information for the function could not be retrieved.

" - }, - "S3Bucket":{ - "type":"string", - "max":63, - "min":3, - "pattern":"[0-9A-Za-z\\.\\-_]*(?Limits the number of concurrent instances that the Amazon SQS event source can invoke.

" - } - }, - "documentation":"

(Amazon SQS only) The scaling configuration for the event source. To remove the configuration, pass an empty value.

" - }, - "SchemaRegistryEventRecordFormat":{ - "type":"string", - "enum":[ - "JSON", - "SOURCE" - ] - }, - "SchemaRegistryUri":{ - "type":"string", - "max":10000, - "min":1, - "pattern":"[a-zA-Z0-9-\\/*:_+=.@-]*" - }, - "SecurityGroupId":{"type":"string"}, - "SecurityGroupIds":{ - "type":"list", - "member":{"shape":"SecurityGroupId"}, - "max":5, - "min":0 - }, - "SelfManagedEventSource":{ - "type":"structure", - "members":{ - "Endpoints":{ - "shape":"Endpoints", - "documentation":"

The list of bootstrap servers for your Kafka brokers in the following format: \"KAFKA_BOOTSTRAP_SERVERS\": [\"abc.xyz.com:xxxx\",\"abc2.xyz.com:xxxx\"].

" - } - }, - "documentation":"

The self-managed Apache Kafka cluster for your event source.

" - }, - "SelfManagedKafkaEventSourceConfig":{ - "type":"structure", - "members":{ - "ConsumerGroupId":{ - "shape":"URI", - "documentation":"

The identifier for the Kafka consumer group to join. The consumer group ID must be unique among all your Kafka event sources. After creating a Kafka event source mapping with the consumer group ID specified, you cannot update this value. For more information, see Customizable consumer group ID.

" - }, - "SchemaRegistryConfig":{ - "shape":"KafkaSchemaRegistryConfig", - "documentation":"

Specific configuration settings for a Kafka schema registry.

" - } - }, - "documentation":"

Specific configuration settings for a self-managed Apache Kafka event source.

" - }, - "SendDurableExecutionCallbackFailureRequest":{ - "type":"structure", - "required":["CallbackId"], - "members":{ - "CallbackId":{ - "shape":"CallbackId", - "location":"uri", - "locationName":"CallbackId" - }, - "Error":{"shape":"ErrorObject"} - }, - "payload":"Error" - }, - "SendDurableExecutionCallbackFailureResponse":{ - "type":"structure", - "members":{} - }, - "SendDurableExecutionCallbackHeartbeatRequest":{ - "type":"structure", - "required":["CallbackId"], - "members":{ - "CallbackId":{ - "shape":"CallbackId", - "location":"uri", - "locationName":"CallbackId" - } - } - }, - "SendDurableExecutionCallbackHeartbeatResponse":{ - "type":"structure", - "members":{} - }, - "SendDurableExecutionCallbackSuccessRequest":{ - "type":"structure", - "required":["CallbackId"], - "members":{ - "CallbackId":{ - "shape":"CallbackId", - "location":"uri", - "locationName":"CallbackId" - }, - "Result":{"shape":"BinaryOperationPayload"} - }, - "payload":"Result" - }, - "SendDurableExecutionCallbackSuccessResponse":{ - "type":"structure", - "members":{} - }, - "SensitiveString":{ - "type":"string", - "sensitive":true - }, - "ServiceException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

The Lambda service encountered an internal error.

", - "error":{"httpStatusCode":500}, - "exception":true, - "fault":true - }, - "SigningProfileVersionArns":{ - "type":"list", - "member":{"shape":"Arn"}, - "max":20, - "min":1 - }, - "SnapStart":{ - "type":"structure", - "members":{ - "ApplyOn":{ - "shape":"SnapStartApplyOn", - "documentation":"

Set to PublishedVersions to create a snapshot of the initialized execution environment when you publish a function version.

" - } - }, - "documentation":"

The function's Lambda SnapStart setting. Set ApplyOn to PublishedVersions to create a snapshot of the initialized execution environment when you publish a function version.
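
A minimal boto3 sketch of enabling SnapStart and publishing a version to trigger the snapshot; the function name is hypothetical:

```python
import boto3

lam = boto3.client("lambda")

# Enable SnapStart, wait for the update to land, then publish a version so
# Lambda snapshots the initialized execution environment for that version.
lam.update_function_configuration(
    FunctionName="my-function",
    SnapStart={"ApplyOn": "PublishedVersions"},
)
lam.get_waiter("function_updated_v2").wait(FunctionName="my-function")
lam.publish_version(FunctionName="my-function")
```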

" - }, - "SnapStartApplyOn":{ - "type":"string", - "enum":[ - "PublishedVersions", - "None" - ] - }, - "SnapStartException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

The afterRestore() runtime hook encountered an error. For more information, check the Amazon CloudWatch logs.

", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "SnapStartNotReadyException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

Lambda is initializing your function. You can invoke the function when the function state becomes Active.

", - "error":{ - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - "SnapStartOptimizationStatus":{ - "type":"string", - "enum":[ - "On", - "Off" - ] - }, - "SnapStartResponse":{ - "type":"structure", - "members":{ - "ApplyOn":{ - "shape":"SnapStartApplyOn", - "documentation":"

When set to PublishedVersions, Lambda creates a snapshot of the execution environment when you publish a function version.

" - }, - "OptimizationStatus":{ - "shape":"SnapStartOptimizationStatus", - "documentation":"

When you provide a qualified Amazon Resource Name (ARN), this response element indicates whether SnapStart is activated for the specified function version.

" - } - }, - "documentation":"

The function's SnapStart setting.

" - }, - "SnapStartTimeoutException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

Lambda couldn't restore the snapshot within the timeout limit.

", - "error":{ - "httpStatusCode":408, - "senderFault":true - }, - "exception":true - }, - "SourceAccessConfiguration":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"SourceAccessType", - "documentation":"

The type of authentication protocol, VPC components, or virtual host for your event source. For example: \"Type\":\"SASL_SCRAM_512_AUTH\".

" - }, - "URI":{ - "shape":"URI", - "documentation":"

The value for your chosen configuration in Type. For example: \"URI\": \"arn:aws:secretsmanager:us-east-1:01234567890:secret:MyBrokerSecretName\".

" - } - }, - "documentation":"

To secure and define access to your event source, you can specify the authentication protocol, VPC components, or virtual host.

" - }, - "SourceAccessConfigurations":{ - "type":"list", - "member":{"shape":"SourceAccessConfiguration"}, - "max":22, - "min":0 - }, - "SourceAccessType":{ - "type":"string", - "enum":[ - "BASIC_AUTH", - "VPC_SUBNET", - "VPC_SECURITY_GROUP", - "SASL_SCRAM_512_AUTH", - "SASL_SCRAM_256_AUTH", - "VIRTUAL_HOST", - "CLIENT_CERTIFICATE_TLS_AUTH", - "SERVER_ROOT_CA_CERTIFICATE" - ] - }, - "SourceOwner":{ - "type":"string", - "max":12, - "min":0, - "pattern":"\\d{12}" - }, - "StackTraceEntries":{ - "type":"list", - "member":{"shape":"StackTraceEntry"} - }, - "StackTraceEntry":{ - "type":"string", - "sensitive":true - }, - "State":{ - "type":"string", - "enum":[ - "Pending", - "Active", - "Inactive", - "Failed" - ] - }, - "StateReason":{"type":"string"}, - "StateReasonCode":{ - "type":"string", - "enum":[ - "Idle", - "Creating", - "Restoring", - "EniLimitExceeded", - "InsufficientRolePermissions", - "InvalidConfiguration", - "InternalError", - "SubnetOutOfIPAddresses", - "InvalidSubnet", - "InvalidSecurityGroup", - "ImageDeleted", - "ImageAccessDenied", - "InvalidImage", - "KMSKeyAccessDenied", - "KMSKeyNotFound", - "InvalidStateKMSKey", - "DisabledKMSKey", - "EFSIOError", - "EFSMountConnectivityError", - "EFSMountFailure", - "EFSMountTimeout", - "InvalidRuntime", - "InvalidZipFileException", - "FunctionError", - "DrainingDurableExecutions" - ] - }, - "StatementId":{ - "type":"string", - "max":100, - "min":1, - "pattern":"([a-zA-Z0-9-_]+)" - }, - "StepDetails":{ - "type":"structure", - "members":{ - "Attempt":{"shape":"AttemptCount"}, - "NextAttemptTimestamp":{"shape":"ExecutionTimestamp"}, - "Result":{"shape":"OperationPayload"}, - "Error":{"shape":"ErrorObject"} - } - }, - "StepFailedDetails":{ - "type":"structure", - "required":[ - "Error", - "RetryDetails" - ], - "members":{ - "Error":{"shape":"EventError"}, - "RetryDetails":{"shape":"RetryDetails"} - } - }, - "StepOptions":{ - "type":"structure", - "members":{ - "NextAttemptDelaySeconds":{"shape":"StepOptionsNextAttemptDelaySecondsInteger"} - } - }, - "StepOptionsNextAttemptDelaySecondsInteger":{ - "type":"integer", - "box":true, - "max":31622400, - "min":1 - }, - "StepStartedDetails":{ - "type":"structure", - "members":{} - }, - "StepSucceededDetails":{ - "type":"structure", - "required":[ - "Result", - "RetryDetails" - ], - "members":{ - "Result":{"shape":"EventResult"}, - "RetryDetails":{"shape":"RetryDetails"} - } - }, - "StopDurableExecutionRequest":{ - "type":"structure", - "required":["DurableExecutionArn"], - "members":{ - "DurableExecutionArn":{ - "shape":"DurableExecutionArn", - "location":"uri", - "locationName":"DurableExecutionArn" - }, - "Error":{"shape":"ErrorObject"} - }, - "payload":"Error" - }, - "StopDurableExecutionResponse":{ - "type":"structure", - "required":["StopTimestamp"], - "members":{ - "StopTimestamp":{"shape":"ExecutionTimestamp"} - } - }, - "String":{"type":"string"}, - "StringList":{ - "type":"list", - "member":{"shape":"String"}, - "max":1500, - "min":0 - }, - "SubnetIPAddressLimitReachedException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

Lambda couldn't set up VPC access for the Lambda function because one or more configured subnets has no available IP addresses.

", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "SubnetId":{"type":"string"}, - "SubnetIds":{ - "type":"list", - "member":{"shape":"SubnetId"}, - "max":16, - "min":0 - }, - "SystemLogLevel":{ - "type":"string", - "enum":[ - "DEBUG", - "INFO", - "WARN" - ] - }, - "TagKey":{"type":"string"}, - "TagKeyList":{ - "type":"list", - "member":{"shape":"TagKey"} - }, - "TagResourceRequest":{ - "type":"structure", - "required":[ - "Resource", - "Tags" - ], - "members":{ - "Resource":{ - "shape":"TaggableResource", - "documentation":"

The resource's Amazon Resource Name (ARN).

", - "location":"uri", - "locationName":"Resource" - }, - "Tags":{ - "shape":"Tags", - "documentation":"

A list of tags to apply to the resource.

" - } - } - }, - "TagValue":{"type":"string"}, - "TaggableResource":{ - "type":"string", - "max":256, - "min":1, - "pattern":"arn:(aws[a-zA-Z-]*):lambda:[a-z]{2}((-gov)|(-iso([a-z]?)))?-[a-z]+-\\d{1}:\\d{12}:(function:[a-zA-Z0-9-_]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?|code-signing-config:csc-[a-z0-9]{17}|event-source-mapping:[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})" - }, - "Tags":{ - "type":"map", - "key":{"shape":"TagKey"}, - "value":{"shape":"TagValue"} - }, - "TagsError":{ - "type":"structure", - "required":[ - "ErrorCode", - "Message" - ], - "members":{ - "ErrorCode":{ - "shape":"TagsErrorCode", - "documentation":"

The error code.

" - }, - "Message":{ - "shape":"TagsErrorMessage", - "documentation":"

The error message.

" - } - }, - "documentation":"

An object that contains details about an error related to retrieving tags.

" - }, - "TagsErrorCode":{ - "type":"string", - "max":21, - "min":10, - "pattern":"[A-Za-z]+Exception" - }, - "TagsErrorMessage":{ - "type":"string", - "max":1000, - "min":84, - "pattern":".*" - }, - "ThrottleReason":{ - "type":"string", - "enum":[ - "ConcurrentInvocationLimitExceeded", - "FunctionInvocationRateLimitExceeded", - "ReservedFunctionConcurrentInvocationLimitExceeded", - "ReservedFunctionInvocationRateLimitExceeded", - "CallerRateLimitExceeded", - "ConcurrentSnapshotCreateLimitExceeded" - ] - }, - "Timeout":{ - "type":"integer", - "box":true, - "min":1 - }, - "Timestamp":{"type":"string"}, - "TooManyRequestsException":{ - "type":"structure", - "members":{ - "retryAfterSeconds":{ - "shape":"String", - "documentation":"

The number of seconds the caller should wait before retrying.

", - "location":"header", - "locationName":"Retry-After" - }, - "Type":{"shape":"String"}, - "message":{"shape":"String"}, - "Reason":{"shape":"ThrottleReason"} - }, - "documentation":"

The request throughput limit was exceeded. For more information, see Lambda quotas.

", - "error":{ - "httpStatusCode":429, - "senderFault":true - }, - "exception":true - }, - "Topic":{ - "type":"string", - "max":249, - "min":1, - "pattern":"[^.]([a-zA-Z0-9\\-_.]+)" - }, - "Topics":{ - "type":"list", - "member":{"shape":"Topic"}, - "max":1, - "min":1 - }, - "TracingConfig":{ - "type":"structure", - "members":{ - "Mode":{ - "shape":"TracingMode", - "documentation":"

The tracing mode.

" - } - }, - "documentation":"

The function's X-Ray tracing configuration. To sample and record incoming requests, set Mode to Active.

" - }, - "TracingConfigResponse":{ - "type":"structure", - "members":{ - "Mode":{ - "shape":"TracingMode", - "documentation":"

The tracing mode.

" - } - }, - "documentation":"

The function's X-Ray tracing configuration.

" - }, - "TracingMode":{ - "type":"string", - "enum":[ - "Active", - "PassThrough" - ] - }, - "Truncated":{ - "type":"boolean", - "box":true - }, - "TumblingWindowInSeconds":{ - "type":"integer", - "box":true, - "max":900, - "min":0 - }, - "URI":{ - "type":"string", - "max":200, - "min":1, - "pattern":"[a-zA-Z0-9-\\/*:_+=.@-]*" - }, - "UnqualifiedFunctionName":{ - "type":"string", - "max":140, - "min":1, - "pattern":"(arn:(aws[a-zA-Z-]*)?:lambda:)?([a-z]{2}((-gov)|(-iso([a-z]?)))?-[a-z]+-\\d{1}:)?(\\d{12}:)?(function:)?([a-zA-Z0-9-_]+)" - }, - "UnreservedConcurrentExecutions":{ - "type":"integer", - "box":true, - "min":0 - }, - "UnsupportedMediaTypeException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "message":{"shape":"String"} - }, - "documentation":"

The content type of the Invoke request body is not JSON.

", - "error":{ - "httpStatusCode":415, - "senderFault":true - }, - "exception":true - }, - "UntagResourceRequest":{ - "type":"structure", - "required":[ - "Resource", - "TagKeys" - ], - "members":{ - "Resource":{ - "shape":"TaggableResource", - "documentation":"

The resource's Amazon Resource Name (ARN).

", - "location":"uri", - "locationName":"Resource" - }, - "TagKeys":{ - "shape":"TagKeyList", - "documentation":"

A list of tag keys to remove from the resource.

", - "location":"querystring", - "locationName":"tagKeys" - } - } - }, - "UpdateAliasRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "Name" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Name":{ - "shape":"Alias", - "documentation":"

The name of the alias.

", - "location":"uri", - "locationName":"Name" - }, - "FunctionVersion":{ - "shape":"Version", - "documentation":"

The function version that the alias invokes.

" - }, - "Description":{ - "shape":"Description", - "documentation":"

A description of the alias.

" - }, - "RoutingConfig":{ - "shape":"AliasRoutingConfiguration", - "documentation":"

The routing configuration of the alias.

" - }, - "RevisionId":{ - "shape":"String", - "documentation":"

Only update the alias if the revision ID matches the ID that's specified. Use this option to avoid modifying an alias that has changed since you last read it.

" - } - } - }, - "UpdateCodeSigningConfigRequest":{ - "type":"structure", - "required":["CodeSigningConfigArn"], - "members":{ - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"

The Amazon Resource Name (ARN) of the code signing configuration.

", - "location":"uri", - "locationName":"CodeSigningConfigArn" - }, - "Description":{ - "shape":"Description", - "documentation":"

Descriptive name for this code signing configuration.

" - }, - "AllowedPublishers":{ - "shape":"AllowedPublishers", - "documentation":"

Signing profiles for this code signing configuration.

" - }, - "CodeSigningPolicies":{ - "shape":"CodeSigningPolicies", - "documentation":"

The code signing policy.

" - } - } - }, - "UpdateCodeSigningConfigResponse":{ - "type":"structure", - "required":["CodeSigningConfig"], - "members":{ - "CodeSigningConfig":{ - "shape":"CodeSigningConfig", - "documentation":"

The code signing configuration.

" - } - } - }, - "UpdateEventSourceMappingRequest":{ - "type":"structure", - "required":["UUID"], - "members":{ - "UUID":{ - "shape":"String", - "documentation":"

The identifier of the event source mapping.

", - "location":"uri", - "locationName":"UUID" - }, - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it's limited to 64 characters in length.

" - }, - "Enabled":{ - "shape":"Enabled", - "documentation":"

When true, the event source mapping is active. When false, Lambda pauses polling and invocation.

Default: True

" - }, - "BatchSize":{ - "shape":"BatchSize", - "documentation":"

The maximum number of records in each batch that Lambda pulls from your stream or queue and sends to your function. Lambda passes all of the records in the batch to the function in a single call, up to the payload limit for synchronous invocation (6 MB).

" - }, - "FilterCriteria":{ - "shape":"FilterCriteria", - "documentation":"

An object that defines the filter criteria that determine whether Lambda should process an event. For more information, see Lambda event filtering.

" - }, - "MaximumBatchingWindowInSeconds":{ - "shape":"MaximumBatchingWindowInSeconds", - "documentation":"

The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function. You can configure MaximumBatchingWindowInSeconds to any value from 0 seconds to 300 seconds in increments of seconds.

For Kinesis, DynamoDB, and Amazon SQS event sources, the default batching window is 0 seconds. For Amazon MSK, Self-managed Apache Kafka, Amazon MQ, and DocumentDB event sources, the default batching window is 500 ms. Note that because you can only change MaximumBatchingWindowInSeconds in increments of seconds, you cannot revert back to the 500 ms default batching window after you have changed it. To restore the default batching window, you must create a new event source mapping.

Related setting: For Kinesis, DynamoDB, and Amazon SQS event sources, when you set BatchSize to a value greater than 10, you must set MaximumBatchingWindowInSeconds to at least 1.
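
A minimal boto3 sketch honoring that constraint; the mapping UUID is hypothetical:

```python
import boto3

lam = boto3.client("lambda")

# BatchSize > 10 requires a batching window of at least one second.
lam.update_event_source_mapping(
    UUID="14e0db71-xxxx-xxxx-xxxx-xxxxxxxxxxxx",  # hypothetical mapping ID
    BatchSize=100,
    MaximumBatchingWindowInSeconds=5,
)
```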

" - }, - "DestinationConfig":{ - "shape":"DestinationConfig", - "documentation":"

(Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Kafka only) A configuration object that specifies the destination of an event after Lambda processes it.

" - }, - "MaximumRecordAgeInSeconds":{ - "shape":"MaximumRecordAgeInSeconds", - "documentation":"

(Kinesis and DynamoDB Streams only) Discard records older than the specified age. The default value is infinite (-1).

" - }, - "BisectBatchOnFunctionError":{ - "shape":"BisectBatchOnFunctionError", - "documentation":"

(Kinesis and DynamoDB Streams only) If the function returns an error, split the batch in two and retry.

" - }, - "MaximumRetryAttempts":{ - "shape":"MaximumRetryAttemptsEventSourceMapping", - "documentation":"

(Kinesis and DynamoDB Streams only) Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.

" - }, - "ParallelizationFactor":{ - "shape":"ParallelizationFactor", - "documentation":"

(Kinesis and DynamoDB Streams only) The number of batches to process from each shard concurrently.

" - }, - "SourceAccessConfigurations":{ - "shape":"SourceAccessConfigurations", - "documentation":"

An array of authentication protocols or VPC components required to secure your event source.

" - }, - "TumblingWindowInSeconds":{ - "shape":"TumblingWindowInSeconds", - "documentation":"

(Kinesis and DynamoDB Streams only) The duration in seconds of a processing window for DynamoDB and Kinesis Streams event sources. A value of 0 seconds indicates no tumbling window.

" - }, - "FunctionResponseTypes":{ - "shape":"FunctionResponseTypeList", - "documentation":"

(Kinesis, DynamoDB Streams, and Amazon SQS) A list of current response type enums applied to the event source mapping.

" - }, - "ScalingConfig":{ - "shape":"ScalingConfig", - "documentation":"

(Amazon SQS only) The scaling configuration for the event source. For more information, see Configuring maximum concurrency for Amazon SQS event sources.

" - }, - "AmazonManagedKafkaEventSourceConfig":{"shape":"AmazonManagedKafkaEventSourceConfig"}, - "SelfManagedKafkaEventSourceConfig":{"shape":"SelfManagedKafkaEventSourceConfig"}, - "DocumentDBEventSourceConfig":{ - "shape":"DocumentDBEventSourceConfig", - "documentation":"

Specific configuration settings for a DocumentDB event source.

" - }, - "KMSKeyArn":{ - "shape":"KMSKeyArn", - "documentation":"

The ARN of the Key Management Service (KMS) customer managed key that Lambda uses to encrypt your function's filter criteria. By default, Lambda does not encrypt your filter criteria object. Specify this property to encrypt data using your own customer managed key.

" - }, - "MetricsConfig":{ - "shape":"EventSourceMappingMetricsConfig", - "documentation":"

The metrics configuration for your event source. For more information, see Event source mapping metrics.

" - }, - "ProvisionedPollerConfig":{ - "shape":"ProvisionedPollerConfig", - "documentation":"

(Amazon MSK and self-managed Apache Kafka only) The provisioned mode configuration for the event source. For more information, see provisioned mode.

" - } - } - }, - "UpdateFunctionCodeRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "ZipFile":{ - "shape":"Blob", - "documentation":"

The base64-encoded contents of the deployment package. Amazon Web Services SDK and CLI clients handle the encoding for you. Use only with a function defined with a .zip file archive deployment package.

" - }, - "S3Bucket":{ - "shape":"S3Bucket", - "documentation":"

An Amazon S3 bucket in the same Amazon Web Services Region as your function. The bucket can be in a different Amazon Web Services account. Use only with a function defined with a .zip file archive deployment package.

" - }, - "S3Key":{ - "shape":"S3Key", - "documentation":"

The Amazon S3 key of the deployment package. Use only with a function defined with a .zip file archive deployment package.

" - }, - "S3ObjectVersion":{ - "shape":"S3ObjectVersion", - "documentation":"

For versioned objects, the version of the deployment package object to use.

" - }, - "ImageUri":{ - "shape":"String", - "documentation":"

URI of a container image in the Amazon ECR registry. Do not use for a function defined with a .zip file archive.

" - }, - "Publish":{ - "shape":"Boolean", - "documentation":"

Set to true to publish a new version of the function after updating the code. This has the same effect as calling PublishVersion separately.
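
A minimal boto3 sketch; the bucket, key, and function name are hypothetical:

```python
import boto3

lam = boto3.client("lambda")

# Upload new code from S3 and publish a version in the same call
# (equivalent to UpdateFunctionCode followed by PublishVersion).
resp = lam.update_function_code(
    FunctionName="my-function",
    S3Bucket="my-deploy-bucket",
    S3Key="builds/my-function.zip",
    Publish=True,
)
print(resp["Version"], resp["CodeSha256"])
```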

" - }, - "DryRun":{ - "shape":"Boolean", - "documentation":"

Set to true to validate the request parameters and access permissions without modifying the function code.

" - }, - "RevisionId":{ - "shape":"String", - "documentation":"

Update the function only if the revision ID matches the ID that's specified. Use this option to avoid modifying a function that has changed since you last read it.

" - }, - "Architectures":{ - "shape":"ArchitecturesList", - "documentation":"

The instruction set architecture that the function supports. Enter a string array with one of the valid values (arm64 or x86_64). The default value is x86_64.

" - }, - "SourceKMSKeyArn":{ - "shape":"KMSKeyArn", - "documentation":"

The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt your function's .zip deployment package. If you don't provide a customer managed key, Lambda uses an Amazon Web Services managed key.

" - } - } - }, - "UpdateFunctionConfigurationRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Role":{ - "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the function's execution role.

" - }, - "Handler":{ - "shape":"Handler", - "documentation":"

The name of the method within your code that Lambda calls to run your function. Handler is required if the deployment package is a .zip file archive. The format includes the file name. It can also include namespaces and other qualifiers, depending on the runtime. For more information, see Lambda programming model.

" - }, - "Description":{ - "shape":"Description", - "documentation":"

A description of the function.

" - }, - "Timeout":{ - "shape":"Timeout", - "documentation":"

The amount of time (in seconds) that Lambda allows a function to run before stopping it. The default is 3 seconds. The maximum allowed value is 900 seconds. For more information, see Lambda execution environment.

" - }, - "MemorySize":{ - "shape":"MemorySize", - "documentation":"

The amount of memory available to the function at runtime. Increasing the function memory also increases its CPU allocation. The default value is 128 MB. The value can be any multiple of 1 MB.

" - }, - "VpcConfig":{ - "shape":"VpcConfig", - "documentation":"

For network connectivity to Amazon Web Services resources in a VPC, specify a list of security groups and subnets in the VPC. When you connect a function to a VPC, it can access resources and the internet only through that VPC. For more information, see Configuring a Lambda function to access resources in a VPC.

" - }, - "Environment":{ - "shape":"Environment", - "documentation":"

Environment variables that are accessible from function code during execution.

" - }, - "Runtime":{ - "shape":"Runtime", - "documentation":"

The identifier of the function's runtime. Runtime is required if the deployment package is a .zip file archive. Specifying a runtime results in an error if you're deploying a function using a container image.

The following list includes deprecated runtimes. Lambda blocks creating new functions and updating existing functions shortly after each runtime is deprecated. For more information, see Runtime use after deprecation.

For a list of all currently supported runtimes, see Supported runtimes.

" - }, - "DeadLetterConfig":{ - "shape":"DeadLetterConfig", - "documentation":"

A dead-letter queue configuration that specifies the queue or topic where Lambda sends asynchronous events when they fail processing. For more information, see Dead-letter queues.

" - }, - "KMSKeyArn":{ - "shape":"KMSKeyArn", - "documentation":"

The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt the following resources:

If you don't provide a customer managed key, Lambda uses an Amazon Web Services owned key or an Amazon Web Services managed key.

" - }, - "TracingConfig":{ - "shape":"TracingConfig", - "documentation":"

Set Mode to Active to sample and trace a subset of incoming requests with X-Ray.

" - }, - "RevisionId":{ - "shape":"String", - "documentation":"

Update the function only if the revision ID matches the ID that's specified. Use this option to avoid modifying a function that has changed since you last read it.

" - }, - "Layers":{ - "shape":"LayerList", - "documentation":"

A list of function layers to add to the function's execution environment. Specify each layer by its ARN, including the version.

" - }, - "FileSystemConfigs":{ - "shape":"FileSystemConfigList", - "documentation":"

Connection settings for an Amazon EFS file system.

" - }, - "ImageConfig":{ - "shape":"ImageConfig", - "documentation":"

Container image configuration values that override the values in the container image Docker file.

" - }, - "EphemeralStorage":{ - "shape":"EphemeralStorage", - "documentation":"

The size of the function's /tmp directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).

" - }, - "SnapStart":{ - "shape":"SnapStart", - "documentation":"

The function's SnapStart setting.

" - }, - "LoggingConfig":{ - "shape":"LoggingConfig", - "documentation":"

The function's Amazon CloudWatch Logs configuration settings.

" - }, - "DurableConfig":{"shape":"DurableConfig"} - } - }, - "UpdateFunctionEventInvokeConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function, version, or alias.

Name formats

You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

A version number or alias name.

", - "location":"querystring", - "locationName":"Qualifier" - }, - "MaximumRetryAttempts":{ - "shape":"MaximumRetryAttempts", - "documentation":"

The maximum number of times to retry when the function returns an error.

" - }, - "MaximumEventAgeInSeconds":{ - "shape":"MaximumEventAgeInSeconds", - "documentation":"

The maximum age of a request that Lambda sends to a function for processing.

" - }, - "DestinationConfig":{ - "shape":"DestinationConfig", - "documentation":"

A destination for events after they have been sent to a function for processing.

Destinations

S3 buckets are supported only for on-failure destinations. To retain records of successful invocations, use another destination type.

" - } - } - }, - "UpdateFunctionUrlConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"FunctionUrlQualifier", - "documentation":"

The alias name.

", - "location":"querystring", - "locationName":"Qualifier" - }, - "AuthType":{ - "shape":"FunctionUrlAuthType", - "documentation":"

The type of authentication that your function URL uses. Set to AWS_IAM if you want to restrict access to authenticated users only. Set to NONE if you want to bypass IAM authentication to create a public endpoint. For more information, see Security and auth model for Lambda function URLs.
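
A minimal boto3 sketch of tightening a function URL; the function name and origin are hypothetical:

```python
import boto3

lam = boto3.client("lambda")

# Lock the URL down to IAM-authenticated callers and switch to streaming
# responses; CORS here allows a single (hypothetical) origin.
lam.update_function_url_config(
    FunctionName="my-function",
    AuthType="AWS_IAM",
    InvokeMode="RESPONSE_STREAM",
    Cors={"AllowOrigins": ["https://example.com"], "AllowMethods": ["GET"]},
)
```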

" - }, - "Cors":{ - "shape":"Cors", - "documentation":"

The cross-origin resource sharing (CORS) settings for your function URL.

" - }, - "InvokeMode":{ - "shape":"InvokeMode", - "documentation":"

Use one of the following options: BUFFERED – the default option; Lambda invokes the function using the Invoke API operation, and results are available when the payload is complete (maximum payload size 6 MB). RESPONSE_STREAM – the function streams payload results as they become available; Lambda invokes the function using the InvokeWithResponseStream API operation (maximum response payload size 20 MB, though you can request a quota increase).

" - } - } - }, - "UpdateFunctionUrlConfigResponse":{ - "type":"structure", - "required":[ - "FunctionUrl", - "FunctionArn", - "AuthType", - "CreationTime", - "LastModifiedTime" - ], - "members":{ - "FunctionUrl":{ - "shape":"FunctionUrl", - "documentation":"

The HTTP URL endpoint for your function.

" - }, - "FunctionArn":{ - "shape":"FunctionArn", - "documentation":"

The Amazon Resource Name (ARN) of your function.

" - }, - "AuthType":{ - "shape":"FunctionUrlAuthType", - "documentation":"

The type of authentication that your function URL uses. Set to AWS_IAM if you want to restrict access to authenticated users only. Set to NONE if you want to bypass IAM authentication to create a public endpoint. For more information, see Security and auth model for Lambda function URLs.

" - }, - "Cors":{ - "shape":"Cors", - "documentation":"

The cross-origin resource sharing (CORS) settings for your function URL.

" - }, - "CreationTime":{ - "shape":"Timestamp", - "documentation":"

When the function URL was created, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" - }, - "LastModifiedTime":{ - "shape":"Timestamp", - "documentation":"

When the function URL configuration was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" - }, - "InvokeMode":{ - "shape":"InvokeMode", - "documentation":"

Use one of the following options: BUFFERED – the default option; Lambda invokes the function using the Invoke API operation, and results are available when the payload is complete (maximum payload size 6 MB). RESPONSE_STREAM – the function streams payload results as they become available; Lambda invokes the function using the InvokeWithResponseStream API operation (maximum response payload size 20 MB, though you can request a quota increase).

" - } - } - }, - "UpdateRuntimeOn":{ - "type":"string", - "enum":[ - "Auto", - "Manual", - "FunctionUpdate" - ] - }, - "Version":{ - "type":"string", - "max":1024, - "min":1, - "pattern":"(\\$LATEST|[0-9]+)" - }, - "VpcConfig":{ - "type":"structure", - "members":{ - "SubnetIds":{ - "shape":"SubnetIds", - "documentation":"

A list of VPC subnet IDs.

" - }, - "SecurityGroupIds":{ - "shape":"SecurityGroupIds", - "documentation":"

A list of VPC security group IDs.

" - }, - "Ipv6AllowedForDualStack":{ - "shape":"NullableBoolean", - "documentation":"

Allows outbound IPv6 traffic on VPC functions that are connected to dual-stack subnets.

" - } - }, - "documentation":"

The VPC security groups and subnets that are attached to a Lambda function. For more information, see Configuring a Lambda function to access resources in a VPC.

" - }, - "VpcConfigResponse":{ - "type":"structure", - "members":{ - "SubnetIds":{ - "shape":"SubnetIds", - "documentation":"

A list of VPC subnet IDs.

" - }, - "SecurityGroupIds":{ - "shape":"SecurityGroupIds", - "documentation":"

A list of VPC security group IDs.

" - }, - "VpcId":{ - "shape":"VpcId", - "documentation":"

The ID of the VPC.

" - }, - "Ipv6AllowedForDualStack":{ - "shape":"NullableBoolean", - "documentation":"

Allows outbound IPv6 traffic on VPC functions that are connected to dual-stack subnets.

" - } - }, - "documentation":"

The VPC security groups and subnets that are attached to a Lambda function.

" - }, - "VpcId":{"type":"string"}, - "WaitCancelledDetails":{ - "type":"structure", - "members":{ - "Error":{"shape":"EventError"} - } - }, - "WaitDetails":{ - "type":"structure", - "members":{ - "ScheduledEndTimestamp":{"shape":"ExecutionTimestamp"} - } - }, - "WaitOptions":{ - "type":"structure", - "members":{ - "WaitSeconds":{"shape":"WaitOptionsWaitSecondsInteger"} - } - }, - "WaitOptionsWaitSecondsInteger":{ - "type":"integer", - "box":true, - "max":31622400, - "min":1 - }, - "WaitStartedDetails":{ - "type":"structure", - "required":[ - "Duration", - "ScheduledEndTimestamp" - ], - "members":{ - "Duration":{"shape":"DurationSeconds"}, - "ScheduledEndTimestamp":{"shape":"ExecutionTimestamp"} - } - }, - "WaitSucceededDetails":{ - "type":"structure", - "members":{ - "Duration":{"shape":"DurationSeconds"} - } - }, - "Weight":{ - "type":"double", - "max":1.0, - "min":0.0 - }, - "WorkingDirectory":{ - "type":"string", - "max":1000, - "min":0 - } - }, - "documentation":"

Lambda

Overview

Lambda is a compute service that lets you run code without provisioning or managing servers. Lambda runs your code on a high-availability compute infrastructure and performs all of the administration of the compute resources, including server and operating system maintenance, capacity provisioning and automatic scaling, code monitoring and logging. With Lambda, you can run code for virtually any type of application or backend service. For more information about the Lambda service, see What is Lambda in the Lambda Developer Guide.

The Lambda API Reference provides information about each of the API methods, including details about the parameters in each API request and response.

You can use Software Development Kits (SDKs), Integrated Development Environment (IDE) Toolkits, and command line tools to access the API. For installation instructions, see Tools for Amazon Web Services.

For a list of Region-specific endpoints that Lambda supports, see Lambda endpoints and quotas in the Amazon Web Services General Reference.

When making API calls, you must authenticate your request by providing a signature. Lambda supports Signature Version 4. For more information, see Signature Version 4 signing process in the Amazon Web Services General Reference.

CA certificates

Because Amazon Web Services SDKs use the CA certificates from your computer, changes to the certificates on the Amazon Web Services servers can cause connection failures when you attempt to use an SDK. You can prevent these failures by keeping your computer's CA certificates and operating system up to date. If you encounter this issue in a corporate environment and do not manage your own computer, you might need to ask an administrator to assist with the update process.

When accessing the Lambda management console or Lambda API endpoints, whether through browsers or programmatically, you will need to ensure your client machines support any of the following CAs: Amazon Root CA 1, Starfield Services Root Certificate Authority - G2, and Starfield Class 2 Certification Authority.

Root certificates from the first two authorities are available from Amazon Trust Services, but keeping your computer up to date is the more straightforward solution. To learn more about ACM-provided certificates, see Amazon Web Services Certificate Manager FAQs.

" -} diff --git a/src/aws_durable_execution_sdk_python/botocore/data/lambdainternal/2015-03-31/service-2.json b/src/aws_durable_execution_sdk_python/botocore/data/lambdainternal/2015-03-31/service-2.json deleted file mode 100644 index a96f3e3..0000000 --- a/src/aws_durable_execution_sdk_python/botocore/data/lambdainternal/2015-03-31/service-2.json +++ /dev/null @@ -1,7855 +0,0 @@ -{ - "version":"2.0", - "metadata":{ - "apiVersion":"2015-03-31", - "endpointPrefix":"lambda", - "protocol":"rest-json", - "serviceFullName":"AWS Lambda", - "serviceId":"Lambda", - "signatureVersion":"v4", - "uid":"lambda-2015-03-31" - }, - "operations":{ - "AddLayerVersionPermission":{ - "name":"AddLayerVersionPermission", - "http":{ - "method":"POST", - "requestUri":"/2018-10-31/layers/{LayerName}/versions/{VersionNumber}/policy", - "responseCode":201 - }, - "input":{"shape":"AddLayerVersionPermissionRequest"}, - "output":{"shape":"AddLayerVersionPermissionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"PolicyLengthExceededException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"PreconditionFailedException"} - ], - "documentation":"

Adds permissions to the resource-based policy of a version of a Lambda layer. Use this action to grant layer usage permission to other accounts. You can grant permission to a single account, all accounts in an organization, or all Amazon Web Services accounts.

To revoke permission, call RemoveLayerVersionPermission with the statement ID that you specified when you added it.
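
As a hedged boto3 sketch (an illustration, not part of this service model; the layer name and account ID are placeholders), granting another account permission to use a layer version might look like this:

    # Hypothetical sketch: share a layer version with one account.
    import boto3

    lambda_client = boto3.client("lambda")
    lambda_client.add_layer_version_permission(
        LayerName="my-layer",             # placeholder
        VersionNumber=1,
        StatementId="share-with-account",
        Action="lambda:GetLayerVersion",
        Principal="123456789012",         # placeholder account ID
    )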

" - }, - "AddPermission":{ - "name":"AddPermission", - "http":{ - "method":"POST", - "requestUri":"/2015-03-31/functions/{FunctionName}/policy", - "responseCode":201 - }, - "input":{"shape":"AddPermissionRequest"}, - "output":{"shape":"AddPermissionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"PolicyLengthExceededException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"PreconditionFailedException"} - ], - "documentation":"

Grants a principal permission to use a function. You can apply the policy at the function level, or specify a qualifier to restrict access to a single version or alias. If you use a qualifier, the invoker must use the full Amazon Resource Name (ARN) of that version or alias to invoke the function. Note: Lambda does not support adding policies to version $LATEST.

To grant permission to another account, specify the account ID as the Principal. To grant permission to an organization defined in Organizations, specify the organization ID as the PrincipalOrgID. For Amazon Web Services services, the principal is a domain-style identifier that the service defines, such as s3.amazonaws.com or sns.amazonaws.com. For Amazon Web Services services, you can also specify the ARN of the associated resource as the SourceArn. If you grant permission to a service principal without specifying the source, other accounts could potentially configure resources in their account to invoke your Lambda function.

This operation adds a statement to a resource-based permissions policy for the function. For more information about function policies, see Using resource-based policies for Lambda.
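
As a brief boto3 sketch (an editor's illustration; all names and ARNs are placeholders), allowing Amazon S3 to invoke a function, scoped to one bucket, might look like this:

    # Hypothetical sketch: let S3 invoke the function, restricted by SourceArn.
    import boto3

    lambda_client = boto3.client("lambda")
    lambda_client.add_permission(
        FunctionName="my-function",
        StatementId="s3-invoke",
        Action="lambda:InvokeFunction",
        Principal="s3.amazonaws.com",
        SourceArn="arn:aws:s3:::amzn-s3-demo-bucket",  # placeholder bucket ARN
    )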

" - }, - "CheckpointDurableExecution":{ - "name":"CheckpointDurableExecution", - "http":{ - "method":"POST", - "requestUri":"/2025-12-01/durable-executions/{DurableExecutionArn}/checkpoint", - "responseCode":200 - }, - "input":{"shape":"CheckpointDurableExecutionRequest"}, - "output":{"shape":"CheckpointDurableExecutionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ServiceException"} - ], - "idempotent":true - }, - "CreateAlias":{ - "name":"CreateAlias", - "http":{ - "method":"POST", - "requestUri":"/2015-03-31/functions/{FunctionName}/aliases", - "responseCode":201 - }, - "input":{"shape":"CreateAliasRequest"}, - "output":{"shape":"AliasConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Creates an alias for a Lambda function version. Use aliases to provide clients with a function identifier that you can update to invoke a different version.

You can also map an alias to split invocation requests between two versions. Use the RoutingConfig parameter to specify a second version and the percentage of invocation requests that it receives.
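
As a brief boto3 sketch (an illustration, not part of this service model; the function name and version numbers are placeholders), a weighted alias that sends 90% of requests to version 1 and 10% to version 2 might be created like this:

    # Hypothetical sketch: create a traffic-shifting alias.
    import boto3

    lambda_client = boto3.client("lambda")
    lambda_client.create_alias(
        FunctionName="my-function",
        Name="live",
        FunctionVersion="1",
        # Send 10% of invocation requests to version "2".
        RoutingConfig={"AdditionalVersionWeights": {"2": 0.1}},
    )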

", - "idempotent":true - }, - "CreateCodeSigningConfig":{ - "name":"CreateCodeSigningConfig", - "http":{ - "method":"POST", - "requestUri":"/2020-04-22/code-signing-configs", - "responseCode":201 - }, - "input":{"shape":"CreateCodeSigningConfigRequest"}, - "output":{"shape":"CreateCodeSigningConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"} - ], - "documentation":"

Creates a code signing configuration. A code signing configuration defines a list of allowed signing profiles and defines the code-signing validation policy (action to be taken if deployment validation checks fail).

" - }, - "CreateEventSourceMapping":{ - "name":"CreateEventSourceMapping", - "http":{ - "method":"POST", - "requestUri":"/2015-03-31/event-source-mappings", - "responseCode":202 - }, - "input":{"shape":"CreateEventSourceMappingRequest"}, - "output":{"shape":"EventSourceMappingConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Creates a mapping between an event source and a Lambda function. Lambda reads items from the event source and invokes the function.

For details about how to configure different event sources, see the corresponding topics in the Lambda Developer Guide.

The following error handling options are available only for DynamoDB and Kinesis event sources: BisectBatchOnFunctionError, DestinationConfig, MaximumRecordAgeInSeconds, MaximumRetryAttempts, and ParallelizationFactor.

For stream sources (DynamoDB, Kinesis, Amazon MSK, and self-managed Apache Kafka), an additional error handling option is also available.

For information about which configuration parameters apply to each event source, see the corresponding topics in the Lambda Developer Guide.

" - }, - "CreateFunction":{ - "name":"CreateFunction", - "http":{ - "method":"POST", - "requestUri":"/2015-03-31/functions", - "responseCode":201 - }, - "input":{"shape":"CreateFunctionRequest"}, - "output":{"shape":"FunctionConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"InvalidCodeSignatureException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"CodeVerificationFailedException"}, - {"shape":"CodeSigningConfigNotFoundException"}, - {"shape":"CodeStorageExceededException"} - ], - "documentation":"

Creates a Lambda function. To create a function, you need a deployment package and an execution role. The deployment package is a .zip file archive or container image that contains your function code. The execution role grants the function permission to use Amazon Web Services services, such as Amazon CloudWatch Logs for log streaming and X-Ray for request tracing.

If the deployment package is a container image, then you set the package type to Image. For a container image, the code property must include the URI of a container image in the Amazon ECR registry. You do not need to specify the handler and runtime properties.

If the deployment package is a .zip file archive, then you set the package type to Zip. For a .zip file archive, the code property specifies the location of the .zip file. You must also specify the handler and runtime properties. The code in the deployment package must be compatible with the target instruction set architecture of the function (x86-64 or arm64). If you do not specify the architecture, then the default value is x86-64.

When you create a function, Lambda provisions an instance of the function and its supporting resources. If your function connects to a VPC, this process can take a minute or so. During this time, you can't invoke or modify the function. The State, StateReason, and StateReasonCode fields in the response from GetFunctionConfiguration indicate when the function is ready to invoke. For more information, see Lambda function states.

A function has an unpublished version, and can have published versions and aliases. The unpublished version changes when you update your function's code and configuration. A published version is a snapshot of your function code and configuration that can't be changed. An alias is a named resource that maps to a version, and can be changed to map to a different version. Use the Publish parameter to create version 1 of your function from its initial configuration.

The other parameters let you configure version-specific and function-level settings. You can modify version-specific settings later with UpdateFunctionConfiguration. Function-level settings apply to both the unpublished and published versions of the function, and include tags (TagResource) and per-function concurrency limits (PutFunctionConcurrency).

You can use code signing if your deployment package is a .zip file archive. To enable code signing for this function, specify the ARN of a code-signing configuration. When a user attempts to deploy a code package with UpdateFunctionCode, Lambda checks that the code package has a valid signature from a trusted publisher. The code-signing configuration includes a set of signing profiles, which define the trusted publishers for this function.

If another Amazon Web Services account or an Amazon Web Services service invokes your function, use AddPermission to grant permission by creating a resource-based Identity and Access Management (IAM) policy. You can grant permissions at the function level, on a version, or on an alias.

To invoke your function directly, use Invoke. To invoke your function in response to events in other Amazon Web Services services, create an event source mapping (CreateEventSourceMapping), or configure a function trigger in the other service. For more information, see Invoking Lambda functions.
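
As a rough boto3 sketch (an illustration only; the role ARN, handler, and file path are placeholders), creating a .zip-based function and publishing version 1 might look like this:

    # Hypothetical sketch: create a .zip-based function.
    import boto3

    lambda_client = boto3.client("lambda")
    with open("function.zip", "rb") as f:  # placeholder path
        lambda_client.create_function(
            FunctionName="my-function",
            Runtime="python3.12",
            Role="arn:aws:iam::123456789012:role/my-execution-role",  # placeholder
            Handler="app.handler",
            Code={"ZipFile": f.read()},
            Publish=True,  # create version 1 from the initial configuration
        )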

", - "idempotent":true - }, - "CreateFunctionUrlConfig":{ - "name":"CreateFunctionUrlConfig", - "http":{ - "method":"POST", - "requestUri":"/2021-10-31/functions/{FunctionName}/url", - "responseCode":201 - }, - "input":{"shape":"CreateFunctionUrlConfigRequest"}, - "output":{"shape":"CreateFunctionUrlConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Creates a Lambda function URL with the specified configuration parameters. A function URL is a dedicated HTTP(S) endpoint that you can use to invoke your function.
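
As a brief boto3 sketch (an editor's illustration; the function name is a placeholder), creating an IAM-authenticated function URL might look like this:

    # Hypothetical sketch: create a function URL and print its endpoint.
    import boto3

    lambda_client = boto3.client("lambda")
    response = lambda_client.create_function_url_config(
        FunctionName="my-function",  # placeholder
        AuthType="AWS_IAM",
        InvokeMode="BUFFERED",
    )
    print(response["FunctionUrl"])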

" - }, - "DeleteAlias":{ - "name":"DeleteAlias", - "http":{ - "method":"DELETE", - "requestUri":"/2015-03-31/functions/{FunctionName}/aliases/{Name}", - "responseCode":204 - }, - "input":{"shape":"DeleteAliasRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"} - ], - "documentation":"

Deletes a Lambda function alias.

", - "idempotent":true - }, - "DeleteCodeSigningConfig":{ - "name":"DeleteCodeSigningConfig", - "http":{ - "method":"DELETE", - "requestUri":"/2020-04-22/code-signing-configs/{CodeSigningConfigArn}", - "responseCode":204 - }, - "input":{"shape":"DeleteCodeSigningConfigRequest"}, - "output":{"shape":"DeleteCodeSigningConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Deletes the code signing configuration. You can delete the code signing configuration only if no function is using it.

", - "idempotent":true - }, - "DeleteEventSourceMapping":{ - "name":"DeleteEventSourceMapping", - "http":{ - "method":"DELETE", - "requestUri":"/2015-03-31/event-source-mappings/{UUID}", - "responseCode":202 - }, - "input":{"shape":"DeleteEventSourceMappingRequest"}, - "output":{"shape":"EventSourceMappingConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceInUseException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Deletes an event source mapping. You can get the identifier of a mapping from the output of ListEventSourceMappings.

When you delete an event source mapping, it enters a Deleting state and might not be completely deleted for several seconds.

", - "idempotent":true - }, - "DeleteFunction":{ - "name":"DeleteFunction", - "http":{ - "method":"DELETE", - "requestUri":"/2015-03-31/functions/{FunctionName}", - "responseCode":204 - }, - "input":{"shape":"DeleteFunctionRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Deletes a Lambda function. To delete a specific function version, use the Qualifier parameter. Otherwise, all versions and aliases are deleted. This doesn't require the user to have explicit permissions for DeleteAlias.

To delete Lambda event source mappings that invoke a function, use DeleteEventSourceMapping. For Amazon Web Services services and resources that invoke your function directly, delete the trigger in the service where you originally configured it.

", - "idempotent":true - }, - "DeleteFunctionCodeSigningConfig":{ - "name":"DeleteFunctionCodeSigningConfig", - "http":{ - "method":"DELETE", - "requestUri":"/2020-06-30/functions/{FunctionName}/code-signing-config", - "responseCode":204 - }, - "input":{"shape":"DeleteFunctionCodeSigningConfigRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"CodeSigningConfigNotFoundException"} - ], - "documentation":"

Removes the code signing configuration from the function.

" - }, - "DeleteFunctionConcurrency":{ - "name":"DeleteFunctionConcurrency", - "http":{ - "method":"DELETE", - "requestUri":"/2017-10-31/functions/{FunctionName}/concurrency", - "responseCode":204 - }, - "input":{"shape":"DeleteFunctionConcurrencyRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Removes a concurrent execution limit from a function.

" - }, - "DeleteFunctionEventInvokeConfig":{ - "name":"DeleteFunctionEventInvokeConfig", - "http":{ - "method":"DELETE", - "requestUri":"/2019-09-25/functions/{FunctionName}/event-invoke-config", - "responseCode":204 - }, - "input":{"shape":"DeleteFunctionEventInvokeConfigRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Deletes the configuration for asynchronous invocation for a function, version, or alias.

To configure options for asynchronous invocation, use PutFunctionEventInvokeConfig.

" - }, - "DeleteFunctionUrlConfig":{ - "name":"DeleteFunctionUrlConfig", - "http":{ - "method":"DELETE", - "requestUri":"/2021-10-31/functions/{FunctionName}/url", - "responseCode":204 - }, - "input":{"shape":"DeleteFunctionUrlConfigRequest"}, - "errors":[ - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Deletes a Lambda function URL. When you delete a function URL, you can't recover it. Creating a new function URL results in a different URL address.

" - }, - "DeleteLayerVersion":{ - "name":"DeleteLayerVersion", - "http":{ - "method":"DELETE", - "requestUri":"/2018-10-31/layers/{LayerName}/versions/{VersionNumber}", - "responseCode":204 - }, - "input":{"shape":"DeleteLayerVersionRequest"}, - "errors":[ - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"} - ], - "documentation":"

Deletes a version of a Lambda layer. Deleted versions can no longer be viewed or added to functions. To avoid breaking functions, a copy of the version remains in Lambda until no functions refer to it.

", - "idempotent":true - }, - "DeleteProvisionedConcurrencyConfig":{ - "name":"DeleteProvisionedConcurrencyConfig", - "http":{ - "method":"DELETE", - "requestUri":"/2019-09-30/functions/{FunctionName}/provisioned-concurrency", - "responseCode":204 - }, - "input":{"shape":"DeleteProvisionedConcurrencyConfigRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Deletes the provisioned concurrency configuration for a function.

", - "idempotent":true - }, - "GetAccountSettings":{ - "name":"GetAccountSettings", - "http":{ - "method":"GET", - "requestUri":"/2016-08-19/account-settings", - "responseCode":200 - }, - "input":{"shape":"GetAccountSettingsRequest"}, - "output":{"shape":"GetAccountSettingsResponse"}, - "errors":[ - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"} - ], - "documentation":"

Retrieves details about your account's limits and usage in an Amazon Web Services Region.

", - "readonly":true - }, - "GetAlias":{ - "name":"GetAlias", - "http":{ - "method":"GET", - "requestUri":"/2015-03-31/functions/{FunctionName}/aliases/{Name}", - "responseCode":200 - }, - "input":{"shape":"GetAliasRequest"}, - "output":{"shape":"AliasConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns details about a Lambda function alias.

", - "readonly":true - }, - "GetCodeSigningConfig":{ - "name":"GetCodeSigningConfig", - "http":{ - "method":"GET", - "requestUri":"/2020-04-22/code-signing-configs/{CodeSigningConfigArn}", - "responseCode":200 - }, - "input":{"shape":"GetCodeSigningConfigRequest"}, - "output":{"shape":"GetCodeSigningConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns information about the specified code signing configuration.

", - "readonly":true - }, - "GetDurableExecution":{ - "name":"GetDurableExecution", - "http":{ - "method":"GET", - "requestUri":"/2025-12-01/durable-executions/{DurableExecutionArn}", - "responseCode":200 - }, - "input":{"shape":"GetDurableExecutionRequest"}, - "output":{"shape":"GetDurableExecutionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ServiceException"}, - {"shape":"ResourceNotFoundException"} - ], - "readonly":true - }, - "GetDurableExecutionHistory":{ - "name":"GetDurableExecutionHistory", - "http":{ - "method":"GET", - "requestUri":"/2025-12-01/durable-executions/{DurableExecutionArn}/history", - "responseCode":200 - }, - "input":{"shape":"GetDurableExecutionHistoryRequest"}, - "output":{"shape":"GetDurableExecutionHistoryResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ServiceException"}, - {"shape":"ResourceNotFoundException"} - ], - "readonly":true - }, - "GetDurableExecutionState":{ - "name":"GetDurableExecutionState", - "http":{ - "method":"GET", - "requestUri":"/2025-12-01/durable-executions/{DurableExecutionArn}/state", - "responseCode":200 - }, - "input":{"shape":"GetDurableExecutionStateRequest"}, - "output":{"shape":"GetDurableExecutionStateResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ServiceException"} - ], - "readonly":true - }, - "GetEventSourceMapping":{ - "name":"GetEventSourceMapping", - "http":{ - "method":"GET", - "requestUri":"/2015-03-31/event-source-mappings/{UUID}", - "responseCode":200 - }, - "input":{"shape":"GetEventSourceMappingRequest"}, - "output":{"shape":"EventSourceMappingConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns details about an event source mapping. You can get the identifier of a mapping from the output of ListEventSourceMappings.

", - "readonly":true - }, - "GetFunction":{ - "name":"GetFunction", - "http":{ - "method":"GET", - "requestUri":"/2015-03-31/functions/{FunctionName}", - "responseCode":200 - }, - "input":{"shape":"GetFunctionRequest"}, - "output":{"shape":"GetFunctionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns information about the function or function version, with a link to download the deployment package that's valid for 10 minutes. If you specify a function version, only details that are specific to that version are returned.

", - "readonly":true - }, - "GetFunctionCodeSigningConfig":{ - "name":"GetFunctionCodeSigningConfig", - "http":{ - "method":"GET", - "requestUri":"/2020-06-30/functions/{FunctionName}/code-signing-config", - "responseCode":200 - }, - "input":{"shape":"GetFunctionCodeSigningConfigRequest"}, - "output":{"shape":"GetFunctionCodeSigningConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns the code signing configuration for the specified function.

", - "readonly":true - }, - "GetFunctionConcurrency":{ - "name":"GetFunctionConcurrency", - "http":{ - "method":"GET", - "requestUri":"/2019-09-30/functions/{FunctionName}/concurrency", - "responseCode":200 - }, - "input":{"shape":"GetFunctionConcurrencyRequest"}, - "output":{"shape":"GetFunctionConcurrencyResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns details about the reserved concurrency configuration for a function. To set a concurrency limit for a function, use PutFunctionConcurrency.

", - "readonly":true - }, - "GetFunctionConfiguration":{ - "name":"GetFunctionConfiguration", - "http":{ - "method":"GET", - "requestUri":"/2015-03-31/functions/{FunctionName}/configuration", - "responseCode":200 - }, - "input":{"shape":"GetFunctionConfigurationRequest"}, - "output":{"shape":"FunctionConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns the version-specific settings of a Lambda function or version. The output includes only options that can vary between versions of a function. To modify these settings, use UpdateFunctionConfiguration.

To get all of a function's details, including function-level settings, use GetFunction.

", - "readonly":true - }, - "GetFunctionEventInvokeConfig":{ - "name":"GetFunctionEventInvokeConfig", - "http":{ - "method":"GET", - "requestUri":"/2019-09-25/functions/{FunctionName}/event-invoke-config", - "responseCode":200 - }, - "input":{"shape":"GetFunctionEventInvokeConfigRequest"}, - "output":{"shape":"FunctionEventInvokeConfig"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Retrieves the configuration for asynchronous invocation for a function, version, or alias.

To configure options for asynchronous invocation, use PutFunctionEventInvokeConfig.

", - "readonly":true - }, - "GetFunctionRecursionConfig":{ - "name":"GetFunctionRecursionConfig", - "http":{ - "method":"GET", - "requestUri":"/2024-08-31/functions/{FunctionName}/recursion-config", - "responseCode":200 - }, - "input":{"shape":"GetFunctionRecursionConfigRequest"}, - "output":{"shape":"GetFunctionRecursionConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns your function's recursive loop detection configuration.

", - "readonly":true - }, - "GetFunctionUrlConfig":{ - "name":"GetFunctionUrlConfig", - "http":{ - "method":"GET", - "requestUri":"/2021-10-31/functions/{FunctionName}/url", - "responseCode":200 - }, - "input":{"shape":"GetFunctionUrlConfigRequest"}, - "output":{"shape":"GetFunctionUrlConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns details about a Lambda function URL.

", - "readonly":true - }, - "GetLayerVersion":{ - "name":"GetLayerVersion", - "http":{ - "method":"GET", - "requestUri":"/2018-10-31/layers/{LayerName}/versions/{VersionNumber}", - "responseCode":200 - }, - "input":{"shape":"GetLayerVersionRequest"}, - "output":{"shape":"GetLayerVersionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns information about a version of a Lambda layer, with a link to download the layer archive that's valid for 10 minutes.

", - "readonly":true - }, - "GetLayerVersionByArn":{ - "name":"GetLayerVersionByArn", - "http":{ - "method":"GET", - "requestUri":"/2018-10-31/layers?find=LayerVersion", - "responseCode":200 - }, - "input":{"shape":"GetLayerVersionByArnRequest"}, - "output":{"shape":"GetLayerVersionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns information about a version of a Lambda layer, with a link to download the layer archive that's valid for 10 minutes.

", - "readonly":true - }, - "GetLayerVersionPolicy":{ - "name":"GetLayerVersionPolicy", - "http":{ - "method":"GET", - "requestUri":"/2018-10-31/layers/{LayerName}/versions/{VersionNumber}/policy", - "responseCode":200 - }, - "input":{"shape":"GetLayerVersionPolicyRequest"}, - "output":{"shape":"GetLayerVersionPolicyResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns the permission policy for a version of a Lambda layer. For more information, see AddLayerVersionPermission.

", - "readonly":true - }, - "GetPolicy":{ - "name":"GetPolicy", - "http":{ - "method":"GET", - "requestUri":"/2015-03-31/functions/{FunctionName}/policy", - "responseCode":200 - }, - "input":{"shape":"GetPolicyRequest"}, - "output":{"shape":"GetPolicyResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns the resource-based IAM policy for a function, version, or alias.

", - "readonly":true - }, - "GetProvisionedConcurrencyConfig":{ - "name":"GetProvisionedConcurrencyConfig", - "http":{ - "method":"GET", - "requestUri":"/2019-09-30/functions/{FunctionName}/provisioned-concurrency", - "responseCode":200 - }, - "input":{"shape":"GetProvisionedConcurrencyConfigRequest"}, - "output":{"shape":"GetProvisionedConcurrencyConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ProvisionedConcurrencyConfigNotFoundException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Retrieves the provisioned concurrency configuration for a function's alias or version.

", - "readonly":true - }, - "GetRuntimeManagementConfig":{ - "name":"GetRuntimeManagementConfig", - "http":{ - "method":"GET", - "requestUri":"/2021-07-20/functions/{FunctionName}/runtime-management-config", - "responseCode":200 - }, - "input":{"shape":"GetRuntimeManagementConfigRequest"}, - "output":{"shape":"GetRuntimeManagementConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Retrieves the runtime management configuration for a function's version. If the runtime update mode is Manual, this includes the ARN of the runtime version and the runtime update mode. If the runtime update mode is Auto or Function update, this includes the runtime update mode and null is returned for the ARN. For more information, see Runtime updates.

", - "readonly":true - }, - "Invoke":{ - "name":"Invoke", - "http":{ - "method":"POST", - "requestUri":"/2015-03-31/functions/{FunctionName}/invocations", - "responseCode":200 - }, - "input":{"shape":"InvocationRequest"}, - "output":{"shape":"InvocationResponse"}, - "errors":[ - {"shape":"ResourceNotReadyException"}, - {"shape":"InvalidSecurityGroupIDException"}, - {"shape":"SnapStartTimeoutException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"EC2ThrottledException"}, - {"shape":"EFSMountConnectivityException"}, - {"shape":"SubnetIPAddressLimitReachedException"}, - {"shape":"KMSAccessDeniedException"}, - {"shape":"RequestTooLargeException"}, - {"shape":"KMSDisabledException"}, - {"shape":"UnsupportedMediaTypeException"}, - {"shape":"InvalidRuntimeException"}, - {"shape":"EC2UnexpectedException"}, - {"shape":"InvalidSubnetIDException"}, - {"shape":"KMSNotFoundException"}, - {"shape":"InvalidParameterValueException"}, - {"shape":"EC2AccessDeniedException"}, - {"shape":"EFSIOException"}, - {"shape":"KMSInvalidStateException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ENILimitReachedException"}, - {"shape":"SnapStartNotReadyException"}, - {"shape":"ServiceException"}, - {"shape":"SnapStartException"}, - {"shape":"RecursiveInvocationException"}, - {"shape":"EFSMountTimeoutException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"InvalidRequestContentException"}, - {"shape":"DurableExecutionAlreadyStartedException"}, - {"shape":"InvalidZipFileException"}, - {"shape":"EFSMountFailureException"} - ], - "documentation":"

Invokes a Lambda function. You can invoke a function synchronously (and wait for the response), or asynchronously. By default, Lambda invokes your function synchronously (that is, the InvocationType is RequestResponse). To invoke a function asynchronously, set InvocationType to Event. Lambda passes the ClientContext object to your function for synchronous invocations only.

For synchronous invocation, details about the function response, including errors, are included in the response body and headers. For either invocation type, you can find more information in the execution log and trace.

When an error occurs, your function may be invoked multiple times. Retry behavior varies by error type, client, event source, and invocation type. For example, if you invoke a function asynchronously and it returns an error, Lambda executes the function up to two more times. For more information, see Error handling and automatic retries in Lambda.

For asynchronous invocation, Lambda adds events to a queue before sending them to your function. If your function does not have enough capacity to keep up with the queue, events may be lost. Occasionally, your function may receive the same event multiple times, even if no error occurs. To retain events that were not processed, configure your function with a dead-letter queue.

The status code in the API response doesn't reflect function errors. Error codes are reserved for errors that prevent your function from executing, such as permissions errors, quota errors, or issues with your function's code and configuration. For example, Lambda returns TooManyRequestsException if running the function would cause you to exceed a concurrency limit at either the account level (ConcurrentInvocationLimitExceeded) or function level (ReservedFunctionConcurrentInvocationLimitExceeded).

For functions with a long timeout, your client might disconnect during synchronous invocation while it waits for a response. Configure your HTTP client, SDK, firewall, proxy, or operating system to allow for long connections with timeout or keep-alive settings.

This operation requires permission for the lambda:InvokeFunction action. For details on how to set up permissions for cross-account invocations, see Granting function access to other accounts.
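
As a brief boto3 sketch (an illustration, not part of this service model; the function name and payload are placeholders), the two invocation types might be exercised like this:

    # Hypothetical sketch: synchronous and asynchronous invocation.
    import json
    import boto3

    lambda_client = boto3.client("lambda")

    # Synchronous (RequestResponse): wait for and read the result.
    response = lambda_client.invoke(
        FunctionName="my-function",
        InvocationType="RequestResponse",
        Payload=json.dumps({"key": "value"}),
    )
    print(response["StatusCode"], response["Payload"].read())

    # Asynchronous (Event): Lambda queues the event and returns immediately.
    lambda_client.invoke(
        FunctionName="my-function",
        InvocationType="Event",
        Payload=json.dumps({"key": "value"}),
    )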

" - }, - "InvokeAsync":{ - "name":"InvokeAsync", - "http":{ - "method":"POST", - "requestUri":"/2014-11-13/functions/{FunctionName}/invoke-async", - "responseCode":202 - }, - "input":{"shape":"InvokeAsyncRequest"}, - "output":{"shape":"InvokeAsyncResponse"}, - "errors":[ - {"shape":"InvalidRuntimeException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"InvalidRequestContentException"} - ], - "documentation":"

For asynchronous function invocation, use Invoke.

Invokes a function asynchronously.

If you do use the InvokeAsync action, note that it doesn't support X-Ray active tracing. The trace ID is not propagated to the function, even if X-Ray active tracing is turned on.

", - "deprecated":true - }, - "InvokeWithResponseStream":{ - "name":"InvokeWithResponseStream", - "http":{ - "method":"POST", - "requestUri":"/2021-11-15/functions/{FunctionName}/response-streaming-invocations", - "responseCode":200 - }, - "input":{"shape":"InvokeWithResponseStreamRequest"}, - "output":{"shape":"InvokeWithResponseStreamResponse"}, - "errors":[ - {"shape":"ResourceNotReadyException"}, - {"shape":"InvalidSecurityGroupIDException"}, - {"shape":"SnapStartTimeoutException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"EC2ThrottledException"}, - {"shape":"EFSMountConnectivityException"}, - {"shape":"SubnetIPAddressLimitReachedException"}, - {"shape":"KMSAccessDeniedException"}, - {"shape":"RequestTooLargeException"}, - {"shape":"KMSDisabledException"}, - {"shape":"UnsupportedMediaTypeException"}, - {"shape":"InvalidRuntimeException"}, - {"shape":"EC2UnexpectedException"}, - {"shape":"InvalidSubnetIDException"}, - {"shape":"KMSNotFoundException"}, - {"shape":"InvalidParameterValueException"}, - {"shape":"EC2AccessDeniedException"}, - {"shape":"EFSIOException"}, - {"shape":"KMSInvalidStateException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ENILimitReachedException"}, - {"shape":"SnapStartNotReadyException"}, - {"shape":"ServiceException"}, - {"shape":"SnapStartException"}, - {"shape":"RecursiveInvocationException"}, - {"shape":"EFSMountTimeoutException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"InvalidRequestContentException"}, - {"shape":"InvalidZipFileException"}, - {"shape":"EFSMountFailureException"} - ], - "documentation":"

Configure your Lambda functions to stream response payloads back to clients. For more information, see Configuring a Lambda function to stream responses.

This operation requires permission for the lambda:InvokeFunction action. For details on how to set up permissions for cross-account invocations, see Granting function access to other accounts.
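
As a hedged boto3 sketch (an illustration under the assumption that the target function is configured for response streaming; the function name is a placeholder), consuming the streamed response might look like this:

    # Hypothetical sketch: read streamed payload chunks as they arrive.
    import boto3

    lambda_client = boto3.client("lambda")
    response = lambda_client.invoke_with_response_stream(
        FunctionName="my-streaming-function",  # placeholder
        Payload=b"{}",
    )
    for event in response["EventStream"]:
        if "PayloadChunk" in event:
            print(event["PayloadChunk"]["Payload"].decode(), end="")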

" - }, - "ListAliases":{ - "name":"ListAliases", - "http":{ - "method":"GET", - "requestUri":"/2015-03-31/functions/{FunctionName}/aliases", - "responseCode":200 - }, - "input":{"shape":"ListAliasesRequest"}, - "output":{"shape":"ListAliasesResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns a list of aliases for a Lambda function.

", - "readonly":true - }, - "ListCodeSigningConfigs":{ - "name":"ListCodeSigningConfigs", - "http":{ - "method":"GET", - "requestUri":"/2020-04-22/code-signing-configs", - "responseCode":200 - }, - "input":{"shape":"ListCodeSigningConfigsRequest"}, - "output":{"shape":"ListCodeSigningConfigsResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"} - ], - "documentation":"

Returns a list of code signing configurations. A request returns up to 10,000 configurations per call. You can use the MaxItems parameter to return fewer configurations per call.

", - "readonly":true - }, - "ListDurableExecutionsByFunction":{ - "name":"ListDurableExecutionsByFunction", - "http":{ - "method":"GET", - "requestUri":"/2025-12-01/functions/{FunctionName}/durable-executions", - "responseCode":200 - }, - "input":{"shape":"ListDurableExecutionsByFunctionRequest"}, - "output":{"shape":"ListDurableExecutionsByFunctionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ServiceException"} - ], - "readonly":true - }, - "ListEventSourceMappings":{ - "name":"ListEventSourceMappings", - "http":{ - "method":"GET", - "requestUri":"/2015-03-31/event-source-mappings", - "responseCode":200 - }, - "input":{"shape":"ListEventSourceMappingsRequest"}, - "output":{"shape":"ListEventSourceMappingsResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Lists event source mappings. Specify an EventSourceArn to show only event source mappings for a single event source.

", - "readonly":true - }, - "ListFunctionEventInvokeConfigs":{ - "name":"ListFunctionEventInvokeConfigs", - "http":{ - "method":"GET", - "requestUri":"/2019-09-25/functions/{FunctionName}/event-invoke-config/list", - "responseCode":200 - }, - "input":{"shape":"ListFunctionEventInvokeConfigsRequest"}, - "output":{"shape":"ListFunctionEventInvokeConfigsResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Retrieves a list of configurations for asynchronous invocation for a function.

To configure options for asynchronous invocation, use PutFunctionEventInvokeConfig.

", - "readonly":true - }, - "ListFunctionUrlConfigs":{ - "name":"ListFunctionUrlConfigs", - "http":{ - "method":"GET", - "requestUri":"/2021-10-31/functions/{FunctionName}/urls", - "responseCode":200 - }, - "input":{"shape":"ListFunctionUrlConfigsRequest"}, - "output":{"shape":"ListFunctionUrlConfigsResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns a list of Lambda function URLs for the specified function.

", - "readonly":true - }, - "ListFunctions":{ - "name":"ListFunctions", - "http":{ - "method":"GET", - "requestUri":"/2015-03-31/functions", - "responseCode":200 - }, - "input":{"shape":"ListFunctionsRequest"}, - "output":{"shape":"ListFunctionsResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"} - ], - "documentation":"

Returns a list of Lambda functions, with the version-specific configuration of each. Lambda returns up to 50 functions per call.

Set FunctionVersion to ALL to include all published versions of each function in addition to the unpublished version.

The ListFunctions operation returns a subset of the FunctionConfiguration fields. To get the additional fields (State, StateReasonCode, StateReason, LastUpdateStatus, LastUpdateStatusReason, LastUpdateStatusReasonCode, RuntimeVersionConfig) for a function or version, use GetFunction.
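
As a brief boto3 sketch (an editor's illustration, not part of this service model), paging through every function and its published versions might look like this:

    # Hypothetical sketch: list all functions and versions with a paginator.
    import boto3

    lambda_client = boto3.client("lambda")
    paginator = lambda_client.get_paginator("list_functions")
    for page in paginator.paginate(FunctionVersion="ALL"):
        for fn in page["Functions"]:
            print(fn["FunctionName"], fn["Version"])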

", - "readonly":true - }, - "ListFunctionsByCodeSigningConfig":{ - "name":"ListFunctionsByCodeSigningConfig", - "http":{ - "method":"GET", - "requestUri":"/2020-04-22/code-signing-configs/{CodeSigningConfigArn}/functions", - "responseCode":200 - }, - "input":{"shape":"ListFunctionsByCodeSigningConfigRequest"}, - "output":{"shape":"ListFunctionsByCodeSigningConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Lists the functions that use the specified code signing configuration. You can use this method prior to deleting a code signing configuration to verify that no functions are using it.

", - "readonly":true - }, - "ListLayerVersions":{ - "name":"ListLayerVersions", - "http":{ - "method":"GET", - "requestUri":"/2018-10-31/layers/{LayerName}/versions", - "responseCode":200 - }, - "input":{"shape":"ListLayerVersionsRequest"}, - "output":{"shape":"ListLayerVersionsResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Lists the versions of a Lambda layer. Versions that have been deleted aren't listed. Specify a runtime identifier to list only versions that indicate that they're compatible with that runtime. Specify a compatible architecture to include only layer versions that are compatible with that architecture.

", - "readonly":true - }, - "ListLayers":{ - "name":"ListLayers", - "http":{ - "method":"GET", - "requestUri":"/2018-10-31/layers", - "responseCode":200 - }, - "input":{"shape":"ListLayersRequest"}, - "output":{"shape":"ListLayersResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"} - ], - "documentation":"

Lists Lambda layers and shows information about the latest version of each. Specify a runtime identifier to list only layers that indicate that they're compatible with that runtime. Specify a compatible architecture to include only layers that are compatible with that instruction set architecture.

", - "readonly":true - }, - "ListProvisionedConcurrencyConfigs":{ - "name":"ListProvisionedConcurrencyConfigs", - "http":{ - "method":"GET", - "requestUri":"/2019-09-30/functions/{FunctionName}/provisioned-concurrency?List=ALL", - "responseCode":200 - }, - "input":{"shape":"ListProvisionedConcurrencyConfigsRequest"}, - "output":{"shape":"ListProvisionedConcurrencyConfigsResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Retrieves a list of provisioned concurrency configurations for a function.

", - "readonly":true - }, - "ListTags":{ - "name":"ListTags", - "http":{ - "method":"GET", - "requestUri":"/2017-03-31/tags/{Resource}", - "responseCode":200 - }, - "input":{"shape":"ListTagsRequest"}, - "output":{"shape":"ListTagsResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns the tags for a function, event source mapping, or code signing configuration. You can also view function tags with GetFunction.

", - "readonly":true - }, - "ListVersionsByFunction":{ - "name":"ListVersionsByFunction", - "http":{ - "method":"GET", - "requestUri":"/2015-03-31/functions/{FunctionName}/versions", - "responseCode":200 - }, - "input":{"shape":"ListVersionsByFunctionRequest"}, - "output":{"shape":"ListVersionsByFunctionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Returns a list of versions, with the version-specific configuration of each. Lambda returns up to 50 versions per call.

", - "readonly":true - }, - "PublishLayerVersion":{ - "name":"PublishLayerVersion", - "http":{ - "method":"POST", - "requestUri":"/2018-10-31/layers/{LayerName}/versions", - "responseCode":201 - }, - "input":{"shape":"PublishLayerVersionRequest"}, - "output":{"shape":"PublishLayerVersionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"CodeStorageExceededException"} - ], - "documentation":"

Creates a Lambda layer from a ZIP archive. Each time you call PublishLayerVersion with the same layer name, a new version is created.

Add layers to your function with CreateFunction or UpdateFunctionConfiguration.
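
As a rough boto3 sketch (an illustration only; the layer name and file path are placeholders), publishing a new layer version from a local archive might look like this:

    # Hypothetical sketch: publish a layer version from a local .zip.
    import boto3

    lambda_client = boto3.client("lambda")
    with open("layer.zip", "rb") as f:  # placeholder path
        response = lambda_client.publish_layer_version(
            LayerName="my-layer",  # placeholder
            Content={"ZipFile": f.read()},
            CompatibleRuntimes=["python3.12"],
        )
    print(response["LayerVersionArn"])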

" - }, - "PublishVersion":{ - "name":"PublishVersion", - "http":{ - "method":"POST", - "requestUri":"/2015-03-31/functions/{FunctionName}/versions", - "responseCode":201 - }, - "input":{"shape":"PublishVersionRequest"}, - "output":{"shape":"FunctionConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"CodeStorageExceededException"}, - {"shape":"PreconditionFailedException"} - ], - "documentation":"

Creates a version from the current code and configuration of a function. Use versions to create a snapshot of your function code and configuration that doesn't change.

Lambda doesn't publish a version if the function's configuration and code haven't changed since the last version. Use UpdateFunctionCode or UpdateFunctionConfiguration to update the function before publishing a version.

Clients can invoke versions directly or with an alias. To create an alias, use CreateAlias.

" - }, - "PutFunctionCodeSigningConfig":{ - "name":"PutFunctionCodeSigningConfig", - "http":{ - "method":"PUT", - "requestUri":"/2020-06-30/functions/{FunctionName}/code-signing-config", - "responseCode":200 - }, - "input":{"shape":"PutFunctionCodeSigningConfigRequest"}, - "output":{"shape":"PutFunctionCodeSigningConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"CodeSigningConfigNotFoundException"} - ], - "documentation":"

Update the code signing configuration for the function. Changes to the code signing configuration take effect the next time a user tries to deploy a code package to the function.

" - }, - "PutFunctionConcurrency":{ - "name":"PutFunctionConcurrency", - "http":{ - "method":"PUT", - "requestUri":"/2017-10-31/functions/{FunctionName}/concurrency", - "responseCode":200 - }, - "input":{"shape":"PutFunctionConcurrencyRequest"}, - "output":{"shape":"Concurrency"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Sets the maximum number of simultaneous executions for a function, and reserves capacity for that concurrency level.

Concurrency settings apply to the function as a whole, including all published versions and the unpublished version. Reserving concurrency both ensures that your function has capacity to process the specified number of events simultaneously, and prevents it from scaling beyond that level. Use GetFunction to see the current setting for a function.

Use GetAccountSettings to see your Regional concurrency limit. You can reserve concurrency for as many functions as you like, as long as you leave at least 100 simultaneous executions unreserved for functions that aren't configured with a per-function limit. For more information, see Lambda function scaling.
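
As a brief boto3 sketch (an editor's illustration; the function name and limit are placeholders), reserving concurrency for a function might look like this:

    # Hypothetical sketch: reserve 100 concurrent executions.
    import boto3

    lambda_client = boto3.client("lambda")
    lambda_client.put_function_concurrency(
        FunctionName="my-function",  # placeholder
        ReservedConcurrentExecutions=100,
    )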

" - }, - "PutFunctionEventInvokeConfig":{ - "name":"PutFunctionEventInvokeConfig", - "http":{ - "method":"PUT", - "requestUri":"/2019-09-25/functions/{FunctionName}/event-invoke-config", - "responseCode":200 - }, - "input":{"shape":"PutFunctionEventInvokeConfigRequest"}, - "output":{"shape":"FunctionEventInvokeConfig"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Configures options for asynchronous invocation on a function, version, or alias. If a configuration already exists for a function, version, or alias, this operation overwrites it. If you exclude any settings, they are removed. To set one option without affecting existing settings for other options, use UpdateFunctionEventInvokeConfig.

By default, Lambda retries an asynchronous invocation twice if the function returns an error. It retains events in a queue for up to six hours. When an event fails all processing attempts or stays in the asynchronous invocation queue for too long, Lambda discards it. To retain discarded events, configure a dead-letter queue with UpdateFunctionConfiguration.

To send an invocation record to a queue, topic, S3 bucket, function, or event bus, specify a destination. You can configure separate destinations for successful invocations (on-success) and events that fail all processing attempts (on-failure). You can configure destinations in addition to or instead of a dead-letter queue.

S3 buckets are supported only for on-failure destinations. To retain records of successful invocations, use another destination type.
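
As a hedged boto3 sketch (an illustration, not part of this service model; the function name and queue ARN are placeholders), tuning async retries and routing failed events to an SQS queue might look like this:

    # Hypothetical sketch: limit retries and set an on-failure destination.
    import boto3

    lambda_client = boto3.client("lambda")
    lambda_client.put_function_event_invoke_config(
        FunctionName="my-function",
        MaximumRetryAttempts=1,
        MaximumEventAgeInSeconds=3600,
        DestinationConfig={
            # Placeholder queue ARN for events that fail all attempts.
            "OnFailure": {"Destination": "arn:aws:sqs:us-east-1:123456789012:failed-events"},
        },
    )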

" - }, - "PutFunctionRecursionConfig":{ - "name":"PutFunctionRecursionConfig", - "http":{ - "method":"PUT", - "requestUri":"/2024-08-31/functions/{FunctionName}/recursion-config", - "responseCode":200 - }, - "input":{"shape":"PutFunctionRecursionConfigRequest"}, - "output":{"shape":"PutFunctionRecursionConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Sets your function's recursive loop detection configuration.

When you configure a Lambda function to output to the same service or resource that invokes the function, it's possible to create an infinite recursive loop. For example, a Lambda function might write a message to an Amazon Simple Queue Service (Amazon SQS) queue, which then invokes the same function. This invocation causes the function to write another message to the queue, which in turn invokes the function again.

Lambda can detect certain types of recursive loops shortly after they occur. When Lambda detects a recursive loop and your function's recursive loop detection configuration is set to Terminate, it stops your function from being invoked and notifies you.

" - }, - "PutProvisionedConcurrencyConfig":{ - "name":"PutProvisionedConcurrencyConfig", - "http":{ - "method":"PUT", - "requestUri":"/2019-09-30/functions/{FunctionName}/provisioned-concurrency", - "responseCode":202 - }, - "input":{"shape":"PutProvisionedConcurrencyConfigRequest"}, - "output":{"shape":"PutProvisionedConcurrencyConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Adds a provisioned concurrency configuration to a function's alias or version.

", - "idempotent":true - }, - "PutRuntimeManagementConfig":{ - "name":"PutRuntimeManagementConfig", - "http":{ - "method":"PUT", - "requestUri":"/2021-07-20/functions/{FunctionName}/runtime-management-config", - "responseCode":200 - }, - "input":{"shape":"PutRuntimeManagementConfigRequest"}, - "output":{"shape":"PutRuntimeManagementConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Sets the runtime management configuration for a function's version. For more information, see Runtime updates.

" - }, - "RemoveLayerVersionPermission":{ - "name":"RemoveLayerVersionPermission", - "http":{ - "method":"DELETE", - "requestUri":"/2018-10-31/layers/{LayerName}/versions/{VersionNumber}/policy/{StatementId}", - "responseCode":204 - }, - "input":{"shape":"RemoveLayerVersionPermissionRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"PreconditionFailedException"} - ], - "documentation":"

Removes a statement from the permissions policy for a version of a Lambda layer. For more information, see AddLayerVersionPermission.

" - }, - "RemovePermission":{ - "name":"RemovePermission", - "http":{ - "method":"DELETE", - "requestUri":"/2015-03-31/functions/{FunctionName}/policy/{StatementId}", - "responseCode":204 - }, - "input":{"shape":"RemovePermissionRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"PreconditionFailedException"} - ], - "documentation":"

Revokes function-use permission from an Amazon Web Services service or another Amazon Web Services account. You can get the ID of the statement from the output of GetPolicy.

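A minimal boto3 sketch that pairs this call with GetPolicy (function name and statement ID are placeholders):

```python
import boto3
import json

lambda_client = boto3.client("lambda")

# List the statement IDs in the function's resource-based policy.
policy = json.loads(lambda_client.get_policy(FunctionName="my-function")["Policy"])
for statement in policy["Statement"]:
    print(statement["Sid"])

# Revoke one of them.
lambda_client.remove_permission(
    FunctionName="my-function",  # placeholder
    StatementId="s3-invoke",     # one of the Sid values printed above
)
```
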
" - }, - "SendDurableExecutionCallbackFailure":{ - "name":"SendDurableExecutionCallbackFailure", - "http":{ - "method":"POST", - "requestUri":"/2025-12-01/durable-execution-callbacks/{CallbackId}/fail", - "responseCode":200 - }, - "input":{"shape":"SendDurableExecutionCallbackFailureRequest"}, - "output":{"shape":"SendDurableExecutionCallbackFailureResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ServiceException"}, - {"shape":"CallbackTimeoutException"} - ] - }, - "SendDurableExecutionCallbackHeartbeat":{ - "name":"SendDurableExecutionCallbackHeartbeat", - "http":{ - "method":"POST", - "requestUri":"/2025-12-01/durable-execution-callbacks/{CallbackId}/heartbeat", - "responseCode":200 - }, - "input":{"shape":"SendDurableExecutionCallbackHeartbeatRequest"}, - "output":{"shape":"SendDurableExecutionCallbackHeartbeatResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ServiceException"}, - {"shape":"CallbackTimeoutException"} - ] - }, - "SendDurableExecutionCallbackSuccess":{ - "name":"SendDurableExecutionCallbackSuccess", - "http":{ - "method":"POST", - "requestUri":"/2025-12-01/durable-execution-callbacks/{CallbackId}/succeed", - "responseCode":200 - }, - "input":{"shape":"SendDurableExecutionCallbackSuccessRequest"}, - "output":{"shape":"SendDurableExecutionCallbackSuccessResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ServiceException"}, - {"shape":"CallbackTimeoutException"} - ] - }, - "StopDurableExecution":{ - "name":"StopDurableExecution", - "http":{ - "method":"POST", - "requestUri":"/2025-12-01/durable-executions/{DurableExecutionArn}/stop", - "responseCode":200 - }, - "input":{"shape":"StopDurableExecutionRequest"}, - "output":{"shape":"StopDurableExecutionResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ServiceException"}, - {"shape":"ResourceNotFoundException"} - ] - }, - "TagResource":{ - "name":"TagResource", - "http":{ - "method":"POST", - "requestUri":"/2017-03-31/tags/{Resource}", - "responseCode":204 - }, - "input":{"shape":"TagResourceRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Adds tags to a function, event source mapping, or code signing configuration.

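For illustration only (the ARN is a placeholder), a boto3 sketch; because tags are attached by ARN, the same call works for functions, event source mappings, and code signing configurations:

```python
import boto3

lambda_client = boto3.client("lambda")

lambda_client.tag_resource(
    Resource="arn:aws:lambda:us-east-1:123456789012:function:my-function",  # placeholder
    Tags={"team": "platform", "stage": "prod"},
)
```
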
" - }, - "UntagResource":{ - "name":"UntagResource", - "http":{ - "method":"DELETE", - "requestUri":"/2017-03-31/tags/{Resource}", - "responseCode":204 - }, - "input":{"shape":"UntagResourceRequest"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Removes tags from a function, event source mapping, or code signing configuration.

" - }, - "UpdateAlias":{ - "name":"UpdateAlias", - "http":{ - "method":"PUT", - "requestUri":"/2015-03-31/functions/{FunctionName}/aliases/{Name}", - "responseCode":200 - }, - "input":{"shape":"UpdateAliasRequest"}, - "output":{"shape":"AliasConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"PreconditionFailedException"} - ], - "documentation":"

Updates the configuration of a Lambda function alias.

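A hedged boto3 sketch that also exercises the routing configuration described under AliasRoutingConfiguration below (all names and versions are placeholders):

```python
import boto3

lambda_client = boto3.client("lambda")

# Point the alias at version 2 and canary 10% of traffic to version 3.
lambda_client.update_alias(
    FunctionName="my-function",  # placeholder
    Name="live",
    FunctionVersion="2",
    RoutingConfig={"AdditionalVersionWeights": {"3": 0.1}},
)
```
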
" - }, - "UpdateCodeSigningConfig":{ - "name":"UpdateCodeSigningConfig", - "http":{ - "method":"PUT", - "requestUri":"/2020-04-22/code-signing-configs/{CodeSigningConfigArn}", - "responseCode":200 - }, - "input":{"shape":"UpdateCodeSigningConfigRequest"}, - "output":{"shape":"UpdateCodeSigningConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ServiceException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Updates the code signing configuration. Changes to the code signing configuration take effect the next time a user tries to deploy a code package to the function.

" - }, - "UpdateEventSourceMapping":{ - "name":"UpdateEventSourceMapping", - "http":{ - "method":"PUT", - "requestUri":"/2015-03-31/event-source-mappings/{UUID}", - "responseCode":202 - }, - "input":{"shape":"UpdateEventSourceMappingRequest"}, - "output":{"shape":"EventSourceMappingConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceInUseException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Updates an event source mapping. You can change the function that Lambda invokes, or pause invocation and resume later from the same location.

For details about how to configure different event sources, see the following topics.

The following error handling options are available only for DynamoDB and Kinesis event sources:

For stream sources (DynamoDB, Kinesis, Amazon MSK, and self-managed Apache Kafka), the following option is also available:

For information about which configuration parameters apply to each event source, see the following topics.

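A minimal boto3 sketch of pausing and resuming a mapping (the UUID is a placeholder):

```python
import boto3

lambda_client = boto3.client("lambda")

mapping_uuid = "placeholder-mapping-uuid"

# Pause polling; records remain in the source until you resume.
lambda_client.update_event_source_mapping(UUID=mapping_uuid, Enabled=False)

# Resume later from the same position, with a smaller batch size.
lambda_client.update_event_source_mapping(UUID=mapping_uuid, Enabled=True, BatchSize=50)
```
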
" - }, - "UpdateFunctionCode":{ - "name":"UpdateFunctionCode", - "http":{ - "method":"PUT", - "requestUri":"/2015-03-31/functions/{FunctionName}/code", - "responseCode":200 - }, - "input":{"shape":"UpdateFunctionCodeRequest"}, - "output":{"shape":"FunctionConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"InvalidCodeSignatureException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"CodeVerificationFailedException"}, - {"shape":"CodeSigningConfigNotFoundException"}, - {"shape":"CodeStorageExceededException"}, - {"shape":"PreconditionFailedException"} - ], - "documentation":"

Updates a Lambda function's code. If code signing is enabled for the function, the code package must be signed by a trusted publisher. For more information, see Configuring code signing for Lambda.

If the function's package type is Image, then you must specify the code package in ImageUri as the URI of a container image in the Amazon ECR registry.

If the function's package type is Zip, then you must specify the deployment package as a .zip file archive. Enter the Amazon S3 bucket and key of the code .zip file location. You can also provide the function code inline using the ZipFile field.

The code in the deployment package must be compatible with the target instruction set architecture of the function (x86-64 or arm64).

The function's code is locked when you publish a version. You can't modify the code of a published version, only the unpublished version.

For a function defined as a container image, Lambda resolves the image tag to an image digest. In Amazon ECR, if you update the image tag to a new image, Lambda does not automatically update the function.

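As an illustration, a boto3 sketch deploying a .zip package from Amazon S3 (bucket, key, and function name are placeholders):

```python
import boto3

lambda_client = boto3.client("lambda")

# Deploy new code and publish it as a new, immutable version.
response = lambda_client.update_function_code(
    FunctionName="my-function",   # placeholder
    S3Bucket="my-deploy-bucket",  # placeholder
    S3Key="function.zip",         # placeholder
    Publish=True,
)
print("published version:", response["Version"])
```
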
" - }, - "UpdateFunctionConfiguration":{ - "name":"UpdateFunctionConfiguration", - "http":{ - "method":"PUT", - "requestUri":"/2015-03-31/functions/{FunctionName}/configuration", - "responseCode":200 - }, - "input":{"shape":"UpdateFunctionConfigurationRequest"}, - "output":{"shape":"FunctionConfiguration"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"InvalidCodeSignatureException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"CodeVerificationFailedException"}, - {"shape":"CodeSigningConfigNotFoundException"}, - {"shape":"PreconditionFailedException"} - ], - "documentation":"

Modify the version-specific settings of a Lambda function.

When you update a function, Lambda provisions an instance of the function and its supporting resources. If your function connects to a VPC, this process can take a minute. During this time, you can't modify the function, but you can still invoke it. The LastUpdateStatus, LastUpdateStatusReason, and LastUpdateStatusReasonCode fields in the response from GetFunctionConfiguration indicate when the update is complete and the function is processing events with the new configuration. For more information, see Lambda function states.

These settings can vary between versions of a function and are locked when you publish a version. You can't modify the configuration of a published version, only the unpublished version.

To configure function concurrency, use PutFunctionConcurrency. To grant invoke permissions to an Amazon Web Services account or Amazon Web Services service, use AddPermission.

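A hedged boto3 sketch that updates settings and then waits for LastUpdateStatus to leave InProgress (names are placeholders):

```python
import boto3

lambda_client = boto3.client("lambda")

lambda_client.update_function_configuration(
    FunctionName="my-function",  # placeholder
    MemorySize=512,
    Timeout=30,
)

# Polls GetFunctionConfiguration until the update completes.
lambda_client.get_waiter("function_updated_v2").wait(FunctionName="my-function")
```
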
" - }, - "UpdateFunctionEventInvokeConfig":{ - "name":"UpdateFunctionEventInvokeConfig", - "http":{ - "method":"POST", - "requestUri":"/2019-09-25/functions/{FunctionName}/event-invoke-config", - "responseCode":200 - }, - "input":{"shape":"UpdateFunctionEventInvokeConfigRequest"}, - "output":{"shape":"FunctionEventInvokeConfig"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Updates the configuration for asynchronous invocation for a function, version, or alias.

To configure options for asynchronous invocation, use PutFunctionEventInvokeConfig.

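For illustration, a boto3 sketch (function name and values are placeholders):

```python
import boto3

lambda_client = boto3.client("lambda")

# Retry a failed asynchronous event once, and drop events older than an hour.
lambda_client.update_function_event_invoke_config(
    FunctionName="my-function",  # placeholder
    MaximumRetryAttempts=1,
    MaximumEventAgeInSeconds=3600,
)
```
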
" - }, - "UpdateFunctionUrlConfig":{ - "name":"UpdateFunctionUrlConfig", - "http":{ - "method":"PUT", - "requestUri":"/2021-10-31/functions/{FunctionName}/url", - "responseCode":200 - }, - "input":{"shape":"UpdateFunctionUrlConfigRequest"}, - "output":{"shape":"UpdateFunctionUrlConfigResponse"}, - "errors":[ - {"shape":"InvalidParameterValueException"}, - {"shape":"ResourceConflictException"}, - {"shape":"ServiceException"}, - {"shape":"TooManyRequestsException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Updates the configuration for a Lambda function URL.

" - } - }, - "shapes":{ - "AccountLimit":{ - "type":"structure", - "members":{ - "TotalCodeSize":{ - "shape":"Long", - "documentation":"

The amount of storage space that you can use for all deployment packages and layer archives.

" - }, - "CodeSizeUnzipped":{ - "shape":"Long", - "documentation":"

The maximum size of a function's deployment package and layers when they're extracted.

" - }, - "CodeSizeZipped":{ - "shape":"Long", - "documentation":"

The maximum size of a deployment package when it's uploaded directly to Lambda. Use Amazon S3 for larger files.

" - }, - "ConcurrentExecutions":{ - "shape":"Integer", - "documentation":"

The maximum number of simultaneous function executions.

" - }, - "UnreservedConcurrentExecutions":{ - "shape":"UnreservedConcurrentExecutions", - "documentation":"

The maximum number of simultaneous function executions, minus the capacity that's reserved for individual functions with PutFunctionConcurrency.

" - } - }, - "documentation":"

Limits that are related to concurrency and storage. All file and storage sizes are in bytes.

" - }, - "AccountUsage":{ - "type":"structure", - "members":{ - "TotalCodeSize":{ - "shape":"Long", - "documentation":"

The amount of storage space, in bytes, that's being used by deployment packages and layer archives.

" - }, - "FunctionCount":{ - "shape":"Long", - "documentation":"

The number of Lambda functions.

" - } - }, - "documentation":"

The number of functions and amount of storage in use.

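Both structures are returned by GetAccountSettings; a minimal boto3 sketch of reading them:

```python
import boto3

lambda_client = boto3.client("lambda")

settings = lambda_client.get_account_settings()
limit, usage = settings["AccountLimit"], settings["AccountUsage"]

# All sizes are in bytes.
print("code storage used:", usage["TotalCodeSize"], "of", limit["TotalCodeSize"])
print("functions:", usage["FunctionCount"])
```
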
" - }, - "Action":{ - "type":"string", - "pattern":"(lambda:[*]|lambda:[a-zA-Z]+|[*])" - }, - "AddLayerVersionPermissionRequest":{ - "type":"structure", - "required":[ - "LayerName", - "VersionNumber", - "StatementId", - "Action", - "Principal" - ], - "members":{ - "LayerName":{ - "shape":"LayerName", - "documentation":"

The name or Amazon Resource Name (ARN) of the layer.

", - "location":"uri", - "locationName":"LayerName" - }, - "VersionNumber":{ - "shape":"LayerVersionNumber", - "documentation":"

The version number.

", - "location":"uri", - "locationName":"VersionNumber" - }, - "StatementId":{ - "shape":"StatementId", - "documentation":"

An identifier that distinguishes the policy from others on the same layer version.

" - }, - "Action":{ - "shape":"LayerPermissionAllowedAction", - "documentation":"

The API action that grants access to the layer. For example, lambda:GetLayerVersion.

" - }, - "Principal":{ - "shape":"LayerPermissionAllowedPrincipal", - "documentation":"

An account ID, or * to grant layer usage permission to all accounts in an organization, or to all Amazon Web Services accounts (if OrganizationId is not specified). In the latter case, make sure that you really do want all Amazon Web Services accounts to have usage permission for this layer.

" - }, - "OrganizationId":{ - "shape":"OrganizationId", - "documentation":"

With the principal set to *, grant permission to all accounts in the specified organization.

" - }, - "RevisionId":{ - "shape":"String", - "documentation":"

Only update the policy if the revision ID matches the ID specified. Use this option to avoid modifying a policy that has changed since you last read it.

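Putting the request members above together, a hedged boto3 sketch (layer name, statement ID, and organization ID are placeholders):

```python
import boto3

lambda_client = boto3.client("lambda")

# Let every account in one organization use version 1 of the layer.
lambda_client.add_layer_version_permission(
    LayerName="my-layer",           # placeholder
    VersionNumber=1,
    StatementId="org-access",       # placeholder
    Action="lambda:GetLayerVersion",
    Principal="*",
    OrganizationId="o-a1b2c3d4e5",  # placeholder organization ID
)
```
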
", - "location":"querystring", - "locationName":"RevisionId" - } - } - }, - "AddLayerVersionPermissionResponse":{ - "type":"structure", - "members":{ - "Statement":{ - "shape":"String", - "documentation":"

The permission statement.

" - }, - "RevisionId":{ - "shape":"String", - "documentation":"

A unique identifier for the current revision of the policy.

" - } - } - }, - "AddPermissionRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "StatementId", - "Action", - "Principal" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function, version, or alias.

Name formats

You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "StatementId":{ - "shape":"StatementId", - "documentation":"

A statement identifier that differentiates the statement from others in the same policy.

" - }, - "Action":{ - "shape":"Action", - "documentation":"

The action that the principal can use on the function. For example, lambda:InvokeFunction or lambda:GetFunction.

" - }, - "Principal":{ - "shape":"Principal", - "documentation":"

The Amazon Web Services service, Amazon Web Services account, IAM user, or IAM role that invokes the function. If you specify a service, use SourceArn or SourceAccount to limit who can invoke the function through that service.

" - }, - "SourceArn":{ - "shape":"Arn", - "documentation":"

For Amazon Web Services services, the ARN of the Amazon Web Services resource that invokes the function. For example, an Amazon S3 bucket or Amazon SNS topic.

Note that Lambda configures the comparison using the StringLike operator.

" - }, - "SourceAccount":{ - "shape":"SourceOwner", - "documentation":"

For Amazon Web Services services, the ID of the Amazon Web Services account that owns the resource. Use this together with SourceArn to ensure that the specified account owns the resource. It is possible for an Amazon S3 bucket to be deleted by its owner and recreated by another account.

" - }, - "EventSourceToken":{ - "shape":"EventSourceToken", - "documentation":"

For Alexa Smart Home functions, a token that the invoker must supply.

" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

Specify a version or alias to add permissions to a published version of the function.

", - "location":"querystring", - "locationName":"Qualifier" - }, - "RevisionId":{ - "shape":"String", - "documentation":"

Update the policy only if the revision ID matches the ID that's specified. Use this option to avoid modifying a policy that has changed since you last read it.

" - }, - "PrincipalOrgID":{ - "shape":"PrincipalOrgID", - "documentation":"

The identifier for your organization in Organizations. Use this to grant permissions to all the Amazon Web Services accounts under this organization.

" - }, - "FunctionUrlAuthType":{ - "shape":"FunctionUrlAuthType", - "documentation":"

The type of authentication that your function URL uses. Set to AWS_IAM if you want to restrict access to authenticated users only. Set to NONE if you want to bypass IAM authentication to create a public endpoint. For more information, see Security and auth model for Lambda function URLs.

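Combining the members above, an illustrative boto3 sketch that lets one S3 bucket in your own account invoke the function (ARNs and IDs are placeholders):

```python
import boto3

lambda_client = boto3.client("lambda")

lambda_client.add_permission(
    FunctionName="my-function",          # placeholder
    StatementId="s3-invoke",             # placeholder
    Action="lambda:InvokeFunction",
    Principal="s3.amazonaws.com",
    SourceArn="arn:aws:s3:::my-bucket",  # placeholder bucket ARN
    SourceAccount="123456789012",        # placeholder account ID
)
```
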
" - } - } - }, - "AddPermissionResponse":{ - "type":"structure", - "members":{ - "Statement":{ - "shape":"String", - "documentation":"

The permission statement that's added to the function policy.

" - } - } - }, - "AdditionalVersion":{ - "type":"string", - "max":1024, - "min":1, - "pattern":"[0-9]+" - }, - "AdditionalVersionWeights":{ - "type":"map", - "key":{"shape":"AdditionalVersion"}, - "value":{"shape":"Weight"} - }, - "Alias":{ - "type":"string", - "max":128, - "min":1, - "pattern":"(?!^[0-9]+$)([a-zA-Z0-9-_]+)" - }, - "AliasConfiguration":{ - "type":"structure", - "members":{ - "AliasArn":{ - "shape":"FunctionArn", - "documentation":"

The Amazon Resource Name (ARN) of the alias.

" - }, - "Name":{ - "shape":"Alias", - "documentation":"

The name of the alias.

" - }, - "FunctionVersion":{ - "shape":"Version", - "documentation":"

The function version that the alias invokes.

" - }, - "Description":{ - "shape":"Description", - "documentation":"

A description of the alias.

" - }, - "RoutingConfig":{ - "shape":"AliasRoutingConfiguration", - "documentation":"

The routing configuration of the alias.

" - }, - "RevisionId":{ - "shape":"String", - "documentation":"

A unique identifier that changes when you update the alias.

" - } - }, - "documentation":"

Provides configuration information about a Lambda function alias.

" - }, - "AliasList":{ - "type":"list", - "member":{"shape":"AliasConfiguration"} - }, - "AliasRoutingConfiguration":{ - "type":"structure", - "members":{ - "AdditionalVersionWeights":{ - "shape":"AdditionalVersionWeights", - "documentation":"

The second version, and the percentage of traffic that's routed to it.

" - } - }, - "documentation":"

The traffic-shifting configuration of a Lambda function alias.

" - }, - "AllowCredentials":{ - "type":"boolean", - "box":true - }, - "AllowMethodsList":{ - "type":"list", - "member":{"shape":"Method"}, - "max":6, - "min":0 - }, - "AllowOriginsList":{ - "type":"list", - "member":{"shape":"Origin"}, - "max":100, - "min":0 - }, - "AllowedPublishers":{ - "type":"structure", - "required":["SigningProfileVersionArns"], - "members":{ - "SigningProfileVersionArns":{ - "shape":"SigningProfileVersionArns", - "documentation":"

The Amazon Resource Name (ARN) for each of the signing profiles. A signing profile defines a trusted user who can sign a code package.

" - } - }, - "documentation":"

List of signing profiles that can sign a code package.

" - }, - "AmazonManagedKafkaEventSourceConfig":{ - "type":"structure", - "members":{ - "ConsumerGroupId":{ - "shape":"URI", - "documentation":"

The identifier for the Kafka consumer group to join. The consumer group ID must be unique among all your Kafka event sources. After creating a Kafka event source mapping with the consumer group ID specified, you cannot update this value. For more information, see Customizable consumer group ID.

" - }, - "SchemaRegistryConfig":{ - "shape":"KafkaSchemaRegistryConfig", - "documentation":"

Specific configuration settings for a Kafka schema registry.

" - } - }, - "documentation":"

Specific configuration settings for an Amazon Managed Streaming for Apache Kafka (Amazon MSK) event source.

" - }, - "ApplicationLogLevel":{ - "type":"string", - "enum":[ - "TRACE", - "DEBUG", - "INFO", - "WARN", - "ERROR", - "FATAL" - ] - }, - "Architecture":{ - "type":"string", - "enum":[ - "x86_64", - "arm64" - ] - }, - "ArchitecturesList":{ - "type":"list", - "member":{"shape":"Architecture"}, - "max":1, - "min":1 - }, - "Arn":{ - "type":"string", - "pattern":"arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\\-])+:([a-z]{2}(-gov)?-[a-z]+-\\d{1})?:(\\d{12})?:(.*)" - }, - "AttemptCount":{ - "type":"integer", - "min":0 - }, - "BatchSize":{ - "type":"integer", - "box":true, - "max":10000, - "min":1 - }, - "BinaryOperationPayload":{ - "type":"blob", - "max":262144, - "min":0, - "sensitive":true - }, - "BisectBatchOnFunctionError":{ - "type":"boolean", - "box":true - }, - "Blob":{ - "type":"blob", - "sensitive":true - }, - "BlobStream":{ - "type":"blob", - "streaming":true - }, - "Boolean":{"type":"boolean"}, - "CallbackDetails":{ - "type":"structure", - "members":{ - "CallbackId":{"shape":"CallbackId"}, - "Result":{"shape":"OperationPayload"}, - "Error":{"shape":"ErrorObject"} - } - }, - "CallbackFailedDetails":{ - "type":"structure", - "required":["Error"], - "members":{ - "Error":{"shape":"EventError"} - } - }, - "CallbackId":{ - "type":"string", - "max":1024, - "min":1, - "pattern":"[A-Za-z0-9+/]+={0,2}" - }, - "CallbackOptions":{ - "type":"structure", - "members":{ - "TimeoutSeconds":{"shape":"DurationSeconds"}, - "HeartbeatTimeoutSeconds":{"shape":"DurationSeconds"} - } - }, - "CallbackStartedDetails":{ - "type":"structure", - "required":["CallbackId"], - "members":{ - "CallbackId":{"shape":"CallbackId"}, - "HeartbeatTimeout":{"shape":"DurationSeconds"}, - "Timeout":{"shape":"DurationSeconds"} - } - }, - "CallbackSucceededDetails":{ - "type":"structure", - "required":["Result"], - "members":{ - "Result":{"shape":"EventResult"} - } - }, - "CallbackTimedOutDetails":{ - "type":"structure", - "required":["Error"], - "members":{ - "Error":{"shape":"EventError"} - } - }, - "CallbackTimeoutException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "ChainedInvokeDetails":{ - "type":"structure", - "members":{ - "Result":{"shape":"OperationPayload"}, - "Error":{"shape":"ErrorObject"} - } - }, - "ChainedInvokeFailedDetails":{ - "type":"structure", - "required":["Error"], - "members":{ - "Error":{"shape":"EventError"} - } - }, - "ChainedInvokeOptions":{ - "type":"structure", - "members":{ - "FunctionName":{"shape":"FunctionName"} - } - }, - "ChainedInvokePendingDetails":{ - "type":"structure", - "required":[ - "Input", - "FunctionName" - ], - "members":{ - "Input":{"shape":"EventInput"}, - "FunctionName":{"shape":"FunctionName"} - } - }, - "ChainedInvokeStartedDetails":{ - "type":"structure", - "members":{ - "DurableExecutionArn":{"shape":"DurableExecutionArn"} - } - }, - "ChainedInvokeStoppedDetails":{ - "type":"structure", - "required":["Error"], - "members":{ - "Error":{"shape":"EventError"} - } - }, - "ChainedInvokeSucceededDetails":{ - "type":"structure", - "required":["Result"], - "members":{ - "Result":{"shape":"EventResult"} - } - }, - "ChainedInvokeTimedOutDetails":{ - "type":"structure", - "required":["Error"], - "members":{ - "Error":{"shape":"EventError"} - } - }, - "CheckpointDurableExecutionRequest":{ - "type":"structure", - "required":[ - "DurableExecutionArn", - "CheckpointToken" - ], - "members":{ - "DurableExecutionArn":{ - "shape":"DurableExecutionArn", - 
"location":"uri", - "locationName":"DurableExecutionArn" - }, - "CheckpointToken":{"shape":"CheckpointToken"}, - "Updates":{"shape":"OperationUpdates"}, - "ClientToken":{"shape":"ClientToken"} - } - }, - "CheckpointDurableExecutionResponse":{ - "type":"structure", - "required":["NewExecutionState"], - "members":{ - "CheckpointToken":{"shape":"CheckpointToken"}, - "NewExecutionState":{"shape":"CheckpointUpdatedExecutionState"} - } - }, - "CheckpointToken":{ - "type":"string", - "max":2048, - "min":1, - "pattern":"[A-Za-z0-9+/]+={0,2}" - }, - "CheckpointUpdatedExecutionState":{ - "type":"structure", - "members":{ - "Operations":{"shape":"Operations"}, - "NextMarker":{"shape":"String"} - } - }, - "ClientToken":{ - "type":"string", - "max":64, - "min":1, - "pattern":"[\\x21-\\x7E]+" - }, - "CodeSigningConfig":{ - "type":"structure", - "required":[ - "CodeSigningConfigId", - "CodeSigningConfigArn", - "AllowedPublishers", - "CodeSigningPolicies", - "LastModified" - ], - "members":{ - "CodeSigningConfigId":{ - "shape":"CodeSigningConfigId", - "documentation":"

Unique identifier for the code signing configuration.

" - }, - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"

The Amazon Resource Name (ARN) of the code signing configuration.

" - }, - "Description":{ - "shape":"Description", - "documentation":"

Code signing configuration description.

" - }, - "AllowedPublishers":{ - "shape":"AllowedPublishers", - "documentation":"

List of allowed publishers.

" - }, - "CodeSigningPolicies":{ - "shape":"CodeSigningPolicies", - "documentation":"

The code signing policy controls the validation failure action for signature mismatch or expiry.

" - }, - "LastModified":{ - "shape":"Timestamp", - "documentation":"

The date and time that the code signing configuration was last modified, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" - } - }, - "documentation":"

Details about a code signing configuration.

" - }, - "CodeSigningConfigArn":{ - "type":"string", - "max":200, - "min":0, - "pattern":"arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:code-signing-config:csc-[a-z0-9]{17}" - }, - "CodeSigningConfigId":{ - "type":"string", - "pattern":"csc-[a-zA-Z0-9-_\\.]{17}" - }, - "CodeSigningConfigList":{ - "type":"list", - "member":{"shape":"CodeSigningConfig"} - }, - "CodeSigningConfigNotFoundException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

The specified code signing configuration does not exist.

", - "error":{ - "httpStatusCode":404, - "senderFault":true - }, - "exception":true - }, - "CodeSigningPolicies":{ - "type":"structure", - "members":{ - "UntrustedArtifactOnDeployment":{ - "shape":"CodeSigningPolicy", - "documentation":"

Code signing configuration policy for deployment validation failure. If you set the policy to Enforce, Lambda blocks the deployment request if signature validation checks fail. If you set the policy to Warn, Lambda allows the deployment and creates a CloudWatch log.

Default value: Warn

" - } - }, - "documentation":"

Code signing configuration policies specify the validation failure action for signature mismatch or expiry.

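A hedged boto3 sketch of creating a configuration with the Enforce policy (the signing profile ARN is a placeholder):

```python
import boto3

lambda_client = boto3.client("lambda")

# Block, rather than merely log, deployments whose signature checks fail.
lambda_client.create_code_signing_config(
    AllowedPublishers={
        "SigningProfileVersionArns": [
            # placeholder signing profile version ARN
            "arn:aws:signer:us-east-1:123456789012:/signing-profiles/MyProfile/abcd1234",
        ]
    },
    CodeSigningPolicies={"UntrustedArtifactOnDeployment": "Enforce"},
)
```
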
" - }, - "CodeSigningPolicy":{ - "type":"string", - "enum":[ - "Warn", - "Enforce" - ] - }, - "CodeStorageExceededException":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"String", - "documentation":"

The exception type.

" - }, - "message":{"shape":"String"} - }, - "documentation":"

Your Amazon Web Services account has exceeded its maximum total code size. For more information, see Lambda quotas.

", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "CodeVerificationFailedException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

The code signature failed one or more of the validation checks for signature mismatch or expiry, and the code signing policy is set to ENFORCE. Lambda blocks the deployment.

", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "CollectionName":{ - "type":"string", - "max":57, - "min":1, - "pattern":"(^(?!(system\\x2e)))(^[_a-zA-Z0-9])([^$]*)" - }, - "CompatibleArchitectures":{ - "type":"list", - "member":{"shape":"Architecture"}, - "max":2, - "min":0 - }, - "CompatibleRuntimes":{ - "type":"list", - "member":{"shape":"Runtime"}, - "max":15, - "min":0 - }, - "Concurrency":{ - "type":"structure", - "members":{ - "ReservedConcurrentExecutions":{ - "shape":"ReservedConcurrentExecutions", - "documentation":"

The number of concurrent executions that are reserved for this function. For more information, see Managing Lambda reserved concurrency.

" - } - } - }, - "ContextDetails":{ - "type":"structure", - "members":{ - "ReplayChildren":{"shape":"ReplayChildren"}, - "Result":{"shape":"OperationPayload"}, - "Error":{"shape":"ErrorObject"} - } - }, - "ContextFailedDetails":{ - "type":"structure", - "required":["Error"], - "members":{ - "Error":{"shape":"EventError"} - } - }, - "ContextOptions":{ - "type":"structure", - "members":{ - "ReplayChildren":{"shape":"ReplayChildren"} - } - }, - "ContextStartedDetails":{ - "type":"structure", - "members":{} - }, - "ContextSucceededDetails":{ - "type":"structure", - "required":["Result"], - "members":{ - "Result":{"shape":"EventResult"} - } - }, - "Cors":{ - "type":"structure", - "members":{ - "AllowCredentials":{ - "shape":"AllowCredentials", - "documentation":"

Whether to allow cookies or other credentials in requests to your function URL. The default is false.

" - }, - "AllowHeaders":{ - "shape":"HeadersList", - "documentation":"

The HTTP headers that origins can include in requests to your function URL. For example: Date, Keep-Alive, X-Custom-Header.

" - }, - "AllowMethods":{ - "shape":"AllowMethodsList", - "documentation":"

The HTTP methods that are allowed when calling your function URL. For example: GET, POST, DELETE, or the wildcard character (*).

" - }, - "AllowOrigins":{ - "shape":"AllowOriginsList", - "documentation":"

The origins that can access your function URL. You can list any number of specific origins, separated by a comma. For example: https://www.example.com, http://localhost:60905.

Alternatively, you can grant access to all origins using the wildcard character (*).

" - }, - "ExposeHeaders":{ - "shape":"HeadersList", - "documentation":"

The HTTP headers in your function response that you want to expose to origins that call your function URL. For example: Date, Keep-Alive, X-Custom-Header.

" - }, - "MaxAge":{ - "shape":"MaxAge", - "documentation":"

The maximum amount of time, in seconds, that web browsers can cache results of a preflight request. By default, this is set to 0, which means that the browser doesn't cache results.

" - } - }, - "documentation":"

The cross-origin resource sharing (CORS) settings for your Lambda function URL. Use CORS to grant access to your function URL from any origin. You can also use CORS to control access for specific HTTP headers and methods in requests to your function URL.

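For illustration, a boto3 sketch of attaching these CORS settings when creating a function URL (function name and origin are placeholders):

```python
import boto3

lambda_client = boto3.client("lambda")

lambda_client.create_function_url_config(
    FunctionName="my-function",  # placeholder
    AuthType="AWS_IAM",
    Cors={
        "AllowOrigins": ["https://www.example.com"],  # placeholder origin
        "AllowMethods": ["GET", "POST"],
        "AllowHeaders": ["X-Custom-Header"],
        "MaxAge": 300,  # browsers may cache the preflight result for 5 minutes
    },
)
```
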
" - }, - "CreateAliasRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "Name", - "FunctionVersion" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Name":{ - "shape":"Alias", - "documentation":"

The name of the alias.

" - }, - "FunctionVersion":{ - "shape":"Version", - "documentation":"

The function version that the alias invokes.

" - }, - "Description":{ - "shape":"Description", - "documentation":"

A description of the alias.

" - }, - "RoutingConfig":{ - "shape":"AliasRoutingConfiguration", - "documentation":"

The routing configuration of the alias.

" - } - } - }, - "CreateCodeSigningConfigRequest":{ - "type":"structure", - "required":["AllowedPublishers"], - "members":{ - "Description":{ - "shape":"Description", - "documentation":"

Descriptive name for this code signing configuration.

" - }, - "AllowedPublishers":{ - "shape":"AllowedPublishers", - "documentation":"

Signing profiles for this code signing configuration.

" - }, - "CodeSigningPolicies":{ - "shape":"CodeSigningPolicies", - "documentation":"

The code signing policies define the actions to take if the validation checks fail.

" - }, - "Tags":{ - "shape":"Tags", - "documentation":"

A list of tags to add to the code signing configuration.

" - } - } - }, - "CreateCodeSigningConfigResponse":{ - "type":"structure", - "required":["CodeSigningConfig"], - "members":{ - "CodeSigningConfig":{ - "shape":"CodeSigningConfig", - "documentation":"

The code signing configuration.

" - } - } - }, - "CreateEventSourceMappingRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "EventSourceArn":{ - "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the event source.

" - }, - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it's limited to 64 characters in length.

" - }, - "Enabled":{ - "shape":"Enabled", - "documentation":"

When true, the event source mapping is active. When false, Lambda pauses polling and invocation.

Default: True

" - }, - "BatchSize":{ - "shape":"BatchSize", - "documentation":"

The maximum number of records in each batch that Lambda pulls from your stream or queue and sends to your function. Lambda passes all of the records in the batch to the function in a single call, up to the payload limit for synchronous invocation (6 MB).

" - }, - "FilterCriteria":{ - "shape":"FilterCriteria", - "documentation":"

An object that defines the filter criteria that determine whether Lambda should process an event. For more information, see Lambda event filtering.

" - }, - "MaximumBatchingWindowInSeconds":{ - "shape":"MaximumBatchingWindowInSeconds", - "documentation":"

The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function. You can configure MaximumBatchingWindowInSeconds to any value from 0 seconds to 300 seconds, in one-second increments.

For Kinesis, DynamoDB, and Amazon SQS event sources, the default batching window is 0 seconds. For Amazon MSK, self-managed Apache Kafka, Amazon MQ, and DocumentDB event sources, the default batching window is 500 ms. Note that because you can only change MaximumBatchingWindowInSeconds in one-second increments, you cannot revert to the 500 ms default batching window after you have changed it. To restore the default batching window, you must create a new event source mapping.

Related setting: For Kinesis, DynamoDB, and Amazon SQS event sources, when you set BatchSize to a value greater than 10, you must set MaximumBatchingWindowInSeconds to at least 1.

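A hedged boto3 sketch for an Amazon SQS source that respects the related setting above (queue ARN and function name are placeholders):

```python
import boto3

lambda_client = boto3.client("lambda")

# BatchSize > 10 on an SQS source requires a batching window of at least 1 second.
lambda_client.create_event_source_mapping(
    EventSourceArn="arn:aws:sqs:us-east-1:123456789012:my-queue",  # placeholder
    FunctionName="my-function",                                    # placeholder
    BatchSize=100,
    MaximumBatchingWindowInSeconds=5,
)
```
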
" - }, - "ParallelizationFactor":{ - "shape":"ParallelizationFactor", - "documentation":"

(Kinesis and DynamoDB Streams only) The number of batches to process from each shard concurrently.

" - }, - "StartingPosition":{ - "shape":"EventSourcePosition", - "documentation":"

The position in a stream from which to start reading. Required for Amazon Kinesis and Amazon DynamoDB Stream event sources. AT_TIMESTAMP is supported only for Amazon Kinesis streams, Amazon DocumentDB, Amazon MSK, and self-managed Apache Kafka.

" - }, - "StartingPositionTimestamp":{ - "shape":"Date", - "documentation":"

With StartingPosition set to AT_TIMESTAMP, the time from which to start reading. StartingPositionTimestamp cannot be in the future.

" - }, - "DestinationConfig":{ - "shape":"DestinationConfig", - "documentation":"

(Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Kafka only) A configuration object that specifies the destination of an event after Lambda processes it.

" - }, - "MaximumRecordAgeInSeconds":{ - "shape":"MaximumRecordAgeInSeconds", - "documentation":"

(Kinesis and DynamoDB Streams only) Discard records older than the specified age. The default value is infinite (-1).

" - }, - "BisectBatchOnFunctionError":{ - "shape":"BisectBatchOnFunctionError", - "documentation":"

(Kinesis and DynamoDB Streams only) If the function returns an error, split the batch in two and retry.

" - }, - "MaximumRetryAttempts":{ - "shape":"MaximumRetryAttemptsEventSourceMapping", - "documentation":"

(Kinesis and DynamoDB Streams only) Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.

" - }, - "Tags":{ - "shape":"Tags", - "documentation":"

A list of tags to apply to the event source mapping.

" - }, - "TumblingWindowInSeconds":{ - "shape":"TumblingWindowInSeconds", - "documentation":"

(Kinesis and DynamoDB Streams only) The duration in seconds of a processing window for DynamoDB and Kinesis Streams event sources. A value of 0 seconds indicates no tumbling window.

" - }, - "Topics":{ - "shape":"Topics", - "documentation":"

The name of the Kafka topic.

" - }, - "Queues":{ - "shape":"Queues", - "documentation":"

(MQ) The name of the Amazon MQ broker destination queue to consume.

" - }, - "SourceAccessConfigurations":{ - "shape":"SourceAccessConfigurations", - "documentation":"

An array of authentication protocols or VPC components required to secure your event source.

" - }, - "SelfManagedEventSource":{ - "shape":"SelfManagedEventSource", - "documentation":"

The self-managed Apache Kafka cluster to receive records from.

" - }, - "FunctionResponseTypes":{ - "shape":"FunctionResponseTypeList", - "documentation":"

(Kinesis, DynamoDB Streams, and Amazon SQS) A list of current response type enums applied to the event source mapping.

" - }, - "AmazonManagedKafkaEventSourceConfig":{ - "shape":"AmazonManagedKafkaEventSourceConfig", - "documentation":"

Specific configuration settings for an Amazon Managed Streaming for Apache Kafka (Amazon MSK) event source.

" - }, - "SelfManagedKafkaEventSourceConfig":{ - "shape":"SelfManagedKafkaEventSourceConfig", - "documentation":"

Specific configuration settings for a self-managed Apache Kafka event source.

" - }, - "ScalingConfig":{ - "shape":"ScalingConfig", - "documentation":"

(Amazon SQS only) The scaling configuration for the event source. For more information, see Configuring maximum concurrency for Amazon SQS event sources.

" - }, - "DocumentDBEventSourceConfig":{ - "shape":"DocumentDBEventSourceConfig", - "documentation":"

Specific configuration settings for a DocumentDB event source.

" - }, - "KMSKeyArn":{ - "shape":"KMSKeyArn", - "documentation":"

The ARN of the Key Management Service (KMS) customer managed key that Lambda uses to encrypt your function's filter criteria. By default, Lambda does not encrypt your filter criteria object. Specify this property to encrypt data using your own customer managed key.

" - }, - "MetricsConfig":{ - "shape":"EventSourceMappingMetricsConfig", - "documentation":"

The metrics configuration for your event source. For more information, see Event source mapping metrics.

" - }, - "ProvisionedPollerConfig":{ - "shape":"ProvisionedPollerConfig", - "documentation":"

(Amazon MSK and self-managed Apache Kafka only) The provisioned mode configuration for the event source. For more information, see provisioned mode.

" - } - } - }, - "CreateFunctionRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "Role", - "Code" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

" - }, - "Runtime":{ - "shape":"Runtime", - "documentation":"

The identifier of the function's runtime. Runtime is required if the deployment package is a .zip file archive. Specifying a runtime results in an error if you're deploying a function using a container image.

The following list includes deprecated runtimes. Lambda blocks creating new functions and updating existing functions shortly after each runtime is deprecated. For more information, see Runtime use after deprecation.

For a list of all currently supported runtimes, see Supported runtimes.

" - }, - "Role":{ - "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the function's execution role.

" - }, - "Handler":{ - "shape":"Handler", - "documentation":"

The name of the method within your code that Lambda calls to run your function. Handler is required if the deployment package is a .zip file archive. The format includes the file name. It can also include namespaces and other qualifiers, depending on the runtime. For more information, see Lambda programming model.

" - }, - "Code":{ - "shape":"FunctionCode", - "documentation":"

The code for the function.

" - }, - "Description":{ - "shape":"Description", - "documentation":"

A description of the function.

" - }, - "Timeout":{ - "shape":"Timeout", - "documentation":"

The amount of time (in seconds) that Lambda allows a function to run before stopping it. The default is 3 seconds. The maximum allowed value is 900 seconds. For more information, see Lambda execution environment.

" - }, - "MemorySize":{ - "shape":"MemorySize", - "documentation":"

The amount of memory available to the function at runtime. Increasing the function memory also increases its CPU allocation. The default value is 128 MB. The value can be any multiple of 1 MB.

" - }, - "Publish":{ - "shape":"Boolean", - "documentation":"

Set to true to publish the first version of the function during creation.

" - }, - "VpcConfig":{ - "shape":"VpcConfig", - "documentation":"

For network connectivity to Amazon Web Services resources in a VPC, specify a list of security groups and subnets in the VPC. When you connect a function to a VPC, it can access resources and the internet only through that VPC. For more information, see Configuring a Lambda function to access resources in a VPC.

" - }, - "PackageType":{ - "shape":"PackageType", - "documentation":"

The type of deployment package. Set to Image for container image and set to Zip for .zip file archive.

" - }, - "DeadLetterConfig":{ - "shape":"DeadLetterConfig", - "documentation":"

A dead-letter queue configuration that specifies the queue or topic where Lambda sends asynchronous events when they fail processing. For more information, see Dead-letter queues.

" - }, - "Environment":{ - "shape":"Environment", - "documentation":"

Environment variables that are accessible from function code during execution.

" - }, - "KMSKeyArn":{ - "shape":"KMSKeyArn", - "documentation":"

The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt the following resources:

If you don't provide a customer managed key, Lambda uses an Amazon Web Services owned key or an Amazon Web Services managed key.

" - }, - "TracingConfig":{ - "shape":"TracingConfig", - "documentation":"

Set Mode to Active to sample and trace a subset of incoming requests with X-Ray.

" - }, - "Tags":{ - "shape":"Tags", - "documentation":"

A list of tags to apply to the function.

" - }, - "Layers":{ - "shape":"LayerList", - "documentation":"

A list of function layers to add to the function's execution environment. Specify each layer by its ARN, including the version.

" - }, - "FileSystemConfigs":{ - "shape":"FileSystemConfigList", - "documentation":"

Connection settings for an Amazon EFS file system.

" - }, - "ImageConfig":{ - "shape":"ImageConfig", - "documentation":"

Container image configuration values that override the values in the container image Dockerfile.

" - }, - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"

To enable code signing for this function, specify the ARN of a code-signing configuration. A code-signing configuration includes a set of signing profiles, which define the trusted publishers for this function.

" - }, - "Architectures":{ - "shape":"ArchitecturesList", - "documentation":"

The instruction set architecture that the function supports. Enter a string array with one of the valid values (arm64 or x86_64). The default value is x86_64.

" - }, - "EphemeralStorage":{ - "shape":"EphemeralStorage", - "documentation":"

The size of the function's /tmp directory in MB. The default value is 512, but it can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).

" - }, - "SnapStart":{ - "shape":"SnapStart", - "documentation":"

The function's SnapStart setting.

" - }, - "LoggingConfig":{ - "shape":"LoggingConfig", - "documentation":"

The function's Amazon CloudWatch Logs configuration settings.

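Tying the required members together, an illustrative boto3 sketch of creating a .zip-based function (all names, the role ARN, and the package path are placeholders):

```python
import boto3

lambda_client = boto3.client("lambda")

with open("function.zip", "rb") as f:  # placeholder deployment package
    zip_bytes = f.read()

lambda_client.create_function(
    FunctionName="my-function",                               # placeholder
    Runtime="python3.12",
    Role="arn:aws:iam::123456789012:role/my-execution-role",  # placeholder
    Handler="app.handler",  # file app.py, function handler (placeholder)
    Code={"ZipFile": zip_bytes},
    MemorySize=256,
    Timeout=30,
    Architectures=["arm64"],
    Publish=True,
)
```
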
" - }, - "DurableConfig":{"shape":"DurableConfig"} - } - }, - "CreateFunctionUrlConfigRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "AuthType" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"FunctionUrlQualifier", - "documentation":"

The alias name.

", - "location":"querystring", - "locationName":"Qualifier" - }, - "AuthType":{ - "shape":"FunctionUrlAuthType", - "documentation":"

The type of authentication that your function URL uses. Set to AWS_IAM if you want to restrict access to authenticated users only. Set to NONE if you want to bypass IAM authentication to create a public endpoint. For more information, see Security and auth model for Lambda function URLs.

" - }, - "Cors":{ - "shape":"Cors", - "documentation":"

The cross-origin resource sharing (CORS) settings for your function URL.

" - }, - "InvokeMode":{ - "shape":"InvokeMode", - "documentation":"

Use one of the following options:

" - } - } - }, - "CreateFunctionUrlConfigResponse":{ - "type":"structure", - "required":[ - "FunctionUrl", - "FunctionArn", - "AuthType", - "CreationTime" - ], - "members":{ - "FunctionUrl":{ - "shape":"FunctionUrl", - "documentation":"

The HTTP URL endpoint for your function.

" - }, - "FunctionArn":{ - "shape":"FunctionArn", - "documentation":"

The Amazon Resource Name (ARN) of your function.

" - }, - "AuthType":{ - "shape":"FunctionUrlAuthType", - "documentation":"

The type of authentication that your function URL uses. Set to AWS_IAM if you want to restrict access to authenticated users only. Set to NONE if you want to bypass IAM authentication to create a public endpoint. For more information, see Security and auth model for Lambda function URLs.

" - }, - "Cors":{ - "shape":"Cors", - "documentation":"

The cross-origin resource sharing (CORS) settings for your function URL.

" - }, - "CreationTime":{ - "shape":"Timestamp", - "documentation":"

When the function URL was created, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" - }, - "InvokeMode":{ - "shape":"InvokeMode", - "documentation":"

Use one of the following options:

" - } - } - }, - "DatabaseName":{ - "type":"string", - "max":63, - "min":1, - "pattern":"[^ /\\.$\\x22]*" - }, - "Date":{"type":"timestamp"}, - "DeadLetterConfig":{ - "type":"structure", - "members":{ - "TargetArn":{ - "shape":"ResourceArn", - "documentation":"

The Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic.

" - } - }, - "documentation":"

The dead-letter queue for failed asynchronous invocations.

" - }, - "DeleteAliasRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "Name" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Name":{ - "shape":"Alias", - "documentation":"

The name of the alias.

", - "location":"uri", - "locationName":"Name" - } - } - }, - "DeleteCodeSigningConfigRequest":{ - "type":"structure", - "required":["CodeSigningConfigArn"], - "members":{ - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"

The Amazon Resource Name (ARN) of the code signing configuration.

", - "location":"uri", - "locationName":"CodeSigningConfigArn" - } - } - }, - "DeleteCodeSigningConfigResponse":{ - "type":"structure", - "members":{} - }, - "DeleteEventSourceMappingRequest":{ - "type":"structure", - "required":["UUID"], - "members":{ - "UUID":{ - "shape":"String", - "documentation":"

The identifier of the event source mapping.

", - "location":"uri", - "locationName":"UUID" - } - } - }, - "DeleteFunctionCodeSigningConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - } - } - }, - "DeleteFunctionConcurrencyRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - } - } - }, - "DeleteFunctionEventInvokeConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function, version, or alias.

Name formats

You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

A version number or alias name.

", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "DeleteFunctionRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function or version.

Name formats

You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

Specify a version to delete. You can't delete a version that an alias references.

", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "DeleteFunctionUrlConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"FunctionUrlQualifier", - "documentation":"

The alias name.

", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "DeleteLayerVersionRequest":{ - "type":"structure", - "required":[ - "LayerName", - "VersionNumber" - ], - "members":{ - "LayerName":{ - "shape":"LayerName", - "documentation":"

The name or Amazon Resource Name (ARN) of the layer.

", - "location":"uri", - "locationName":"LayerName" - }, - "VersionNumber":{ - "shape":"LayerVersionNumber", - "documentation":"

The version number.

", - "location":"uri", - "locationName":"VersionNumber" - } - } - }, - "DeleteProvisionedConcurrencyConfigRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "Qualifier" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

The version number or alias name.

", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "Description":{ - "type":"string", - "max":256, - "min":0 - }, - "DestinationArn":{ - "type":"string", - "max":350, - "min":0, - "pattern":"$|arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\\-])+:([a-z]{2}(-gov)?-[a-z]+-\\d{1})?:(\\d{12})?:(.*)" - }, - "DestinationConfig":{ - "type":"structure", - "members":{ - "OnSuccess":{ - "shape":"OnSuccess", - "documentation":"

The destination configuration for successful invocations. Not supported in CreateEventSourceMapping or UpdateEventSourceMapping.

" - }, - "OnFailure":{ - "shape":"OnFailure", - "documentation":"

The destination configuration for failed invocations.

" - } - }, - "documentation":"

A configuration object that specifies the destination of an event after Lambda processes it. For more information, see Adding a destination.

" - }, - "DocumentDBEventSourceConfig":{ - "type":"structure", - "members":{ - "DatabaseName":{ - "shape":"DatabaseName", - "documentation":"

The name of the database to consume within the DocumentDB cluster.

" - }, - "CollectionName":{ - "shape":"CollectionName", - "documentation":"

The name of the collection to consume within the database. If you do not specify a collection, Lambda consumes all collections.

" - }, - "FullDocument":{ - "shape":"FullDocument", - "documentation":"

Determines what DocumentDB sends to your event stream during document update operations. If set to UpdateLookup, DocumentDB sends a delta describing the changes, along with a copy of the entire document. Otherwise, DocumentDB sends only a partial document that contains the changes.

" - } - }, - "documentation":"

Specific configuration settings for a DocumentDB event source.

" - }, - "DurableConfig":{ - "type":"structure", - "members":{ - "RetentionPeriodInDays":{"shape":"RetentionPeriodInDays"}, - "ExecutionTimeout":{"shape":"ExecutionTimeout"} - } - }, - "DurableExecutionAlreadyStartedException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "error":{ - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - "DurableExecutionArn":{ - "type":"string", - "max":1024, - "min":1, - "pattern":"arn:([a-zA-Z0-9-]+):lambda:([a-zA-Z0-9-]+):(\\d{12}):function:([a-zA-Z0-9_-]+):(\\$LATEST(?:\\.PUBLISHED)?|[0-9]+)/durable-execution/([a-zA-Z0-9_-]+)/([a-zA-Z0-9_-]+)" - }, - "DurableExecutionName":{ - "type":"string", - "max":64, - "min":1, - "pattern":"[a-zA-Z0-9-_]+" - }, - "DurableExecutions":{ - "type":"list", - "member":{"shape":"Execution"} - }, - "DurationSeconds":{ - "type":"integer", - "box":true, - "min":0 - }, - "EC2AccessDeniedException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

Lambda needs additional permissions to configure VPC settings.

", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "EC2ThrottledException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

Amazon EC2 throttled Lambda during Lambda function initialization using the execution role provided for the function.

", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "EC2UnexpectedException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"}, - "EC2ErrorCode":{"shape":"String"} - }, - "documentation":"

Lambda received an unexpected Amazon EC2 client exception while setting up the Lambda function.

", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "EFSIOException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

An error occurred when reading from or writing to a connected file system.

", - "error":{ - "httpStatusCode":410, - "senderFault":true - }, - "exception":true - }, - "EFSMountConnectivityException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

The Lambda function couldn't make a network connection to the configured file system.

", - "error":{ - "httpStatusCode":408, - "senderFault":true - }, - "exception":true - }, - "EFSMountFailureException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

The Lambda function couldn't mount the configured file system due to a permission or configuration issue.

", - "error":{ - "httpStatusCode":403, - "senderFault":true - }, - "exception":true - }, - "EFSMountTimeoutException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

The Lambda function made a network connection to the configured file system, but the mount operation timed out.

", - "error":{ - "httpStatusCode":408, - "senderFault":true - }, - "exception":true - }, - "ENILimitReachedException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

Lambda couldn't create an elastic network interface in the VPC, specified as part of Lambda function configuration, because the limit for network interfaces has been reached. For more information, see Lambda quotas.

", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "Enabled":{ - "type":"boolean", - "box":true - }, - "EndPointType":{ - "type":"string", - "enum":["KAFKA_BOOTSTRAP_SERVERS"] - }, - "Endpoint":{ - "type":"string", - "max":300, - "min":1, - "pattern":"(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\\-]*[A-Za-z0-9]):[0-9]{1,5}" - }, - "EndpointLists":{ - "type":"list", - "member":{"shape":"Endpoint"}, - "max":10, - "min":1 - }, - "Endpoints":{ - "type":"map", - "key":{"shape":"EndPointType"}, - "value":{"shape":"EndpointLists"}, - "max":2, - "min":1 - }, - "Environment":{ - "type":"structure", - "members":{ - "Variables":{ - "shape":"EnvironmentVariables", - "documentation":"

Environment variable key-value pairs. For more information, see Using Lambda environment variables.

" - } - }, - "documentation":"

A function's environment variable settings. You can use environment variables to adjust your function's behavior without updating code. An environment variable is a pair of strings that are stored in a function's version-specific configuration.
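A minimal boto3 sketch of setting these variables; the function name and variable values are placeholders:

```python
import boto3

lambda_client = boto3.client("lambda")

# Replace the function's environment variables. The Variables map is a full
# replacement, not a merge, so include every variable the function needs.
lambda_client.update_function_configuration(
    FunctionName="my-function",  # hypothetical
    Environment={"Variables": {"LOG_LEVEL": "DEBUG", "TABLE_NAME": "orders"}},
)
```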

" - }, - "EnvironmentError":{ - "type":"structure", - "members":{ - "ErrorCode":{ - "shape":"String", - "documentation":"

The error code.

" - }, - "Message":{ - "shape":"SensitiveString", - "documentation":"

The error message.

" - } - }, - "documentation":"

Error messages for environment variables that couldn't be applied.

" - }, - "EnvironmentResponse":{ - "type":"structure", - "members":{ - "Variables":{ - "shape":"EnvironmentVariables", - "documentation":"

Environment variable key-value pairs. Omitted from CloudTrail logs.

" - }, - "Error":{ - "shape":"EnvironmentError", - "documentation":"

Error messages for environment variables that couldn't be applied.

" - } - }, - "documentation":"

The results of an operation to update or read environment variables. If the operation succeeds, the response contains the environment variables. If it fails, the response contains details about the error.

" - }, - "EnvironmentVariableName":{ - "type":"string", - "pattern":"[a-zA-Z]([a-zA-Z0-9_])+", - "sensitive":true - }, - "EnvironmentVariableValue":{ - "type":"string", - "sensitive":true - }, - "EnvironmentVariables":{ - "type":"map", - "key":{"shape":"EnvironmentVariableName"}, - "value":{"shape":"EnvironmentVariableValue"}, - "sensitive":true - }, - "EphemeralStorage":{ - "type":"structure", - "required":["Size"], - "members":{ - "Size":{ - "shape":"EphemeralStorageSize", - "documentation":"

The size of the function's /tmp directory.

" - } - }, - "documentation":"

The size of the function's /tmp directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).
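A minimal boto3 sketch (function name is a placeholder):

```python
import boto3

lambda_client = boto3.client("lambda")

# Grow /tmp from the 512 MB default to 2 GB (any whole value from 512 to 10240 MB).
lambda_client.update_function_configuration(
    FunctionName="my-function",  # hypothetical
    EphemeralStorage={"Size": 2048},
)
```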

" - }, - "EphemeralStorageSize":{ - "type":"integer", - "box":true, - "max":10240, - "min":512 - }, - "ErrorData":{ - "type":"string", - "sensitive":true - }, - "ErrorMessage":{ - "type":"string", - "sensitive":true - }, - "ErrorObject":{ - "type":"structure", - "members":{ - "ErrorMessage":{"shape":"ErrorMessage"}, - "ErrorType":{"shape":"ErrorType"}, - "ErrorData":{"shape":"ErrorData"}, - "StackTrace":{"shape":"StackTraceEntries"} - } - }, - "ErrorType":{ - "type":"string", - "sensitive":true - }, - "Event":{ - "type":"structure", - "members":{ - "EventType":{"shape":"EventType"}, - "SubType":{"shape":"OperationSubType"}, - "EventId":{"shape":"EventId"}, - "Id":{"shape":"OperationId"}, - "Name":{"shape":"OperationName"}, - "EventTimestamp":{"shape":"ExecutionTimestamp"}, - "ParentId":{"shape":"OperationId"}, - "ExecutionStartedDetails":{"shape":"ExecutionStartedDetails"}, - "ExecutionSucceededDetails":{"shape":"ExecutionSucceededDetails"}, - "ExecutionFailedDetails":{"shape":"ExecutionFailedDetails"}, - "ExecutionTimedOutDetails":{"shape":"ExecutionTimedOutDetails"}, - "ExecutionStoppedDetails":{"shape":"ExecutionStoppedDetails"}, - "ContextStartedDetails":{"shape":"ContextStartedDetails"}, - "ContextSucceededDetails":{"shape":"ContextSucceededDetails"}, - "ContextFailedDetails":{"shape":"ContextFailedDetails"}, - "WaitStartedDetails":{"shape":"WaitStartedDetails"}, - "WaitSucceededDetails":{"shape":"WaitSucceededDetails"}, - "WaitCancelledDetails":{"shape":"WaitCancelledDetails"}, - "StepStartedDetails":{"shape":"StepStartedDetails"}, - "StepSucceededDetails":{"shape":"StepSucceededDetails"}, - "StepFailedDetails":{"shape":"StepFailedDetails"}, - "ChainedInvokePendingDetails":{"shape":"ChainedInvokePendingDetails"}, - "ChainedInvokeStartedDetails":{"shape":"ChainedInvokeStartedDetails"}, - "ChainedInvokeSucceededDetails":{"shape":"ChainedInvokeSucceededDetails"}, - "ChainedInvokeFailedDetails":{"shape":"ChainedInvokeFailedDetails"}, - "ChainedInvokeTimedOutDetails":{"shape":"ChainedInvokeTimedOutDetails"}, - "ChainedInvokeStoppedDetails":{"shape":"ChainedInvokeStoppedDetails"}, - "CallbackStartedDetails":{"shape":"CallbackStartedDetails"}, - "CallbackSucceededDetails":{"shape":"CallbackSucceededDetails"}, - "CallbackFailedDetails":{"shape":"CallbackFailedDetails"}, - "CallbackTimedOutDetails":{"shape":"CallbackTimedOutDetails"}, - "InvocationCompletedDetails":{"shape":"InvocationCompletedDetails"} - } - }, - "EventError":{ - "type":"structure", - "members":{ - "Payload":{"shape":"ErrorObject"}, - "Truncated":{"shape":"Truncated"} - } - }, - "EventId":{ - "type":"integer", - "box":true, - "min":1 - }, - "EventInput":{ - "type":"structure", - "members":{ - "Payload":{"shape":"InputPayload"}, - "Truncated":{"shape":"Truncated"} - } - }, - "EventResult":{ - "type":"structure", - "members":{ - "Payload":{"shape":"OperationPayload"}, - "Truncated":{"shape":"Truncated"} - } - }, - "EventSourceMappingArn":{ - "type":"string", - "max":120, - "min":85, - "pattern":"arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}((-gov)|(-iso([a-z]?)))?-[a-z]+-\\d{1}:\\d{12}:event-source-mapping:[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}" - }, - "EventSourceMappingConfiguration":{ - "type":"structure", - "members":{ - "UUID":{ - "shape":"String", - "documentation":"

The identifier of the event source mapping.

" - }, - "StartingPosition":{ - "shape":"EventSourcePosition", - "documentation":"

The position in a stream from which to start reading. Required for Amazon Kinesis and Amazon DynamoDB Stream event sources. AT_TIMESTAMP is supported only for Amazon Kinesis streams, Amazon DocumentDB, Amazon MSK, and self-managed Apache Kafka.

" - }, - "StartingPositionTimestamp":{ - "shape":"Date", - "documentation":"

With StartingPosition set to AT_TIMESTAMP, the time from which to start reading. StartingPositionTimestamp cannot be in the future.

" - }, - "BatchSize":{ - "shape":"BatchSize", - "documentation":"

The maximum number of records in each batch that Lambda pulls from your stream or queue and sends to your function. Lambda passes all of the records in the batch to the function in a single call, up to the payload limit for synchronous invocation (6 MB).

Default value: Varies by service. For Amazon SQS, the default is 10. For all other services, the default is 100.

Related setting: When you set BatchSize to a value greater than 10, you must set MaximumBatchingWindowInSeconds to at least 1.

" - }, - "MaximumBatchingWindowInSeconds":{ - "shape":"MaximumBatchingWindowInSeconds", - "documentation":"

The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function. You can configure MaximumBatchingWindowInSeconds to any value from 0 seconds to 300 seconds in increments of seconds.

For streams and Amazon SQS event sources, the default batching window is 0 seconds. For Amazon MSK, self-managed Apache Kafka, Amazon MQ, and DocumentDB event sources, the default batching window is 500 ms. Note that because you can only change MaximumBatchingWindowInSeconds in increments of seconds, you cannot revert to the 500 ms default batching window after you have changed it. To restore the default batching window, you must create a new event source mapping.

Related setting: For streams and Amazon SQS event sources, when you set BatchSize to a value greater than 10, you must set MaximumBatchingWindowInSeconds to at least 1.
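A minimal boto3 sketch of the related setting above for an SQS event source; the queue ARN and function name are placeholders:

```python
import boto3

lambda_client = boto3.client("lambda")

# For an SQS source, a BatchSize above 10 requires a batching window of at least 1 second.
lambda_client.create_event_source_mapping(
    FunctionName="my-function",  # hypothetical
    EventSourceArn="arn:aws:sqs:us-east-1:123456789012:my-queue",
    BatchSize=100,
    MaximumBatchingWindowInSeconds=5,
)
```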

" - }, - "ParallelizationFactor":{ - "shape":"ParallelizationFactor", - "documentation":"

(Kinesis and DynamoDB Streams only) The number of batches to process concurrently from each shard. The default value is 1.

" - }, - "EventSourceArn":{ - "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the event source.

" - }, - "FilterCriteria":{ - "shape":"FilterCriteria", - "documentation":"

An object that defines the filter criteria that determine whether Lambda should process an event. For more information, see Lambda event filtering.

If filter criteria is encrypted, this field shows up as null in the response of ListEventSourceMapping API calls. You can view this field in plaintext in the response of GetEventSourceMapping and DeleteEventSourceMapping calls if you have kms:Decrypt permissions for the correct KMS key.

" - }, - "FunctionArn":{ - "shape":"FunctionArn", - "documentation":"

The ARN of the Lambda function.

" - }, - "LastModified":{ - "shape":"Date", - "documentation":"

The date that the event source mapping was last updated or that its state changed.

" - }, - "LastProcessingResult":{ - "shape":"String", - "documentation":"

The result of the event source mapping's last processing attempt.

" - }, - "State":{ - "shape":"String", - "documentation":"

The state of the event source mapping. It can be one of the following: Creating, Enabling, Enabled, Disabling, Disabled, Updating, or Deleting.

" - }, - "StateTransitionReason":{ - "shape":"String", - "documentation":"

Indicates whether a user or Lambda made the last change to the event source mapping.

" - }, - "DestinationConfig":{ - "shape":"DestinationConfig", - "documentation":"

(Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Apache Kafka event sources only) A configuration object that specifies the destination of an event after Lambda processes it.

" - }, - "Topics":{ - "shape":"Topics", - "documentation":"

The name of the Kafka topic.

" - }, - "Queues":{ - "shape":"Queues", - "documentation":"

(Amazon MQ) The name of the Amazon MQ broker destination queue to consume.

" - }, - "SourceAccessConfigurations":{ - "shape":"SourceAccessConfigurations", - "documentation":"

An array of the authentication protocol, VPC components, or virtual host to secure and define your event source.

" - }, - "SelfManagedEventSource":{ - "shape":"SelfManagedEventSource", - "documentation":"

The self-managed Apache Kafka cluster for your event source.

" - }, - "MaximumRecordAgeInSeconds":{ - "shape":"MaximumRecordAgeInSeconds", - "documentation":"

(Kinesis and DynamoDB Streams only) Discard records older than the specified age. The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, Lambda never discards old records.

The minimum valid value for maximum record age is 60s. Although values less than 60 and greater than -1 fall within the parameter's absolute range, they are not allowed.

" - }, - "BisectBatchOnFunctionError":{ - "shape":"BisectBatchOnFunctionError", - "documentation":"

(Kinesis and DynamoDB Streams only) If the function returns an error, split the batch in two and retry. The default value is false.

" - }, - "MaximumRetryAttempts":{ - "shape":"MaximumRetryAttemptsEventSourceMapping", - "documentation":"

(Kinesis and DynamoDB Streams only) Discard records after the specified number of retries. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, Lambda retries failed records until the record expires in the event source.
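A minimal boto3 sketch combining the stream error-handling settings above; the stream ARN and function name are placeholders:

```python
import boto3

lambda_client = boto3.client("lambda")

# Kinesis error handling: discard records older than an hour, retry a failed
# batch at most twice, and bisect the batch on error to isolate bad records.
lambda_client.create_event_source_mapping(
    FunctionName="my-function",  # hypothetical
    EventSourceArn="arn:aws:kinesis:us-east-1:123456789012:stream/my-stream",
    StartingPosition="TRIM_HORIZON",
    MaximumRecordAgeInSeconds=3600,
    MaximumRetryAttempts=2,
    BisectBatchOnFunctionError=True,
)
```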

" - }, - "TumblingWindowInSeconds":{ - "shape":"TumblingWindowInSeconds", - "documentation":"

(Kinesis and DynamoDB Streams only) The duration in seconds of a processing window for DynamoDB and Kinesis Streams event sources. A value of 0 seconds indicates no tumbling window.

" - }, - "FunctionResponseTypes":{ - "shape":"FunctionResponseTypeList", - "documentation":"

(Kinesis, DynamoDB Streams, and Amazon SQS) A list of current response type enums applied to the event source mapping.

" - }, - "AmazonManagedKafkaEventSourceConfig":{ - "shape":"AmazonManagedKafkaEventSourceConfig", - "documentation":"

Specific configuration settings for an Amazon Managed Streaming for Apache Kafka (Amazon MSK) event source.

" - }, - "SelfManagedKafkaEventSourceConfig":{ - "shape":"SelfManagedKafkaEventSourceConfig", - "documentation":"

Specific configuration settings for a self-managed Apache Kafka event source.

" - }, - "ScalingConfig":{ - "shape":"ScalingConfig", - "documentation":"

(Amazon SQS only) The scaling configuration for the event source. For more information, see Configuring maximum concurrency for Amazon SQS event sources.

" - }, - "DocumentDBEventSourceConfig":{ - "shape":"DocumentDBEventSourceConfig", - "documentation":"

Specific configuration settings for a DocumentDB event source.

" - }, - "KMSKeyArn":{ - "shape":"KMSKeyArn", - "documentation":"

The ARN of the Key Management Service (KMS) customer managed key that Lambda uses to encrypt your function's filter criteria.

" - }, - "FilterCriteriaError":{ - "shape":"FilterCriteriaError", - "documentation":"

An object that contains details about an error related to filter criteria encryption.

" - }, - "EventSourceMappingArn":{ - "shape":"EventSourceMappingArn", - "documentation":"

The Amazon Resource Name (ARN) of the event source mapping.

" - }, - "MetricsConfig":{ - "shape":"EventSourceMappingMetricsConfig", - "documentation":"

The metrics configuration for your event source. For more information, see Event source mapping metrics.

" - }, - "ProvisionedPollerConfig":{ - "shape":"ProvisionedPollerConfig", - "documentation":"

(Amazon MSK and self-managed Apache Kafka only) The provisioned mode configuration for the event source. For more information, see provisioned mode.

" - } - }, - "documentation":"

A mapping between an Amazon Web Services resource and a Lambda function. For details, see CreateEventSourceMapping.

" - }, - "EventSourceMappingMetric":{ - "type":"string", - "enum":["EventCount"] - }, - "EventSourceMappingMetricList":{ - "type":"list", - "member":{"shape":"EventSourceMappingMetric"}, - "max":1, - "min":0 - }, - "EventSourceMappingMetricsConfig":{ - "type":"structure", - "members":{ - "Metrics":{ - "shape":"EventSourceMappingMetricList", - "documentation":"

The metrics you want your event source mapping to produce. Include EventCount to receive event source mapping metrics related to the number of events processed by your event source mapping. For more information about these metrics, see Event source mapping metrics.

" - } - }, - "documentation":"

The metrics configuration for your event source. Use this configuration object to define which metrics you want your event source mapping to produce.
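A minimal boto3 sketch, assuming a boto3 release recent enough to expose MetricsConfig on event source mapping operations; the mapping UUID is a placeholder:

```python
import boto3

lambda_client = boto3.client("lambda")

# Opt an existing mapping into the EventCount metric.
lambda_client.update_event_source_mapping(
    UUID="11111111-2222-3333-4444-555555555555",  # hypothetical mapping ID
    MetricsConfig={"Metrics": ["EventCount"]},
)
```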

" - }, - "EventSourceMappingsList":{ - "type":"list", - "member":{"shape":"EventSourceMappingConfiguration"} - }, - "EventSourcePosition":{ - "type":"string", - "enum":[ - "TRIM_HORIZON", - "LATEST", - "AT_TIMESTAMP" - ] - }, - "EventSourceToken":{ - "type":"string", - "max":256, - "min":0, - "pattern":"[a-zA-Z0-9._\\-]+" - }, - "EventType":{ - "type":"string", - "enum":[ - "ExecutionStarted", - "ExecutionSucceeded", - "ExecutionFailed", - "ExecutionTimedOut", - "ExecutionStopped", - "ContextStarted", - "ContextSucceeded", - "ContextFailed", - "WaitStarted", - "WaitSucceeded", - "WaitCancelled", - "StepStarted", - "StepSucceeded", - "StepFailed", - "ChainedInvokePending", - "ChainedInvokeStarted", - "ChainedInvokeSucceeded", - "ChainedInvokeFailed", - "ChainedInvokeTimedOut", - "ChainedInvokeCancelled", - "CallbackStarted", - "CallbackSucceeded", - "CallbackFailed", - "CallbackTimedOut", - "InvocationCompleted" - ] - }, - "Events":{ - "type":"list", - "member":{"shape":"Event"} - }, - "Execution":{ - "type":"structure", - "required":[ - "DurableExecutionArn", - "DurableExecutionName", - "FunctionArn", - "Status", - "StartTimestamp" - ], - "members":{ - "DurableExecutionArn":{"shape":"DurableExecutionArn"}, - "DurableExecutionName":{"shape":"DurableExecutionName"}, - "FunctionArn":{"shape":"FunctionArn"}, - "Status":{"shape":"ExecutionStatus"}, - "StartTimestamp":{"shape":"ExecutionTimestamp"}, - "EndTimestamp":{"shape":"ExecutionTimestamp"} - } - }, - "ExecutionDetails":{ - "type":"structure", - "members":{ - "InputPayload":{"shape":"InputPayload"} - } - }, - "ExecutionFailedDetails":{ - "type":"structure", - "required":["Error"], - "members":{ - "Error":{"shape":"EventError"} - } - }, - "ExecutionStartedDetails":{ - "type":"structure", - "required":[ - "Input", - "ExecutionTimeout" - ], - "members":{ - "Input":{"shape":"EventInput"}, - "ExecutionTimeout":{"shape":"DurationSeconds"} - } - }, - "ExecutionStatus":{ - "type":"string", - "enum":[ - "RUNNING", - "SUCCEEDED", - "FAILED", - "TIMED_OUT", - "STOPPED" - ] - }, - "ExecutionStatusList":{ - "type":"list", - "member":{"shape":"ExecutionStatus"} - }, - "ExecutionStoppedDetails":{ - "type":"structure", - "required":["Error"], - "members":{ - "Error":{"shape":"EventError"} - } - }, - "ExecutionSucceededDetails":{ - "type":"structure", - "required":["Result"], - "members":{ - "Result":{"shape":"EventResult"} - } - }, - "ExecutionTimedOutDetails":{ - "type":"structure", - "members":{ - "Error":{"shape":"EventError"} - } - }, - "ExecutionTimeout":{ - "type":"integer", - "box":true, - "max":31622400, - "min":1 - }, - "ExecutionTimestamp":{"type":"timestamp"}, - "FileSystemArn":{ - "type":"string", - "max":200, - "min":0, - "pattern":"arn:aws[a-zA-Z-]*:elasticfilesystem:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:access-point/fsap-[a-f0-9]{17}" - }, - "FileSystemConfig":{ - "type":"structure", - "required":[ - "Arn", - "LocalMountPath" - ], - "members":{ - "Arn":{ - "shape":"FileSystemArn", - "documentation":"

The Amazon Resource Name (ARN) of the Amazon EFS access point that provides access to the file system.

" - }, - "LocalMountPath":{ - "shape":"LocalMountPath", - "documentation":"

The path where the function can access the file system, starting with /mnt/.

" - } - }, - "documentation":"

Details about the connection between a Lambda function and an Amazon EFS file system.

" - }, - "FileSystemConfigList":{ - "type":"list", - "member":{"shape":"FileSystemConfig"}, - "max":1, - "min":0 - }, - "Filter":{ - "type":"structure", - "members":{ - "Pattern":{ - "shape":"Pattern", - "documentation":"

A filter pattern. For more information on the syntax of a filter pattern, see Filter rule syntax.

" - } - }, - "documentation":"

A structure within a FilterCriteria object that defines an event filtering pattern.
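A minimal boto3 sketch of a filter pattern on an SQS source; the queue ARN, function name, and pattern fields are illustrative:

```python
import boto3
import json

lambda_client = boto3.client("lambda")

# Only invoke the function for messages whose JSON body has status == "active".
# The Pattern value is itself a JSON string, hence the json.dumps.
lambda_client.create_event_source_mapping(
    FunctionName="my-function",  # hypothetical
    EventSourceArn="arn:aws:sqs:us-east-1:123456789012:my-queue",
    FilterCriteria={
        "Filters": [{"Pattern": json.dumps({"body": {"status": ["active"]}})}]
    },
)
```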

" - }, - "FilterCriteria":{ - "type":"structure", - "members":{ - "Filters":{ - "shape":"FilterList", - "documentation":"

A list of filters.

" - } - }, - "documentation":"

An object that contains the filters for an event source.

" - }, - "FilterCriteriaError":{ - "type":"structure", - "members":{ - "ErrorCode":{ - "shape":"FilterCriteriaErrorCode", - "documentation":"

The KMS exception that resulted from filter criteria encryption or decryption.

" - }, - "Message":{ - "shape":"FilterCriteriaErrorMessage", - "documentation":"

The error message.

" - } - }, - "documentation":"

An object that contains details about an error related to filter criteria encryption.

" - }, - "FilterCriteriaErrorCode":{ - "type":"string", - "max":50, - "min":10, - "pattern":"[A-Za-z]+Exception" - }, - "FilterCriteriaErrorMessage":{ - "type":"string", - "max":2048, - "min":10, - "pattern":".*" - }, - "FilterList":{ - "type":"list", - "member":{"shape":"Filter"} - }, - "FullDocument":{ - "type":"string", - "enum":[ - "UpdateLookup", - "Default" - ] - }, - "FunctionArn":{ - "type":"string", - "max":10000, - "min":0, - "pattern":"arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?" - }, - "FunctionArnList":{ - "type":"list", - "member":{"shape":"FunctionArn"} - }, - "FunctionCode":{ - "type":"structure", - "members":{ - "ZipFile":{ - "shape":"Blob", - "documentation":"

The base64-encoded contents of the deployment package. Amazon Web Services SDK and CLI clients handle the encoding for you.

" - }, - "S3Bucket":{ - "shape":"S3Bucket", - "documentation":"

An Amazon S3 bucket in the same Amazon Web Services Region as your function. The bucket can be in a different Amazon Web Services account.

" - }, - "S3Key":{ - "shape":"S3Key", - "documentation":"

The Amazon S3 key of the deployment package.

" - }, - "S3ObjectVersion":{ - "shape":"S3ObjectVersion", - "documentation":"

For versioned objects, the version of the deployment package object to use.

" - }, - "ImageUri":{ - "shape":"String", - "documentation":"

URI of a container image in the Amazon ECR registry.

" - }, - "SourceKMSKeyArn":{ - "shape":"KMSKeyArn", - "documentation":"

The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt your function's .zip deployment package. If you don't provide a customer managed key, Lambda uses an Amazon Web Services owned key.

" - } - }, - "documentation":"

The code for the Lambda function. You can either specify an object in Amazon S3, upload a .zip file archive deployment package directly, or specify the URI of a container image.
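A minimal boto3 sketch of the S3 variant; the bucket, key, role ARN, and function name are placeholders:

```python
import boto3

lambda_client = boto3.client("lambda")

# Create a function from a .zip archive staged in S3.
lambda_client.create_function(
    FunctionName="my-function",  # hypothetical
    Runtime="python3.12",
    Role="arn:aws:iam::123456789012:role/my-function-role",  # hypothetical role
    Handler="app.handler",
    Code={"S3Bucket": "my-deploy-bucket", "S3Key": "my-function.zip"},
)
```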

" - }, - "FunctionCodeLocation":{ - "type":"structure", - "members":{ - "RepositoryType":{ - "shape":"String", - "documentation":"

The service that's hosting the file.

" - }, - "Location":{ - "shape":"String", - "documentation":"

A presigned URL that you can use to download the deployment package.

" - }, - "ImageUri":{ - "shape":"String", - "documentation":"

URI of a container image in the Amazon ECR registry.

" - }, - "ResolvedImageUri":{ - "shape":"String", - "documentation":"

The resolved URI for the image.

" - }, - "SourceKMSKeyArn":{ - "shape":"String", - "documentation":"

The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt your function's .zip deployment package. If you don't provide a customer managed key, Lambda uses an Amazon Web Services owned key.

" - } - }, - "documentation":"

Details about a function's deployment package.

" - }, - "FunctionConfiguration":{ - "type":"structure", - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"

The name of the function.

" - }, - "FunctionArn":{ - "shape":"NameSpacedFunctionArn", - "documentation":"

The function's Amazon Resource Name (ARN).

" - }, - "Runtime":{ - "shape":"Runtime", - "documentation":"

The identifier of the function's runtime. Runtime is required if the deployment package is a .zip file archive. Specifying a runtime results in an error if you're deploying a function using a container image.

The following list includes deprecated runtimes. Lambda blocks creating new functions and updating existing functions shortly after each runtime is deprecated. For more information, see Runtime use after deprecation.

For a list of all currently supported runtimes, see Supported runtimes.

" - }, - "Role":{ - "shape":"RoleArn", - "documentation":"

The function's execution role.

" - }, - "Handler":{ - "shape":"Handler", - "documentation":"

The function that Lambda calls to begin running your function.

" - }, - "CodeSize":{ - "shape":"Long", - "documentation":"

The size of the function's deployment package, in bytes.

" - }, - "Description":{ - "shape":"Description", - "documentation":"

The function's description.

" - }, - "Timeout":{ - "shape":"Timeout", - "documentation":"

The amount of time in seconds that Lambda allows a function to run before stopping it.

" - }, - "MemorySize":{ - "shape":"MemorySize", - "documentation":"

The amount of memory available to the function at runtime.

" - }, - "LastModified":{ - "shape":"Timestamp", - "documentation":"

The date and time that the function was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" - }, - "CodeSha256":{ - "shape":"String", - "documentation":"

The SHA256 hash of the function's deployment package.

" - }, - "Version":{ - "shape":"Version", - "documentation":"

The version of the Lambda function.

" - }, - "VpcConfig":{ - "shape":"VpcConfigResponse", - "documentation":"

The function's networking configuration.

" - }, - "DeadLetterConfig":{ - "shape":"DeadLetterConfig", - "documentation":"

The function's dead letter queue.

" - }, - "Environment":{ - "shape":"EnvironmentResponse", - "documentation":"

The function's environment variables. Omitted from CloudTrail logs.

" - }, - "KMSKeyArn":{ - "shape":"KMSKeyArn", - "documentation":"

The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt the following resources:

If you don't provide a customer managed key, Lambda uses an Amazon Web Services owned key or an Amazon Web Services managed key.

" - }, - "TracingConfig":{ - "shape":"TracingConfigResponse", - "documentation":"

The function's X-Ray tracing configuration.

" - }, - "MasterArn":{ - "shape":"FunctionArn", - "documentation":"

For Lambda@Edge functions, the ARN of the main function.

" - }, - "RevisionId":{ - "shape":"String", - "documentation":"

The latest updated revision of the function or alias.

" - }, - "Layers":{ - "shape":"LayersReferenceList", - "documentation":"

The function's layers.

" - }, - "State":{ - "shape":"State", - "documentation":"

The current state of the function. When the state is Inactive, you can reactivate the function by invoking it.

" - }, - "StateReason":{ - "shape":"StateReason", - "documentation":"

The reason for the function's current state.

" - }, - "StateReasonCode":{ - "shape":"StateReasonCode", - "documentation":"

The reason code for the function's current state. When the code is Creating, you can't invoke or modify the function.

" - }, - "LastUpdateStatus":{ - "shape":"LastUpdateStatus", - "documentation":"

The status of the last update that was performed on the function. This is first set to Successful after function creation completes.

" - }, - "LastUpdateStatusReason":{ - "shape":"LastUpdateStatusReason", - "documentation":"

The reason for the last update that was performed on the function.

" - }, - "LastUpdateStatusReasonCode":{ - "shape":"LastUpdateStatusReasonCode", - "documentation":"

The reason code for the last update that was performed on the function.

" - }, - "FileSystemConfigs":{ - "shape":"FileSystemConfigList", - "documentation":"

Connection settings for an Amazon EFS file system.

" - }, - "PackageType":{ - "shape":"PackageType", - "documentation":"

The type of deployment package. Set to Image for a container image, or to Zip for a .zip file archive.

" - }, - "ImageConfigResponse":{ - "shape":"ImageConfigResponse", - "documentation":"

The function's image configuration values.

" - }, - "SigningProfileVersionArn":{ - "shape":"Arn", - "documentation":"

The ARN of the signing profile version.

" - }, - "SigningJobArn":{ - "shape":"Arn", - "documentation":"

The ARN of the signing job.

" - }, - "Architectures":{ - "shape":"ArchitecturesList", - "documentation":"

The instruction set architecture that the function supports. Architecture is a string array with one of the valid values. The default architecture value is x86_64.

" - }, - "EphemeralStorage":{ - "shape":"EphemeralStorage", - "documentation":"

The size of the function's /tmp directory in MB. The default value is 512, but can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).

" - }, - "SnapStart":{ - "shape":"SnapStartResponse", - "documentation":"

Set ApplyOn to PublishedVersions to create a snapshot of the initialized execution environment when you publish a function version. For more information, see Improving startup performance with Lambda SnapStart.

" - }, - "RuntimeVersionConfig":{ - "shape":"RuntimeVersionConfig", - "documentation":"

The ARN of the runtime and any errors that occurred.

" - }, - "LoggingConfig":{ - "shape":"LoggingConfig", - "documentation":"

The function's Amazon CloudWatch Logs configuration settings.

" - }, - "DurableConfig":{"shape":"DurableConfig"} - }, - "documentation":"

Details about a function's configuration.

" - }, - "FunctionEventInvokeConfig":{ - "type":"structure", - "members":{ - "LastModified":{ - "shape":"Date", - "documentation":"

The date and time that the configuration was last updated.

" - }, - "FunctionArn":{ - "shape":"FunctionArn", - "documentation":"

The Amazon Resource Name (ARN) of the function.

" - }, - "MaximumRetryAttempts":{ - "shape":"MaximumRetryAttempts", - "documentation":"

The maximum number of times to retry when the function returns an error.

" - }, - "MaximumEventAgeInSeconds":{ - "shape":"MaximumEventAgeInSeconds", - "documentation":"

The maximum age of a request that Lambda sends to a function for processing.

" - }, - "DestinationConfig":{ - "shape":"DestinationConfig", - "documentation":"

A destination for events after they have been sent to a function for processing.

Destinations

S3 buckets are supported only for on-failure destinations. To retain records of successful invocations, use another destination type.

" - } - } - }, - "FunctionEventInvokeConfigList":{ - "type":"list", - "member":{"shape":"FunctionEventInvokeConfig"} - }, - "FunctionList":{ - "type":"list", - "member":{"shape":"FunctionConfiguration"} - }, - "FunctionName":{ - "type":"string", - "max":140, - "min":1, - "pattern":"(arn:(aws[a-zA-Z-]*)?:lambda:)?([a-z]{2}(-gov)?-[a-z]+-\\d{1}:)?(\\d{12}:)?(function:)?([a-zA-Z0-9-_]+)(:(\\$LATEST|[a-zA-Z0-9-_]+))?" - }, - "FunctionResponseType":{ - "type":"string", - "enum":["ReportBatchItemFailures"] - }, - "FunctionResponseTypeList":{ - "type":"list", - "member":{"shape":"FunctionResponseType"}, - "max":1, - "min":0 - }, - "FunctionUrl":{ - "type":"string", - "max":100, - "min":40 - }, - "FunctionUrlAuthType":{ - "type":"string", - "enum":[ - "NONE", - "AWS_IAM" - ] - }, - "FunctionUrlConfig":{ - "type":"structure", - "required":[ - "FunctionUrl", - "FunctionArn", - "CreationTime", - "LastModifiedTime", - "AuthType" - ], - "members":{ - "FunctionUrl":{ - "shape":"FunctionUrl", - "documentation":"

The HTTP URL endpoint for your function.

" - }, - "FunctionArn":{ - "shape":"FunctionArn", - "documentation":"

The Amazon Resource Name (ARN) of your function.

" - }, - "CreationTime":{ - "shape":"Timestamp", - "documentation":"

When the function URL was created, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" - }, - "LastModifiedTime":{ - "shape":"Timestamp", - "documentation":"

When the function URL configuration was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" - }, - "Cors":{ - "shape":"Cors", - "documentation":"

The cross-origin resource sharing (CORS) settings for your function URL.

" - }, - "AuthType":{ - "shape":"FunctionUrlAuthType", - "documentation":"

The type of authentication that your function URL uses. Set to AWS_IAM if you want to restrict access to authenticated users only. Set to NONE if you want to bypass IAM authentication to create a public endpoint. For more information, see Security and auth model for Lambda function URLs.
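A minimal boto3 sketch creating an IAM-authenticated function URL; the function name and CORS origin are placeholders:

```python
import boto3

lambda_client = boto3.client("lambda")

# Callers must sign requests with SigV4 under AWS_IAM. Use AuthType="NONE"
# only for deliberately public endpoints.
response = lambda_client.create_function_url_config(
    FunctionName="my-function",  # hypothetical
    AuthType="AWS_IAM",
    Cors={"AllowOrigins": ["https://example.com"], "AllowMethods": ["GET"]},
)
print(response["FunctionUrl"])
```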

" - }, - "InvokeMode":{ - "shape":"InvokeMode", - "documentation":"

Use one of the following options. BUFFERED (default): the function's response is returned once the invocation completes, with a maximum response payload size of 6 MB. RESPONSE_STREAM: the function streams its response payload as it becomes available, using the InvokeWithResponseStream API operation.

" - } - }, - "documentation":"

Details about a Lambda function URL.

" - }, - "FunctionUrlConfigList":{ - "type":"list", - "member":{"shape":"FunctionUrlConfig"} - }, - "FunctionUrlQualifier":{ - "type":"string", - "max":128, - "min":1, - "pattern":"(^\\$LATEST$)|((?!^[0-9]+$)([a-zA-Z0-9-_]+))" - }, - "FunctionVersion":{ - "type":"string", - "enum":["ALL"] - }, - "GetAccountSettingsRequest":{ - "type":"structure", - "members":{} - }, - "GetAccountSettingsResponse":{ - "type":"structure", - "members":{ - "AccountLimit":{ - "shape":"AccountLimit", - "documentation":"

Limits that are related to concurrency and code storage.

" - }, - "AccountUsage":{ - "shape":"AccountUsage", - "documentation":"

The number of functions and amount of storage in use.

" - } - } - }, - "GetAliasRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "Name" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Name":{ - "shape":"Alias", - "documentation":"

The name of the alias.

", - "location":"uri", - "locationName":"Name" - } - } - }, - "GetCodeSigningConfigRequest":{ - "type":"structure", - "required":["CodeSigningConfigArn"], - "members":{ - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"

The Amazon Resource Name (ARN) of the code signing configuration.

", - "location":"uri", - "locationName":"CodeSigningConfigArn" - } - } - }, - "GetCodeSigningConfigResponse":{ - "type":"structure", - "required":["CodeSigningConfig"], - "members":{ - "CodeSigningConfig":{ - "shape":"CodeSigningConfig", - "documentation":"

The code signing configuration.

" - } - } - }, - "GetDurableExecutionHistoryRequest":{ - "type":"structure", - "required":["DurableExecutionArn"], - "members":{ - "DurableExecutionArn":{ - "shape":"DurableExecutionArn", - "location":"uri", - "locationName":"DurableExecutionArn" - }, - "IncludeExecutionData":{ - "shape":"IncludeExecutionData", - "location":"querystring", - "locationName":"IncludeExecutionData" - }, - "MaxItems":{ - "shape":"ItemCount", - "location":"querystring", - "locationName":"MaxItems" - }, - "Marker":{ - "shape":"String", - "location":"querystring", - "locationName":"Marker" - }, - "ReverseOrder":{ - "shape":"ReverseOrder", - "location":"querystring", - "locationName":"ReverseOrder" - } - } - }, - "GetDurableExecutionHistoryResponse":{ - "type":"structure", - "required":["Events"], - "members":{ - "Events":{"shape":"Events"}, - "NextMarker":{"shape":"String"} - } - }, - "GetDurableExecutionRequest":{ - "type":"structure", - "required":["DurableExecutionArn"], - "members":{ - "DurableExecutionArn":{ - "shape":"DurableExecutionArn", - "location":"uri", - "locationName":"DurableExecutionArn" - } - } - }, - "GetDurableExecutionResponse":{ - "type":"structure", - "required":[ - "DurableExecutionArn", - "DurableExecutionName", - "FunctionArn", - "StartTimestamp", - "Status" - ], - "members":{ - "DurableExecutionArn":{"shape":"DurableExecutionArn"}, - "DurableExecutionName":{"shape":"DurableExecutionName"}, - "FunctionArn":{"shape":"FunctionArn"}, - "InputPayload":{"shape":"InputPayload"}, - "Result":{"shape":"OutputPayload"}, - "Error":{"shape":"ErrorObject"}, - "StartTimestamp":{"shape":"ExecutionTimestamp"}, - "Status":{"shape":"ExecutionStatus"}, - "EndTimestamp":{"shape":"ExecutionTimestamp"}, - "Version":{"shape":"Version"} - } - }, - "GetDurableExecutionStateRequest":{ - "type":"structure", - "required":[ - "DurableExecutionArn", - "CheckpointToken" - ], - "members":{ - "DurableExecutionArn":{ - "shape":"DurableExecutionArn", - "location":"uri", - "locationName":"DurableExecutionArn" - }, - "CheckpointToken":{ - "shape":"CheckpointToken", - "location":"querystring", - "locationName":"CheckpointToken" - }, - "Marker":{ - "shape":"String", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"ItemCount", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "GetDurableExecutionStateResponse":{ - "type":"structure", - "required":["Operations"], - "members":{ - "Operations":{"shape":"Operations"}, - "NextMarker":{"shape":"String"} - } - }, - "GetEventSourceMappingRequest":{ - "type":"structure", - "required":["UUID"], - "members":{ - "UUID":{ - "shape":"String", - "documentation":"

The identifier of the event source mapping.

", - "location":"uri", - "locationName":"UUID" - } - } - }, - "GetFunctionCodeSigningConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - } - } - }, - "GetFunctionCodeSigningConfigResponse":{ - "type":"structure", - "required":[ - "CodeSigningConfigArn", - "FunctionName" - ], - "members":{ - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"

The Amazon Resource Name (ARN) of the code signing configuration.

" - }, - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

" - } - } - }, - "GetFunctionConcurrencyRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - } - } - }, - "GetFunctionConcurrencyResponse":{ - "type":"structure", - "members":{ - "ReservedConcurrentExecutions":{ - "shape":"ReservedConcurrentExecutions", - "documentation":"

The number of simultaneous executions that are reserved for the function.

" - } - } - }, - "GetFunctionConfigurationRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"

The name or ARN of the Lambda function, version, or alias.

Name formats

You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

Specify a version or alias to get details about a published version of the function.

", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "GetFunctionEventInvokeConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function, version, or alias.

Name formats

You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

A version number or alias name.

", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "GetFunctionRecursionConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"UnqualifiedFunctionName", - "documentation":"
The name or ARN of the Lambda function.
", - "location":"uri", - "locationName":"FunctionName" - } - } - }, - "GetFunctionRecursionConfigResponse":{ - "type":"structure", - "members":{ - "RecursiveLoop":{ - "shape":"RecursiveLoop", - "documentation":"

If your function's recursive loop detection configuration is Allow, Lambda doesn't take any action when it detects your function being invoked as part of a recursive loop.

If your function's recursive loop detection configuration is Terminate, Lambda stops your function being invoked and notifies you when it detects your function being invoked as part of a recursive loop.

By default, Lambda sets your function's configuration to Terminate. You can update this configuration using the PutFunctionRecursionConfig action.
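A minimal boto3 sketch of reading and writing this setting; the function name is a placeholder:

```python
import boto3

lambda_client = boto3.client("lambda")

# Read the current recursive-loop setting, then explicitly pin it to the
# default, Terminate. Use "Allow" only for intentional recursive patterns.
config = lambda_client.get_function_recursion_config(FunctionName="my-function")
print(config["RecursiveLoop"])

lambda_client.put_function_recursion_config(
    FunctionName="my-function",  # hypothetical
    RecursiveLoop="Terminate",
)
```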

" - } - } - }, - "GetFunctionRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"

The name or ARN of the Lambda function, version, or alias.

Name formats

You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

Specify a version or alias to get details about a published version of the function.

", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "GetFunctionResponse":{ - "type":"structure", - "members":{ - "Configuration":{ - "shape":"FunctionConfiguration", - "documentation":"

The configuration of the function or version.

" - }, - "Code":{ - "shape":"FunctionCodeLocation", - "documentation":"

The deployment package of the function or version.

" - }, - "Tags":{ - "shape":"Tags", - "documentation":"

The function's tags. Lambda returns tag data only if you have explicit allow permissions for lambda:ListTags.

" - }, - "TagsError":{ - "shape":"TagsError", - "documentation":"

An object that contains details about an error related to retrieving tags.

" - }, - "Concurrency":{ - "shape":"Concurrency", - "documentation":"

The function's reserved concurrency.

" - } - } - }, - "GetFunctionUrlConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"FunctionUrlQualifier", - "documentation":"

The alias name.

", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "GetFunctionUrlConfigResponse":{ - "type":"structure", - "required":[ - "FunctionUrl", - "FunctionArn", - "AuthType", - "CreationTime", - "LastModifiedTime" - ], - "members":{ - "FunctionUrl":{ - "shape":"FunctionUrl", - "documentation":"

The HTTP URL endpoint for your function.

" - }, - "FunctionArn":{ - "shape":"FunctionArn", - "documentation":"

The Amazon Resource Name (ARN) of your function.

" - }, - "AuthType":{ - "shape":"FunctionUrlAuthType", - "documentation":"

The type of authentication that your function URL uses. Set to AWS_IAM if you want to restrict access to authenticated users only. Set to NONE if you want to bypass IAM authentication to create a public endpoint. For more information, see Security and auth model for Lambda function URLs.

" - }, - "Cors":{ - "shape":"Cors", - "documentation":"

The cross-origin resource sharing (CORS) settings for your function URL.

" - }, - "CreationTime":{ - "shape":"Timestamp", - "documentation":"

When the function URL was created, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" - }, - "LastModifiedTime":{ - "shape":"Timestamp", - "documentation":"

When the function URL configuration was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" - }, - "InvokeMode":{ - "shape":"InvokeMode", - "documentation":"

Use one of the following options. BUFFERED (default): the function's response is returned once the invocation completes, with a maximum response payload size of 6 MB. RESPONSE_STREAM: the function streams its response payload as it becomes available, using the InvokeWithResponseStream API operation.

" - } - } - }, - "GetLayerVersionByArnRequest":{ - "type":"structure", - "required":["Arn"], - "members":{ - "Arn":{ - "shape":"LayerVersionArn", - "documentation":"

The ARN of the layer version.

", - "location":"querystring", - "locationName":"Arn" - } - } - }, - "GetLayerVersionPolicyRequest":{ - "type":"structure", - "required":[ - "LayerName", - "VersionNumber" - ], - "members":{ - "LayerName":{ - "shape":"LayerName", - "documentation":"

The name or Amazon Resource Name (ARN) of the layer.

", - "location":"uri", - "locationName":"LayerName" - }, - "VersionNumber":{ - "shape":"LayerVersionNumber", - "documentation":"

The version number.

", - "location":"uri", - "locationName":"VersionNumber" - } - } - }, - "GetLayerVersionPolicyResponse":{ - "type":"structure", - "members":{ - "Policy":{ - "shape":"String", - "documentation":"

The policy document.

" - }, - "RevisionId":{ - "shape":"String", - "documentation":"

A unique identifier for the current revision of the policy.

" - } - } - }, - "GetLayerVersionRequest":{ - "type":"structure", - "required":[ - "LayerName", - "VersionNumber" - ], - "members":{ - "LayerName":{ - "shape":"LayerName", - "documentation":"

The name or Amazon Resource Name (ARN) of the layer.

", - "location":"uri", - "locationName":"LayerName" - }, - "VersionNumber":{ - "shape":"LayerVersionNumber", - "documentation":"

The version number.

", - "location":"uri", - "locationName":"VersionNumber" - } - } - }, - "GetLayerVersionResponse":{ - "type":"structure", - "members":{ - "Content":{ - "shape":"LayerVersionContentOutput", - "documentation":"

Details about the layer version.

" - }, - "LayerArn":{ - "shape":"LayerArn", - "documentation":"

The ARN of the layer.

" - }, - "LayerVersionArn":{ - "shape":"LayerVersionArn", - "documentation":"

The ARN of the layer version.

" - }, - "Description":{ - "shape":"Description", - "documentation":"

The description of the version.

" - }, - "CreatedDate":{ - "shape":"Timestamp", - "documentation":"

The date that the layer version was created, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" - }, - "Version":{ - "shape":"LayerVersionNumber", - "documentation":"

The version number.

" - }, - "CompatibleRuntimes":{ - "shape":"CompatibleRuntimes", - "documentation":"

The layer's compatible runtimes.

The following list includes deprecated runtimes. For more information, see Runtime use after deprecation.

For a list of all currently supported runtimes, see Supported runtimes.

" - }, - "LicenseInfo":{ - "shape":"LicenseInfo", - "documentation":"

The layer's software license.

" - }, - "CompatibleArchitectures":{ - "shape":"CompatibleArchitectures", - "documentation":"

A list of compatible instruction set architectures.

" - } - } - }, - "GetPolicyRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"

The name or ARN of the Lambda function, version, or alias.

Name formats

You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

Specify a version or alias to get the policy for that resource.

", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "GetPolicyResponse":{ - "type":"structure", - "members":{ - "Policy":{ - "shape":"String", - "documentation":"

The resource-based policy.

" - }, - "RevisionId":{ - "shape":"String", - "documentation":"

A unique identifier for the current revision of the policy.

" - } - } - }, - "GetProvisionedConcurrencyConfigRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "Qualifier" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

The version number or alias name.

", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "GetProvisionedConcurrencyConfigResponse":{ - "type":"structure", - "members":{ - "RequestedProvisionedConcurrentExecutions":{ - "shape":"PositiveInteger", - "documentation":"

The amount of provisioned concurrency requested.

" - }, - "AvailableProvisionedConcurrentExecutions":{ - "shape":"NonNegativeInteger", - "documentation":"

The amount of provisioned concurrency available.

" - }, - "AllocatedProvisionedConcurrentExecutions":{ - "shape":"NonNegativeInteger", - "documentation":"

The amount of provisioned concurrency allocated. When a weighted alias is used during linear and canary deployments, this value fluctuates depending on the amount of concurrency that is provisioned for the function versions.

" - }, - "Status":{ - "shape":"ProvisionedConcurrencyStatusEnum", - "documentation":"

The status of the allocation process.

" - }, - "StatusReason":{ - "shape":"String", - "documentation":"

For failed allocations, the reason that provisioned concurrency could not be allocated.

" - }, - "LastModified":{ - "shape":"Timestamp", - "documentation":"

The date and time that a user last updated the configuration, in ISO 8601 format.

" - } - } - }, - "GetRuntimeManagementConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

Specify a version of the function. This can be $LATEST or a published version number. If no value is specified, the configuration for the $LATEST version is returned.

", - "location":"querystring", - "locationName":"Qualifier" - } - } - }, - "GetRuntimeManagementConfigResponse":{ - "type":"structure", - "members":{ - "UpdateRuntimeOn":{ - "shape":"UpdateRuntimeOn", - "documentation":"

The current runtime update mode of the function.

" - }, - "RuntimeVersionArn":{ - "shape":"RuntimeVersionArn", - "documentation":"

The ARN of the runtime the function is configured to use. If the runtime update mode is Manual, the ARN is returned, otherwise null is returned.

" - }, - "FunctionArn":{ - "shape":"NameSpacedFunctionArn", - "documentation":"

The Amazon Resource Name (ARN) of your function.

" - } - } - }, - "Handler":{ - "type":"string", - "max":128, - "min":0, - "pattern":"[^\\s]+" - }, - "Header":{ - "type":"string", - "max":1024, - "min":0, - "pattern":".*" - }, - "HeadersList":{ - "type":"list", - "member":{"shape":"Header"}, - "max":100, - "min":0 - }, - "HttpStatus":{"type":"integer"}, - "ImageConfig":{ - "type":"structure", - "members":{ - "EntryPoint":{ - "shape":"StringList", - "documentation":"

Specifies the entry point to your application, which is typically the location of the runtime executable.

" - }, - "Command":{ - "shape":"StringList", - "documentation":"

Specifies parameters that you want to pass in with ENTRYPOINT.

" - }, - "WorkingDirectory":{ - "shape":"WorkingDirectory", - "documentation":"

Specifies the working directory.

" - } - }, - "documentation":"

Configuration values that override the container image Dockerfile settings. For more information, see Container image settings.

" - }, - "ImageConfigError":{ - "type":"structure", - "members":{ - "ErrorCode":{ - "shape":"String", - "documentation":"

Error code.

" - }, - "Message":{ - "shape":"SensitiveString", - "documentation":"

Error message.

" - } - }, - "documentation":"

Error response to GetFunctionConfiguration.

" - }, - "ImageConfigResponse":{ - "type":"structure", - "members":{ - "ImageConfig":{ - "shape":"ImageConfig", - "documentation":"

Configuration values that override the container image Dockerfile.

" - }, - "Error":{ - "shape":"ImageConfigError", - "documentation":"

Error response to GetFunctionConfiguration.

" - } - }, - "documentation":"

Response to a GetFunctionConfiguration request.

" - }, - "IncludeExecutionData":{ - "type":"boolean", - "box":true - }, - "InputPayload":{ - "type":"string", - "max":6291456, - "min":0, - "sensitive":true - }, - "Integer":{"type":"integer"}, - "InvalidCodeSignatureException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

The code signature failed the integrity check. If the integrity check fails, then Lambda blocks deployment, even if the code signing policy is set to WARN.

", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "InvalidParameterValueException":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"String", - "documentation":"

The exception type.

" - }, - "message":{ - "shape":"String", - "documentation":"

The exception message.

" - } - }, - "documentation":"

One of the parameters in the request is not valid.

", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "InvalidRequestContentException":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"String", - "documentation":"

The exception type.

" - }, - "message":{ - "shape":"String", - "documentation":"

The exception message.

" - } - }, - "documentation":"

The request body could not be parsed as JSON, or a request header is invalid. For example, the 'x-amzn-RequestId' header is not a valid UUID string.

", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "InvalidRuntimeException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

The runtime or runtime version specified is not supported.

", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "InvalidSecurityGroupIDException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

The security group ID provided in the Lambda function VPC configuration is not valid.

", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "InvalidSubnetIDException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

The subnet ID provided in the Lambda function VPC configuration is not valid.

", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "InvalidZipFileException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

Lambda could not unzip the deployment package.

", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "InvocationCompletedDetails":{ - "type":"structure", - "required":[ - "StartTimestamp", - "EndTimestamp", - "RequestId" - ], - "members":{ - "StartTimestamp":{"shape":"ExecutionTimestamp"}, - "EndTimestamp":{"shape":"ExecutionTimestamp"}, - "RequestId":{"shape":"String"}, - "Error":{"shape":"EventError"} - } - }, - "InvocationRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"

The name or ARN of the Lambda function, version, or alias.

Name formats

You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "InvocationType":{ - "shape":"InvocationType", - "documentation":"

Choose from the following options: RequestResponse (default) – invoke the function synchronously and keep the connection open until the function returns a response or times out; Event – invoke the function asynchronously; DryRun – validate parameter values and verify that the user or role has permission to invoke the function.

", - "location":"header", - "locationName":"X-Amz-Invocation-Type" - }, - "LogType":{ - "shape":"LogType", - "documentation":"

Set to Tail to include the execution log in the response. Applies to synchronously invoked functions only.

", - "location":"header", - "locationName":"X-Amz-Log-Type" - }, - "ClientContext":{ - "shape":"String", - "documentation":"

Up to 3,583 bytes of base64-encoded data about the invoking client to pass to the function in the context object. Lambda passes the ClientContext object to your function for synchronous invocations only.

", - "location":"header", - "locationName":"X-Amz-Client-Context" - }, - "DurableExecutionName":{ - "shape":"DurableExecutionName", - "location":"header", - "locationName":"X-Amz-Durable-Execution-Name" - }, - "Payload":{ - "shape":"Blob", - "documentation":"

The JSON that you want to provide to your Lambda function as input.

You can enter the JSON directly. For example, --payload '{ \"key\": \"value\" }'. You can also specify a file path. For example, --payload file://payload.json.

" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

Specify a version or alias to invoke a published version of the function.

", - "location":"querystring", - "locationName":"Qualifier" - } - }, - "payload":"Payload" - }, - "InvocationResponse":{ - "type":"structure", - "members":{ - "StatusCode":{ - "shape":"Integer", - "documentation":"

The HTTP status code is in the 200 range for a successful request. For the RequestResponse invocation type, this status code is 200. For the Event invocation type, this status code is 202. For the DryRun invocation type, the status code is 204.

", - "location":"statusCode" - }, - "FunctionError":{ - "shape":"String", - "documentation":"

If present, indicates that an error occurred during function execution. Details about the error are included in the response payload.

", - "location":"header", - "locationName":"X-Amz-Function-Error" - }, - "LogResult":{ - "shape":"String", - "documentation":"

The last 4 KB of the execution log, which is base64-encoded.

", - "location":"header", - "locationName":"X-Amz-Log-Result" - }, - "Payload":{ - "shape":"Blob", - "documentation":"

The response from the function, or an error object.

" - }, - "ExecutedVersion":{ - "shape":"Version", - "documentation":"

The version of the function that executed. When you invoke a function with an alias, this indicates which version the alias resolved to.

", - "location":"header", - "locationName":"X-Amz-Executed-Version" - }, - "DurableExecutionArn":{ - "shape":"DurableExecutionArn", - "location":"header", - "locationName":"X-Amz-Durable-Execution-Arn" - } - }, - "payload":"Payload" - }, - "InvocationType":{ - "type":"string", - "enum":[ - "Event", - "RequestResponse", - "DryRun" - ] - }, - "InvokeAsyncRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "InvokeArgs" - ], - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "InvokeArgs":{ - "shape":"BlobStream", - "documentation":"

The JSON that you want to provide to your Lambda function as input.

" - } - }, - "deprecated":true, - "payload":"InvokeArgs" - }, - "InvokeAsyncResponse":{ - "type":"structure", - "members":{ - "Status":{ - "shape":"HttpStatus", - "documentation":"

The status code.

", - "location":"statusCode" - } - }, - "documentation":"

A success response (202 Accepted) indicates that the request is queued for invocation.

", - "deprecated":true, - "payload":"Body" - }, - "InvokeCancelledDetails":{ - "type":"structure", - "members":{ - "Error":{"shape":"EventError"} - } - }, - "InvokeFailedDetails":{ - "type":"structure", - "members":{ - "Error":{"shape":"EventError"}, - "RetryDetails":{"shape":"RetryDetails"} - } - }, - "InvokeMode":{ - "type":"string", - "enum":[ - "BUFFERED", - "RESPONSE_STREAM" - ] - }, - "InvokeResponseStreamUpdate":{ - "type":"structure", - "members":{ - "Payload":{ - "shape":"Blob", - "documentation":"

Data returned by your Lambda function.

", - "eventpayload":true - } - }, - "documentation":"

A chunk of the streamed response payload.

", - "event":true - }, - "InvokeWithResponseStreamCompleteEvent":{ - "type":"structure", - "members":{ - "ErrorCode":{ - "shape":"String", - "documentation":"

An error code.

" - }, - "ErrorDetails":{ - "shape":"String", - "documentation":"

The details of any returned error.

" - }, - "LogResult":{ - "shape":"String", - "documentation":"

The last 4 KB of the execution log, which is base64-encoded.

" - } - }, - "documentation":"

A response confirming that the event stream is complete.

", - "event":true - }, - "InvokeWithResponseStreamRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "InvocationType":{ - "shape":"ResponseStreamingInvocationType", - "documentation":"

Use one of the following options: RequestResponse (default) – invoke the function synchronously and keep the connection open until the function returns a response or times out; DryRun – validate parameter values and verify that the user or role has permission to invoke the function.

", - "location":"header", - "locationName":"X-Amz-Invocation-Type" - }, - "LogType":{ - "shape":"LogType", - "documentation":"

Set to Tail to include the execution log in the response. Applies to synchronously invoked functions only.

", - "location":"header", - "locationName":"X-Amz-Log-Type" - }, - "ClientContext":{ - "shape":"String", - "documentation":"

Up to 3,583 bytes of base64-encoded data about the invoking client to pass to the function in the context object.

", - "location":"header", - "locationName":"X-Amz-Client-Context" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

The alias name.

", - "location":"querystring", - "locationName":"Qualifier" - }, - "Payload":{ - "shape":"Blob", - "documentation":"

The JSON that you want to provide to your Lambda function as input.

You can enter the JSON directly. For example, --payload '{ \"key\": \"value\" }'. You can also specify a file path. For example, --payload file://payload.json.

" - } - }, - "payload":"Payload" - }, - "InvokeWithResponseStreamResponse":{ - "type":"structure", - "members":{ - "StatusCode":{ - "shape":"Integer", - "documentation":"

For a successful request, the HTTP status code is in the 200 range. For the RequestResponse invocation type, this status code is 200. For the DryRun invocation type, this status code is 204.

", - "location":"statusCode" - }, - "ExecutedVersion":{ - "shape":"Version", - "documentation":"

The version of the function that executed. When you invoke a function with an alias, this indicates which version the alias resolved to.

", - "location":"header", - "locationName":"X-Amz-Executed-Version" - }, - "EventStream":{ - "shape":"InvokeWithResponseStreamResponseEvent", - "documentation":"

The stream of response payloads.

" - }, - "ResponseStreamContentType":{ - "shape":"String", - "documentation":"

The type of data the stream is returning.

", - "location":"header", - "locationName":"Content-Type" - } - }, - "payload":"EventStream" - }, - "InvokeWithResponseStreamResponseEvent":{ - "type":"structure", - "members":{ - "PayloadChunk":{ - "shape":"InvokeResponseStreamUpdate", - "documentation":"

A chunk of the streamed response payload.

" - }, - "InvokeComplete":{ - "shape":"InvokeWithResponseStreamCompleteEvent", - "documentation":"

An object that's returned when the stream has ended and all the payload chunks have been returned.

" - } - }, - "documentation":"

An object that includes a chunk of the response payload. When the stream has ended, Lambda includes an InvokeComplete object.

", - "eventstream":true - }, - "ItemCount":{ - "type":"integer", - "max":1000, - "min":0 - }, - "KMSAccessDeniedException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

Lambda couldn't decrypt the environment variables because KMS access was denied. Check the Lambda function's KMS permissions.

", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "KMSDisabledException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

Lambda couldn't decrypt the environment variables because the KMS key used is disabled. Check the Lambda function's KMS key settings.

", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "KMSInvalidStateException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

Lambda couldn't decrypt the environment variables because the state of the KMS key used is not valid for Decrypt. Check the function's KMS key settings.

", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "KMSKeyArn":{ - "type":"string", - "pattern":"(arn:(aws[a-zA-Z-]*)?:[a-z0-9-.]+:.*)|()" - }, - "KMSNotFoundException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

Lambda couldn't decrypt the environment variables because the KMS key was not found. Check the function's KMS key settings.

", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "KafkaSchemaRegistryAccessConfig":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"KafkaSchemaRegistryAuthType", - "documentation":"

The type of authentication Lambda uses to access your schema registry.

" - }, - "URI":{ - "shape":"Arn", - "documentation":"

The URI of the secret (Secrets Manager secret ARN) to authenticate with your schema registry.

" - } - }, - "documentation":"

Specific access configuration settings that tell Lambda how to authenticate with your schema registry.

If you're working with a Glue schema registry, don't provide authentication details in this object. Instead, ensure that your execution role has the required permissions for Lambda to access your cluster.

If you're working with a Confluent schema registry, choose the authentication method in the Type field, and provide the Secrets Manager secret ARN in the URI field.

" - }, - "KafkaSchemaRegistryAccessConfigList":{ - "type":"list", - "member":{"shape":"KafkaSchemaRegistryAccessConfig"} - }, - "KafkaSchemaRegistryAuthType":{ - "type":"string", - "enum":[ - "BASIC_AUTH", - "CLIENT_CERTIFICATE_TLS_AUTH", - "SERVER_ROOT_CA_CERTIFICATE" - ] - }, - "KafkaSchemaRegistryConfig":{ - "type":"structure", - "members":{ - "SchemaRegistryURI":{ - "shape":"SchemaRegistryUri", - "documentation":"

The URI for your schema registry. The correct URI format depends on the type of schema registry you're using.

" - }, - "EventRecordFormat":{ - "shape":"SchemaRegistryEventRecordFormat", - "documentation":"

The record format that Lambda delivers to your function after schema validation.

" - }, - "AccessConfigs":{ - "shape":"KafkaSchemaRegistryAccessConfigList", - "documentation":"

An array of access configuration objects that tell Lambda how to authenticate with your schema registry.

" - }, - "SchemaValidationConfigs":{ - "shape":"KafkaSchemaValidationConfigList", - "documentation":"

An array of schema validation configuration objects, which tell Lambda the message attributes you want to validate and filter using your schema registry.

" - } - }, - "documentation":"

Specific configuration settings for a Kafka schema registry.

" - }, - "KafkaSchemaValidationAttribute":{ - "type":"string", - "enum":[ - "KEY", - "VALUE" - ] - }, - "KafkaSchemaValidationConfig":{ - "type":"structure", - "members":{ - "Attribute":{ - "shape":"KafkaSchemaValidationAttribute", - "documentation":"

The attributes you want your schema registry to validate and filter for. If you selected JSON as the EventRecordFormat, Lambda also deserializes the selected message attributes.

" - } - }, - "documentation":"

Specific schema validation configuration settings that tell Lambda the message attributes you want to validate and filter using your schema registry.

" - }, - "KafkaSchemaValidationConfigList":{ - "type":"list", - "member":{"shape":"KafkaSchemaValidationConfig"} - }, - "LastUpdateStatus":{ - "type":"string", - "enum":[ - "Successful", - "Failed", - "InProgress" - ] - }, - "LastUpdateStatusReason":{"type":"string"}, - "LastUpdateStatusReasonCode":{ - "type":"string", - "enum":[ - "EniLimitExceeded", - "InsufficientRolePermissions", - "InvalidConfiguration", - "InternalError", - "SubnetOutOfIPAddresses", - "InvalidSubnet", - "InvalidSecurityGroup", - "ImageDeleted", - "ImageAccessDenied", - "InvalidImage", - "KMSKeyAccessDenied", - "KMSKeyNotFound", - "InvalidStateKMSKey", - "DisabledKMSKey", - "EFSIOError", - "EFSMountConnectivityError", - "EFSMountFailure", - "EFSMountTimeout", - "InvalidRuntime", - "InvalidZipFileException", - "FunctionError" - ] - }, - "Layer":{ - "type":"structure", - "members":{ - "Arn":{ - "shape":"LayerVersionArn", - "documentation":"

The Amazon Resource Name (ARN) of the function layer.

" - }, - "CodeSize":{ - "shape":"Long", - "documentation":"

The size of the layer archive in bytes.

" - }, - "SigningProfileVersionArn":{ - "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) for a signing profile version.

" - }, - "SigningJobArn":{ - "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of a signing job.

" - } - }, - "documentation":"

A Lambda layer.

" - }, - "LayerArn":{ - "type":"string", - "max":140, - "min":1, - "pattern":"arn:[a-zA-Z0-9-]+:lambda:[a-zA-Z0-9-]+:\\d{12}:layer:[a-zA-Z0-9-_]+" - }, - "LayerList":{ - "type":"list", - "member":{"shape":"LayerVersionArn"} - }, - "LayerName":{ - "type":"string", - "max":140, - "min":1, - "pattern":"(arn:[a-zA-Z0-9-]+:lambda:[a-zA-Z0-9-]+:\\d{12}:layer:[a-zA-Z0-9-_]+)|[a-zA-Z0-9-_]+" - }, - "LayerPermissionAllowedAction":{ - "type":"string", - "max":22, - "min":0, - "pattern":"lambda:GetLayerVersion" - }, - "LayerPermissionAllowedPrincipal":{ - "type":"string", - "pattern":"\\d{12}|\\*|arn:(aws[a-zA-Z-]*):iam::\\d{12}:root" - }, - "LayerVersionArn":{ - "type":"string", - "max":140, - "min":1, - "pattern":"arn:[a-zA-Z0-9-]+:lambda:[a-zA-Z0-9-]+:\\d{12}:layer:[a-zA-Z0-9-_]+:[0-9]+" - }, - "LayerVersionContentInput":{ - "type":"structure", - "members":{ - "S3Bucket":{ - "shape":"S3Bucket", - "documentation":"

The Amazon S3 bucket of the layer archive.

" - }, - "S3Key":{ - "shape":"S3Key", - "documentation":"

The Amazon S3 key of the layer archive.

" - }, - "S3ObjectVersion":{ - "shape":"S3ObjectVersion", - "documentation":"

For versioned objects, the version of the layer archive object to use.

" - }, - "ZipFile":{ - "shape":"Blob", - "documentation":"

The base64-encoded contents of the layer archive. The Amazon Web Services SDK and CLI clients handle the encoding for you.

" - } - }, - "documentation":"

A ZIP archive that contains the contents of a Lambda layer. You can specify an Amazon S3 location, or upload a layer archive directly.

" - }, - "LayerVersionContentOutput":{ - "type":"structure", - "members":{ - "Location":{ - "shape":"String", - "documentation":"

A link to the layer archive in Amazon S3 that is valid for 10 minutes.

" - }, - "CodeSha256":{ - "shape":"String", - "documentation":"

The SHA-256 hash of the layer archive.

" - }, - "CodeSize":{ - "shape":"Long", - "documentation":"

The size of the layer archive in bytes.

" - }, - "SigningProfileVersionArn":{ - "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) for a signing profile version.

" - }, - "SigningJobArn":{ - "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of a signing job.

" - } - }, - "documentation":"

Details about a version of a Lambda layer.

" - }, - "LayerVersionNumber":{"type":"long"}, - "LayerVersionsList":{ - "type":"list", - "member":{"shape":"LayerVersionsListItem"} - }, - "LayerVersionsListItem":{ - "type":"structure", - "members":{ - "LayerVersionArn":{ - "shape":"LayerVersionArn", - "documentation":"

The ARN of the layer version.

" - }, - "Version":{ - "shape":"LayerVersionNumber", - "documentation":"

The version number.

" - }, - "Description":{ - "shape":"Description", - "documentation":"

The description of the version.

" - }, - "CreatedDate":{ - "shape":"Timestamp", - "documentation":"

The date that the version was created, in ISO 8601 format. For example, 2018-11-27T15:10:45.123+0000.

" - }, - "CompatibleRuntimes":{ - "shape":"CompatibleRuntimes", - "documentation":"

The layer's compatible runtimes.

The following list includes deprecated runtimes. For more information, see Runtime use after deprecation.

For a list of all currently supported runtimes, see Supported runtimes.

" - }, - "LicenseInfo":{ - "shape":"LicenseInfo", - "documentation":"

The layer's open-source license.

" - }, - "CompatibleArchitectures":{ - "shape":"CompatibleArchitectures", - "documentation":"

A list of compatible instruction set architectures.

" - } - }, - "documentation":"

Details about a version of a Lambda layer.

" - }, - "LayersList":{ - "type":"list", - "member":{"shape":"LayersListItem"} - }, - "LayersListItem":{ - "type":"structure", - "members":{ - "LayerName":{ - "shape":"LayerName", - "documentation":"

The name of the layer.

" - }, - "LayerArn":{ - "shape":"LayerArn", - "documentation":"

The Amazon Resource Name (ARN) of the function layer.

" - }, - "LatestMatchingVersion":{ - "shape":"LayerVersionsListItem", - "documentation":"

The newest version of the layer.

" - } - }, - "documentation":"

Details about a Lambda layer.

" - }, - "LayersReferenceList":{ - "type":"list", - "member":{"shape":"Layer"} - }, - "LicenseInfo":{ - "type":"string", - "max":512, - "min":0 - }, - "ListAliasesRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "FunctionVersion":{ - "shape":"Version", - "documentation":"

Specify a function version to only list aliases that invoke that version.

", - "location":"querystring", - "locationName":"FunctionVersion" - }, - "Marker":{ - "shape":"String", - "documentation":"

Specify the pagination token that's returned by a previous request to retrieve the next page of results.

", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxListItems", - "documentation":"

Limit the number of aliases returned.

", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListAliasesResponse":{ - "type":"structure", - "members":{ - "NextMarker":{ - "shape":"String", - "documentation":"

The pagination token that's included if more results are available.

" - }, - "Aliases":{ - "shape":"AliasList", - "documentation":"

A list of aliases.

" - } - } - }, - "ListCodeSigningConfigsRequest":{ - "type":"structure", - "members":{ - "Marker":{ - "shape":"String", - "documentation":"

Specify the pagination token that's returned by a previous request to retrieve the next page of results.

", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxListItems", - "documentation":"

Maximum number of items to return.

", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListCodeSigningConfigsResponse":{ - "type":"structure", - "members":{ - "NextMarker":{ - "shape":"String", - "documentation":"

The pagination token that's included if more results are available.

" - }, - "CodeSigningConfigs":{ - "shape":"CodeSigningConfigList", - "documentation":"

The code signing configurations.

" - } - } - }, - "ListDurableExecutionsByFunctionRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "location":"querystring", - "locationName":"Qualifier" - }, - "DurableExecutionName":{ - "shape":"DurableExecutionName", - "location":"querystring", - "locationName":"DurableExecutionName" - }, - "Statuses":{ - "shape":"ExecutionStatusList", - "location":"querystring", - "locationName":"Statuses" - }, - "StartedAfter":{ - "shape":"ExecutionTimestamp", - "location":"querystring", - "locationName":"StartedAfter" - }, - "StartedBefore":{ - "shape":"ExecutionTimestamp", - "location":"querystring", - "locationName":"StartedBefore" - }, - "ReverseOrder":{ - "shape":"ReverseOrder", - "location":"querystring", - "locationName":"ReverseOrder" - }, - "Marker":{ - "shape":"String", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"ItemCount", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListDurableExecutionsByFunctionResponse":{ - "type":"structure", - "members":{ - "DurableExecutions":{"shape":"DurableExecutions"}, - "NextMarker":{"shape":"String"} - } - }, - "ListEventSourceMappingsRequest":{ - "type":"structure", - "members":{ - "EventSourceArn":{ - "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the event source.

", - "location":"querystring", - "locationName":"EventSourceArn" - }, - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it's limited to 64 characters in length.

", - "location":"querystring", - "locationName":"FunctionName" - }, - "Marker":{ - "shape":"String", - "documentation":"

A pagination token returned by a previous call.

", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxListItems", - "documentation":"

The maximum number of event source mappings to return. Note that ListEventSourceMappings returns a maximum of 100 items in each response, even if you set the number higher.

", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListEventSourceMappingsResponse":{ - "type":"structure", - "members":{ - "NextMarker":{ - "shape":"String", - "documentation":"

A pagination token that's returned when the response doesn't contain all event source mappings.

" - }, - "EventSourceMappings":{ - "shape":"EventSourceMappingsList", - "documentation":"

A list of event source mappings.

" - } - } - }, - "ListFunctionEventInvokeConfigsRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Marker":{ - "shape":"String", - "documentation":"

Specify the pagination token that's returned by a previous request to retrieve the next page of results.

", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxFunctionEventInvokeConfigListItems", - "documentation":"

The maximum number of configurations to return.

", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListFunctionEventInvokeConfigsResponse":{ - "type":"structure", - "members":{ - "FunctionEventInvokeConfigs":{ - "shape":"FunctionEventInvokeConfigList", - "documentation":"

A list of configurations.

" - }, - "NextMarker":{ - "shape":"String", - "documentation":"

The pagination token that's included if more results are available.

" - } - } - }, - "ListFunctionUrlConfigsRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Marker":{ - "shape":"String", - "documentation":"

Specify the pagination token that's returned by a previous request to retrieve the next page of results.

", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxItems", - "documentation":"

The maximum number of function URLs to return in the response. Note that ListFunctionUrlConfigs returns a maximum of 50 items in each response, even if you set the number higher.

", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListFunctionUrlConfigsResponse":{ - "type":"structure", - "required":["FunctionUrlConfigs"], - "members":{ - "FunctionUrlConfigs":{ - "shape":"FunctionUrlConfigList", - "documentation":"

A list of function URL configurations.

" - }, - "NextMarker":{ - "shape":"String", - "documentation":"

The pagination token that's included if more results are available.

" - } - } - }, - "ListFunctionsByCodeSigningConfigRequest":{ - "type":"structure", - "required":["CodeSigningConfigArn"], - "members":{ - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"

The Amazon Resource Name (ARN) of the code signing configuration.

", - "location":"uri", - "locationName":"CodeSigningConfigArn" - }, - "Marker":{ - "shape":"String", - "documentation":"

Specify the pagination token that's returned by a previous request to retrieve the next page of results.

", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxListItems", - "documentation":"

Maximum number of items to return.

", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListFunctionsByCodeSigningConfigResponse":{ - "type":"structure", - "members":{ - "NextMarker":{ - "shape":"String", - "documentation":"

The pagination token that's included if more results are available.

" - }, - "FunctionArns":{ - "shape":"FunctionArnList", - "documentation":"

The function ARNs.

" - } - } - }, - "ListFunctionsRequest":{ - "type":"structure", - "members":{ - "MasterRegion":{ - "shape":"MasterRegion", - "documentation":"

For Lambda@Edge functions, the Amazon Web Services Region of the master function. For example, us-east-1 filters the list of functions to include only Lambda@Edge functions replicated from a master function in US East (N. Virginia). If specified, you must set FunctionVersion to ALL.

", - "location":"querystring", - "locationName":"MasterRegion" - }, - "FunctionVersion":{ - "shape":"FunctionVersion", - "documentation":"

Set to ALL to include entries for all published versions of each function.

", - "location":"querystring", - "locationName":"FunctionVersion" - }, - "Marker":{ - "shape":"String", - "documentation":"

Specify the pagination token that's returned by a previous request to retrieve the next page of results.

", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxListItems", - "documentation":"

The maximum number of functions to return in the response. Note that ListFunctions returns a maximum of 50 items in each response, even if you set the number higher.

", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListFunctionsResponse":{ - "type":"structure", - "members":{ - "NextMarker":{ - "shape":"String", - "documentation":"

The pagination token that's included if more results are available.

" - }, - "Functions":{ - "shape":"FunctionList", - "documentation":"

A list of Lambda functions.

" - } - }, - "documentation":"

A list of Lambda functions.

" - }, - "ListLayerVersionsRequest":{ - "type":"structure", - "required":["LayerName"], - "members":{ - "CompatibleRuntime":{ - "shape":"Runtime", - "documentation":"

A runtime identifier.

The following list includes deprecated runtimes. For more information, see Runtime use after deprecation.

For a list of all currently supported runtimes, see Supported runtimes.

", - "location":"querystring", - "locationName":"CompatibleRuntime" - }, - "LayerName":{ - "shape":"LayerName", - "documentation":"

The name or Amazon Resource Name (ARN) of the layer.

", - "location":"uri", - "locationName":"LayerName" - }, - "Marker":{ - "shape":"String", - "documentation":"

A pagination token returned by a previous call.

", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxLayerListItems", - "documentation":"

The maximum number of versions to return.

", - "location":"querystring", - "locationName":"MaxItems" - }, - "CompatibleArchitecture":{ - "shape":"Architecture", - "documentation":"

The compatible instruction set architecture.

", - "location":"querystring", - "locationName":"CompatibleArchitecture" - } - } - }, - "ListLayerVersionsResponse":{ - "type":"structure", - "members":{ - "NextMarker":{ - "shape":"String", - "documentation":"

A pagination token returned when the response doesn't contain all versions.

" - }, - "LayerVersions":{ - "shape":"LayerVersionsList", - "documentation":"

A list of versions.

" - } - } - }, - "ListLayersRequest":{ - "type":"structure", - "members":{ - "CompatibleRuntime":{ - "shape":"Runtime", - "documentation":"

A runtime identifier.

The following list includes deprecated runtimes. For more information, see Runtime use after deprecation.

For a list of all currently supported runtimes, see Supported runtimes.

", - "location":"querystring", - "locationName":"CompatibleRuntime" - }, - "Marker":{ - "shape":"String", - "documentation":"

A pagination token returned by a previous call.

", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxLayerListItems", - "documentation":"

The maximum number of layers to return.

", - "location":"querystring", - "locationName":"MaxItems" - }, - "CompatibleArchitecture":{ - "shape":"Architecture", - "documentation":"

The compatible instruction set architecture.

", - "location":"querystring", - "locationName":"CompatibleArchitecture" - } - } - }, - "ListLayersResponse":{ - "type":"structure", - "members":{ - "NextMarker":{ - "shape":"String", - "documentation":"

A pagination token returned when the response doesn't contain all layers.

" - }, - "Layers":{ - "shape":"LayersList", - "documentation":"

A list of function layers.

" - } - } - }, - "ListProvisionedConcurrencyConfigsRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Marker":{ - "shape":"String", - "documentation":"

Specify the pagination token that's returned by a previous request to retrieve the next page of results.

", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxProvisionedConcurrencyConfigListItems", - "documentation":"

Specify a number to limit the number of configurations returned.

", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListProvisionedConcurrencyConfigsResponse":{ - "type":"structure", - "members":{ - "ProvisionedConcurrencyConfigs":{ - "shape":"ProvisionedConcurrencyConfigList", - "documentation":"

A list of provisioned concurrency configurations.

" - }, - "NextMarker":{ - "shape":"String", - "documentation":"

The pagination token that's included if more results are available.

" - } - } - }, - "ListTagsRequest":{ - "type":"structure", - "required":["Resource"], - "members":{ - "Resource":{ - "shape":"TaggableResource", - "documentation":"

The resource's Amazon Resource Name (ARN). Note: Lambda does not support adding tags to function aliases or versions.

", - "location":"uri", - "locationName":"Resource" - } - } - }, - "ListTagsResponse":{ - "type":"structure", - "members":{ - "Tags":{ - "shape":"Tags", - "documentation":"

The function's tags.

" - } - } - }, - "ListVersionsByFunctionRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"NamespacedFunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Marker":{ - "shape":"String", - "documentation":"

Specify the pagination token that's returned by a previous request to retrieve the next page of results.

", - "location":"querystring", - "locationName":"Marker" - }, - "MaxItems":{ - "shape":"MaxListItems", - "documentation":"

The maximum number of versions to return. Note that ListVersionsByFunction returns a maximum of 50 items in each response, even if you set the number higher.

", - "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListVersionsByFunctionResponse":{ - "type":"structure", - "members":{ - "NextMarker":{ - "shape":"String", - "documentation":"

The pagination token that's included if more results are available.

" - }, - "Versions":{ - "shape":"FunctionList", - "documentation":"

A list of Lambda function versions.

" - } - } - }, - "LocalMountPath":{ - "type":"string", - "max":160, - "min":0, - "pattern":"/mnt/[a-zA-Z0-9-_.]+" - }, - "LogFormat":{ - "type":"string", - "enum":[ - "JSON", - "Text" - ] - }, - "LogGroup":{ - "type":"string", - "max":512, - "min":1, - "pattern":"[\\.\\-_/#A-Za-z0-9]+" - }, - "LogType":{ - "type":"string", - "enum":[ - "None", - "Tail" - ] - }, - "LoggingConfig":{ - "type":"structure", - "members":{ - "LogFormat":{ - "shape":"LogFormat", - "documentation":"

The format in which Lambda sends your function's application and system logs to CloudWatch. Select between plain text and structured JSON.

" - }, - "ApplicationLogLevel":{ - "shape":"ApplicationLogLevel", - "documentation":"

Set this property to filter the application logs for your function that Lambda sends to CloudWatch. Lambda only sends application logs at the selected level of detail and lower, where TRACE is the highest level and FATAL is the lowest.

" - }, - "SystemLogLevel":{ - "shape":"SystemLogLevel", - "documentation":"

Set this property to filter the system logs for your function that Lambda sends to CloudWatch. Lambda only sends system logs at the selected level of detail and lower, where DEBUG is the highest level and WARN is the lowest.

" - }, - "LogGroup":{ - "shape":"LogGroup", - "documentation":"

The name of the Amazon CloudWatch log group the function sends logs to. By default, Lambda functions send logs to a default log group named /aws/lambda/<function name>. To use a different log group, enter an existing log group or enter a new log group name.

" - } - }, - "documentation":"

The function's Amazon CloudWatch Logs configuration settings.

" - }, - "Long":{"type":"long"}, - "MasterRegion":{ - "type":"string", - "pattern":"ALL|[a-z]{2}(-gov)?-[a-z]+-\\d{1}" - }, - "MaxAge":{ - "type":"integer", - "box":true, - "max":86400, - "min":0 - }, - "MaxFunctionEventInvokeConfigListItems":{ - "type":"integer", - "box":true, - "max":50, - "min":1 - }, - "MaxItems":{ - "type":"integer", - "box":true, - "max":50, - "min":1 - }, - "MaxLayerListItems":{ - "type":"integer", - "box":true, - "max":50, - "min":1 - }, - "MaxListItems":{ - "type":"integer", - "box":true, - "max":10000, - "min":1 - }, - "MaxProvisionedConcurrencyConfigListItems":{ - "type":"integer", - "box":true, - "max":50, - "min":1 - }, - "MaximumBatchingWindowInSeconds":{ - "type":"integer", - "box":true, - "max":300, - "min":0 - }, - "MaximumConcurrency":{ - "type":"integer", - "box":true, - "max":1000, - "min":2 - }, - "MaximumEventAgeInSeconds":{ - "type":"integer", - "box":true, - "max":21600, - "min":60 - }, - "MaximumNumberOfPollers":{ - "type":"integer", - "box":true, - "max":2000, - "min":1 - }, - "MaximumRecordAgeInSeconds":{ - "type":"integer", - "box":true, - "max":604800, - "min":-1 - }, - "MaximumRetryAttempts":{ - "type":"integer", - "box":true, - "max":2, - "min":0 - }, - "MaximumRetryAttemptsEventSourceMapping":{ - "type":"integer", - "box":true, - "max":10000, - "min":-1 - }, - "MemorySize":{ - "type":"integer", - "box":true, - "max":10240, - "min":128 - }, - "Method":{ - "type":"string", - "max":6, - "min":0, - "pattern":".*" - }, - "MinimumNumberOfPollers":{ - "type":"integer", - "box":true, - "max":200, - "min":1 - }, - "NameSpacedFunctionArn":{ - "type":"string", - "pattern":"arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_\\.]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?" - }, - "NamespacedFunctionName":{ - "type":"string", - "max":170, - "min":1, - "pattern":"(arn:(aws[a-zA-Z-]*)?:lambda:)?([a-z]{2}(-gov)?-[a-z]+-\\d{1}:)?(\\d{12}:)?(function:)?([a-zA-Z0-9-_\\.]+)(:(\\$LATEST|[a-zA-Z0-9-_]+))?" - }, - "NamespacedStatementId":{ - "type":"string", - "max":100, - "min":1, - "pattern":"([a-zA-Z0-9-_.]+)" - }, - "NonNegativeInteger":{ - "type":"integer", - "box":true, - "min":0 - }, - "NullableBoolean":{ - "type":"boolean", - "box":true - }, - "OnFailure":{ - "type":"structure", - "members":{ - "Destination":{ - "shape":"DestinationArn", - "documentation":"

The Amazon Resource Name (ARN) of the destination resource.

To retain records of unsuccessful asynchronous invocations, you can configure an Amazon SNS topic, Amazon SQS queue, Amazon S3 bucket, Lambda function, or Amazon EventBridge event bus as the destination.

To retain records of failed invocations from Kinesis, DynamoDB, self-managed Kafka or Amazon MSK, you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination.

" - } - }, - "documentation":"

A destination for events that failed processing. For more information, see Adding a destination.

" - }, - "OnSuccess":{ - "type":"structure", - "members":{ - "Destination":{ - "shape":"DestinationArn", - "documentation":"

The Amazon Resource Name (ARN) of the destination resource.

" - } - }, - "documentation":"

A destination for events that were processed successfully.

To retain records of successful asynchronous invocations, you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.

OnSuccess is not supported in CreateEventSourceMapping or UpdateEventSourceMapping requests.

" - }, - "Operation":{ - "type":"structure", - "required":[ - "Id", - "Type", - "StartTimestamp", - "Status" - ], - "members":{ - "Id":{"shape":"OperationId"}, - "ParentId":{"shape":"OperationId"}, - "Name":{"shape":"OperationName"}, - "Type":{"shape":"OperationType"}, - "SubType":{"shape":"OperationSubType"}, - "StartTimestamp":{"shape":"ExecutionTimestamp"}, - "EndTimestamp":{"shape":"ExecutionTimestamp"}, - "Status":{"shape":"OperationStatus"}, - "ExecutionDetails":{"shape":"ExecutionDetails"}, - "ContextDetails":{"shape":"ContextDetails"}, - "StepDetails":{"shape":"StepDetails"}, - "WaitDetails":{"shape":"WaitDetails"}, - "CallbackDetails":{"shape":"CallbackDetails"}, - "ChainedInvokeDetails":{"shape":"ChainedInvokeDetails"} - } - }, - "OperationAction":{ - "type":"string", - "enum":[ - "START", - "SUCCEED", - "FAIL", - "RETRY", - "CANCEL" - ] - }, - "OperationId":{ - "type":"string", - "max":64, - "min":1, - "pattern":"[a-zA-Z0-9-_]+" - }, - "OperationName":{ - "type":"string", - "max":256, - "min":1, - "pattern":"[a-zA-Z0-9-_]+" - }, - "OperationPayload":{ - "type":"string", - "max":6291456, - "min":0, - "sensitive":true - }, - "OperationStatus":{ - "type":"string", - "enum":[ - "STARTED", - "PENDING", - "READY", - "SUCCEEDED", - "FAILED", - "CANCELLED", - "TIMED_OUT", - "STOPPED" - ] - }, - "OperationSubType":{ - "type":"string", - "max":32, - "min":1, - "pattern":"[a-zA-Z-_]+" - }, - "OperationType":{ - "type":"string", - "enum":[ - "EXECUTION", - "CONTEXT", - "STEP", - "WAIT", - "CALLBACK", - "CHAINED_INVOKE" - ] - }, - "OperationUpdate":{ - "type":"structure", - "required":[ - "Id", - "Type", - "Action" - ], - "members":{ - "Id":{"shape":"OperationId"}, - "ParentId":{"shape":"OperationId"}, - "Name":{"shape":"OperationName"}, - "Type":{"shape":"OperationType"}, - "SubType":{"shape":"OperationSubType"}, - "Action":{"shape":"OperationAction"}, - "Payload":{"shape":"OperationPayload"}, - "Error":{"shape":"ErrorObject"}, - "ContextOptions":{"shape":"ContextOptions"}, - "StepOptions":{"shape":"StepOptions"}, - "WaitOptions":{"shape":"WaitOptions"}, - "CallbackOptions":{"shape":"CallbackOptions"}, - "ChainedInvokeOptions":{"shape":"ChainedInvokeOptions"} - } - }, - "OperationUpdates":{ - "type":"list", - "member":{"shape":"OperationUpdate"} - }, - "Operations":{ - "type":"list", - "member":{"shape":"Operation"} - }, - "OrganizationId":{ - "type":"string", - "max":34, - "min":0, - "pattern":"o-[a-z0-9]{10,32}" - }, - "Origin":{ - "type":"string", - "max":253, - "min":1, - "pattern":".*" - }, - "OutputPayload":{ - "type":"string", - "max":6291456, - "min":0, - "sensitive":true - }, - "PackageType":{ - "type":"string", - "enum":[ - "Zip", - "Image" - ] - }, - "ParallelizationFactor":{ - "type":"integer", - "box":true, - "max":10, - "min":1 - }, - "Pattern":{ - "type":"string", - "max":4096, - "min":0, - "pattern":".*" - }, - "PolicyLengthExceededException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "message":{"shape":"String"} - }, - "documentation":"

The permissions policy for the resource is too large. For more information, see Lambda quotas.

", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "PositiveInteger":{ - "type":"integer", - "box":true, - "min":1 - }, - "PreconditionFailedException":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"String", - "documentation":"

The exception type.

" - }, - "message":{ - "shape":"String", - "documentation":"

The exception message.

" - } - }, - "documentation":"

The RevisionId provided does not match the latest RevisionId for the Lambda function or alias.

", - "error":{ - "httpStatusCode":412, - "senderFault":true - }, - "exception":true - }, - "Principal":{ - "type":"string", - "pattern":"[^\\s]+" - }, - "PrincipalOrgID":{ - "type":"string", - "max":34, - "min":12, - "pattern":"o-[a-z0-9]{10,32}" - }, - "ProvisionedConcurrencyConfigList":{ - "type":"list", - "member":{"shape":"ProvisionedConcurrencyConfigListItem"} - }, - "ProvisionedConcurrencyConfigListItem":{ - "type":"structure", - "members":{ - "FunctionArn":{ - "shape":"FunctionArn", - "documentation":"

The Amazon Resource Name (ARN) of the alias or version.

" - }, - "RequestedProvisionedConcurrentExecutions":{ - "shape":"PositiveInteger", - "documentation":"

The amount of provisioned concurrency requested.

" - }, - "AvailableProvisionedConcurrentExecutions":{ - "shape":"NonNegativeInteger", - "documentation":"

The amount of provisioned concurrency available.

" - }, - "AllocatedProvisionedConcurrentExecutions":{ - "shape":"NonNegativeInteger", - "documentation":"

The amount of provisioned concurrency allocated. When a weighted alias is used during linear and canary deployments, this value fluctuates depending on the amount of concurrency that is provisioned for the function versions.

" - }, - "Status":{ - "shape":"ProvisionedConcurrencyStatusEnum", - "documentation":"

The status of the allocation process.

" - }, - "StatusReason":{ - "shape":"String", - "documentation":"

For failed allocations, the reason that provisioned concurrency could not be allocated.

" - }, - "LastModified":{ - "shape":"Timestamp", - "documentation":"

The date and time that a user last updated the configuration, in ISO 8601 format.

" - } - }, - "documentation":"

Details about the provisioned concurrency configuration for a function alias or version.

" - }, - "ProvisionedConcurrencyConfigNotFoundException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "message":{"shape":"String"} - }, - "documentation":"

The specified configuration does not exist.

", - "error":{ - "httpStatusCode":404, - "senderFault":true - }, - "exception":true - }, - "ProvisionedConcurrencyStatusEnum":{ - "type":"string", - "enum":[ - "IN_PROGRESS", - "READY", - "FAILED" - ] - }, - "ProvisionedPollerConfig":{ - "type":"structure", - "members":{ - "MinimumPollers":{ - "shape":"MinimumNumberOfPollers", - "documentation":"

The minimum number of event pollers this event source can scale down to.

" - }, - "MaximumPollers":{ - "shape":"MaximumNumberOfPollers", - "documentation":"

The maximum number of event pollers this event source can scale up to.

" - } - }, - "documentation":"

The provisioned mode configuration for the event source. Use Provisioned Mode to customize the minimum and maximum number of event pollers for your event source. An event poller is a compute unit that provides approximately 5 MBps of throughput.

" - }, - "PublishLayerVersionRequest":{ - "type":"structure", - "required":[ - "LayerName", - "Content" - ], - "members":{ - "LayerName":{ - "shape":"LayerName", - "documentation":"

The name or Amazon Resource Name (ARN) of the layer.

", - "location":"uri", - "locationName":"LayerName" - }, - "Description":{ - "shape":"Description", - "documentation":"

The description of the version.

" - }, - "Content":{ - "shape":"LayerVersionContentInput", - "documentation":"

The function layer archive.

" - }, - "CompatibleRuntimes":{ - "shape":"CompatibleRuntimes", - "documentation":"

A list of compatible function runtimes. Used for filtering with ListLayers and ListLayerVersions.

The following list includes deprecated runtimes. For more information, see Runtime deprecation policy.

" - }, - "LicenseInfo":{ - "shape":"LicenseInfo", - "documentation":"

The layer's software license. It can be any of the following: an SPDX license identifier (for example, MIT), the URL of a license hosted on the internet, or the full text of the license.

" - }, - "CompatibleArchitectures":{ - "shape":"CompatibleArchitectures", - "documentation":"

A list of compatible instruction set architectures.

" - } - } - }, - "PublishLayerVersionResponse":{ - "type":"structure", - "members":{ - "Content":{ - "shape":"LayerVersionContentOutput", - "documentation":"

Details about the layer version.

" - }, - "LayerArn":{ - "shape":"LayerArn", - "documentation":"

The ARN of the layer.

" - }, - "LayerVersionArn":{ - "shape":"LayerVersionArn", - "documentation":"

The ARN of the layer version.

" - }, - "Description":{ - "shape":"Description", - "documentation":"

The description of the version.

" - }, - "CreatedDate":{ - "shape":"Timestamp", - "documentation":"

The date that the layer version was created, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" - }, - "Version":{ - "shape":"LayerVersionNumber", - "documentation":"

The version number.

" - }, - "CompatibleRuntimes":{ - "shape":"CompatibleRuntimes", - "documentation":"

The layer's compatible runtimes.

The following list includes deprecated runtimes. For more information, see Runtime use after deprecation.

For a list of all currently supported runtimes, see Supported runtimes.

" - }, - "LicenseInfo":{ - "shape":"LicenseInfo", - "documentation":"

The layer's software license.

" - }, - "CompatibleArchitectures":{ - "shape":"CompatibleArchitectures", - "documentation":"

A list of compatible instruction set architectures.

" - } - } - }, - "PublishVersionRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "CodeSha256":{ - "shape":"String", - "documentation":"

Only publish a version if the hash value matches the value that's specified. Use this option to avoid publishing a version if the function code has changed since you last updated it. You can get the hash for the version that you uploaded from the output of UpdateFunctionCode.

" - }, - "Description":{ - "shape":"Description", - "documentation":"

A description for the version to override the description in the function configuration.

" - }, - "RevisionId":{ - "shape":"String", - "documentation":"

Only update the function if the revision ID matches the ID that's specified. Use this option to avoid publishing a version if the function configuration has changed since you last updated it.

" - } - } - }, - "PutFunctionCodeSigningConfigRequest":{ - "type":"structure", - "required":[ - "CodeSigningConfigArn", - "FunctionName" - ], - "members":{ - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"

The Amazon Resource Name (ARN) of the code signing configuration.

" - }, - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - } - } - }, - "PutFunctionCodeSigningConfigResponse":{ - "type":"structure", - "required":[ - "CodeSigningConfigArn", - "FunctionName" - ], - "members":{ - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"

The Amazon Resource Name (ARN) of the code signing configuration.

" - }, - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

" - } - } - }, - "PutFunctionConcurrencyRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "ReservedConcurrentExecutions" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "ReservedConcurrentExecutions":{ - "shape":"ReservedConcurrentExecutions", - "documentation":"

The number of simultaneous executions to reserve for the function.

" - } - } - }, - "PutFunctionEventInvokeConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function, version, or alias.

Name formats

You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

A version number or alias name.

", - "location":"querystring", - "locationName":"Qualifier" - }, - "MaximumRetryAttempts":{ - "shape":"MaximumRetryAttempts", - "documentation":"

The maximum number of times to retry when the function returns an error.

" - }, - "MaximumEventAgeInSeconds":{ - "shape":"MaximumEventAgeInSeconds", - "documentation":"

The maximum age of a request that Lambda sends to a function for processing.

" - }, - "DestinationConfig":{ - "shape":"DestinationConfig", - "documentation":"

A destination for events after they have been sent to a function for processing.

Destinations

S3 buckets are supported only for on-failure destinations. To retain records of successful invocations, use another destination type.

" - } - } - }, - "PutFunctionRecursionConfigRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "RecursiveLoop" - ], - "members":{ - "FunctionName":{ - "shape":"UnqualifiedFunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "RecursiveLoop":{ - "shape":"RecursiveLoop", - "documentation":"

If you set your function's recursive loop detection configuration to Allow, Lambda doesn't take any action when it detects your function being invoked as part of a recursive loop. We recommend that you only use this setting if your design intentionally uses a Lambda function to write data back to the same Amazon Web Services resource that invokes it.

If you set your function's recursive loop detection configuration to Terminate, Lambda stops your function being invoked and notifies you when it detects your function being invoked as part of a recursive loop.

By default, Lambda sets your function's configuration to Terminate.

If your design intentionally uses a Lambda function to write data back to the same Amazon Web Services resource that invokes the function, then use caution and implement suitable guard rails to prevent unexpected charges being billed to your Amazon Web Services account. To learn more about best practices for using recursive invocation patterns, see Recursive patterns that cause run-away Lambda functions in Serverless Land.

" - } - } - }, - "PutFunctionRecursionConfigResponse":{ - "type":"structure", - "members":{ - "RecursiveLoop":{ - "shape":"RecursiveLoop", - "documentation":"

The status of your function's recursive loop detection configuration.

When this value is set to Allow and Lambda detects your function being invoked as part of a recursive loop, it doesn't take any action.

When this value is set to Terminate and Lambda detects your function being invoked as part of a recursive loop, it stops your function being invoked and notifies you.

" - } - } - }, - "PutProvisionedConcurrencyConfigRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "Qualifier", - "ProvisionedConcurrentExecutions" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

The version number or alias name.

", - "location":"querystring", - "locationName":"Qualifier" - }, - "ProvisionedConcurrentExecutions":{ - "shape":"PositiveInteger", - "documentation":"

The amount of provisioned concurrency to allocate for the version or alias.

" - } - } - }, - "PutProvisionedConcurrencyConfigResponse":{ - "type":"structure", - "members":{ - "RequestedProvisionedConcurrentExecutions":{ - "shape":"PositiveInteger", - "documentation":"

The amount of provisioned concurrency requested.

" - }, - "AvailableProvisionedConcurrentExecutions":{ - "shape":"NonNegativeInteger", - "documentation":"

The amount of provisioned concurrency available.

" - }, - "AllocatedProvisionedConcurrentExecutions":{ - "shape":"NonNegativeInteger", - "documentation":"

The amount of provisioned concurrency allocated. When a weighted alias is used during linear and canary deployments, this value fluctuates depending on the amount of concurrency that is provisioned for the function versions.

" - }, - "Status":{ - "shape":"ProvisionedConcurrencyStatusEnum", - "documentation":"

The status of the allocation process.

" - }, - "StatusReason":{ - "shape":"String", - "documentation":"

For failed allocations, the reason that provisioned concurrency could not be allocated.

" - }, - "LastModified":{ - "shape":"Timestamp", - "documentation":"

The date and time that a user last updated the configuration, in ISO 8601 format.

" - } - } - }, - "PutRuntimeManagementConfigRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "UpdateRuntimeOn" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

Specify a version of the function. This can be $LATEST or a published version number. If no value is specified, the configuration for the $LATEST version is returned.

", - "location":"querystring", - "locationName":"Qualifier" - }, - "UpdateRuntimeOn":{ - "shape":"UpdateRuntimeOn", - "documentation":"

Specify the runtime update mode.

" - }, - "RuntimeVersionArn":{ - "shape":"RuntimeVersionArn", - "documentation":"

The ARN of the runtime version you want the function to use.

This is only required if you're using the Manual runtime update mode.
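
A minimal boto3 sketch for illustration (the function name and runtime version ARN are placeholders); RuntimeVersionArn is passed only because Manual mode is used:

    import boto3

    lambda_client = boto3.client("lambda")
    lambda_client.put_runtime_management_config(
        FunctionName="my-function",  # placeholder
        UpdateRuntimeOn="Manual",
        RuntimeVersionArn="arn:aws:lambda:us-east-1::runtime:0a25e3e7a1cc",  # placeholder
    )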

" - } - } - }, - "PutRuntimeManagementConfigResponse":{ - "type":"structure", - "required":[ - "UpdateRuntimeOn", - "FunctionArn" - ], - "members":{ - "UpdateRuntimeOn":{ - "shape":"UpdateRuntimeOn", - "documentation":"

The runtime update mode.

" - }, - "FunctionArn":{ - "shape":"FunctionArn", - "documentation":"

The ARN of the function.

" - }, - "RuntimeVersionArn":{ - "shape":"RuntimeVersionArn", - "documentation":"

The ARN of the runtime the function is configured to use. If the runtime update mode is Manual, the ARN is returned; otherwise, null is returned.

" - } - } - }, - "Qualifier":{ - "type":"string", - "max":128, - "min":1, - "pattern":"(|[a-zA-Z0-9$_-]+)" - }, - "Queue":{ - "type":"string", - "max":1000, - "min":1, - "pattern":"[\\s\\S]*" - }, - "Queues":{ - "type":"list", - "member":{"shape":"Queue"}, - "max":1, - "min":1 - }, - "RecursiveInvocationException":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"String", - "documentation":"

The exception type.

" - }, - "Message":{ - "shape":"String", - "documentation":"

The exception message.

" - } - }, - "documentation":"

Lambda has detected your function being invoked in a recursive loop with other Amazon Web Services resources and stopped your function's invocation.

", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "RecursiveLoop":{ - "type":"string", - "enum":[ - "Allow", - "Terminate" - ] - }, - "RemoveLayerVersionPermissionRequest":{ - "type":"structure", - "required":[ - "LayerName", - "VersionNumber", - "StatementId" - ], - "members":{ - "LayerName":{ - "shape":"LayerName", - "documentation":"

The name or Amazon Resource Name (ARN) of the layer.

", - "location":"uri", - "locationName":"LayerName" - }, - "VersionNumber":{ - "shape":"LayerVersionNumber", - "documentation":"

The version number.

", - "location":"uri", - "locationName":"VersionNumber" - }, - "StatementId":{ - "shape":"StatementId", - "documentation":"

The identifier that was specified when the statement was added.

", - "location":"uri", - "locationName":"StatementId" - }, - "RevisionId":{ - "shape":"String", - "documentation":"

Only update the policy if the revision ID matches the ID specified. Use this option to avoid modifying a policy that has changed since you last read it.

", - "location":"querystring", - "locationName":"RevisionId" - } - } - }, - "RemovePermissionRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "StatementId" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function, version, or alias.

Name formats

You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "StatementId":{ - "shape":"NamespacedStatementId", - "documentation":"

Statement ID of the permission to remove.

", - "location":"uri", - "locationName":"StatementId" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

Specify a version or alias to remove permissions from a published version of the function.

", - "location":"querystring", - "locationName":"Qualifier" - }, - "RevisionId":{ - "shape":"String", - "documentation":"

Update the policy only if the revision ID matches the ID that's specified. Use this option to avoid modifying a policy that has changed since you last read it.

", - "location":"querystring", - "locationName":"RevisionId" - } - } - }, - "ReplayChildren":{ - "type":"boolean", - "box":true - }, - "RequestTooLargeException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "message":{"shape":"String"} - }, - "documentation":"

The request payload exceeded the Invoke request body JSON input quota. For more information, see Lambda quotas.

", - "error":{ - "httpStatusCode":413, - "senderFault":true - }, - "exception":true - }, - "ReservedConcurrentExecutions":{ - "type":"integer", - "box":true, - "min":0 - }, - "ResourceArn":{ - "type":"string", - "pattern":"(arn:(aws[a-zA-Z-]*)?:[a-z0-9-.]+:.*)|()" - }, - "ResourceConflictException":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"String", - "documentation":"

The exception type.

" - }, - "message":{ - "shape":"String", - "documentation":"

The exception message.

" - } - }, - "documentation":"

The resource already exists, or another operation is in progress.

", - "error":{ - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - "ResourceInUseException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

The operation conflicts with the resource's availability. For example, you tried to update an event source mapping in the CREATING state, or you tried to delete an event source mapping that is currently UPDATING.

", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "ResourceNotFoundException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

The resource specified in the request does not exist.

", - "error":{ - "httpStatusCode":404, - "senderFault":true - }, - "exception":true - }, - "ResourceNotReadyException":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"String", - "documentation":"

The exception type.

" - }, - "message":{ - "shape":"String", - "documentation":"

The exception message.

" - } - }, - "documentation":"

The function is inactive and its VPC connection is no longer available. Wait for the VPC connection to reestablish and try again.

", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "ResponseStreamingInvocationType":{ - "type":"string", - "enum":[ - "RequestResponse", - "DryRun" - ] - }, - "RetentionPeriodInDays":{ - "type":"integer", - "box":true, - "max":90, - "min":1 - }, - "RetryDetails":{ - "type":"structure", - "members":{ - "CurrentAttempt":{"shape":"AttemptCount"}, - "NextAttemptDelaySeconds":{"shape":"DurationSeconds"} - } - }, - "ReverseOrder":{ - "type":"boolean", - "box":true - }, - "RoleArn":{ - "type":"string", - "pattern":"arn:(aws[a-zA-Z-]*)?:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+" - }, - "Runtime":{ - "type":"string", - "enum":[ - "nodejs", - "nodejs4.3", - "nodejs6.10", - "nodejs8.10", - "nodejs10.x", - "nodejs12.x", - "nodejs14.x", - "nodejs16.x", - "java8", - "java8.al2", - "java11", - "python2.7", - "python3.6", - "python3.7", - "python3.8", - "python3.9", - "dotnetcore1.0", - "dotnetcore2.0", - "dotnetcore2.1", - "dotnetcore3.1", - "dotnet6", - "dotnet8", - "nodejs4.3-edge", - "go1.x", - "ruby2.5", - "ruby2.7", - "provided", - "provided.al2", - "nodejs18.x", - "python3.10", - "java17", - "ruby3.2", - "ruby3.3", - "ruby3.4", - "python3.11", - "nodejs20.x", - "provided.al2023", - "python3.12", - "java21", - "python3.13", - "nodejs22.x" - ] - }, - "RuntimeVersionArn":{ - "type":"string", - "max":2048, - "min":26, - "pattern":"arn:(aws[a-zA-Z-]*):lambda:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}::runtime:.+" - }, - "RuntimeVersionConfig":{ - "type":"structure", - "members":{ - "RuntimeVersionArn":{ - "shape":"RuntimeVersionArn", - "documentation":"

The ARN of the runtime version you want the function to use.

" - }, - "Error":{ - "shape":"RuntimeVersionError", - "documentation":"

Error response when Lambda is unable to retrieve the runtime version for a function.

" - } - }, - "documentation":"

The ARN of the runtime and any errors that occurred.

" - }, - "RuntimeVersionError":{ - "type":"structure", - "members":{ - "ErrorCode":{ - "shape":"String", - "documentation":"

The error code.

" - }, - "Message":{ - "shape":"SensitiveString", - "documentation":"

The error message.

" - } - }, - "documentation":"

Any error returned when the runtime version information for the function could not be retrieved.

" - }, - "S3Bucket":{ - "type":"string", - "max":63, - "min":3, - "pattern":"[0-9A-Za-z\\.\\-_]*(?Limits the number of concurrent instances that the Amazon SQS event source can invoke.

" - } - }, - "documentation":"

(Amazon SQS only) The scaling configuration for the event source. To remove the configuration, pass an empty value.
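
For illustration (not part of the model), capping concurrency for an Amazon SQS event source with boto3; the mapping UUID is a placeholder, and passing an empty ScalingConfig removes the cap:

    import boto3

    lambda_client = boto3.client("lambda")
    lambda_client.update_event_source_mapping(
        UUID="<event-source-mapping-uuid>",  # placeholder
        ScalingConfig={"MaximumConcurrency": 5},
    )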

" - }, - "SchemaRegistryEventRecordFormat":{ - "type":"string", - "enum":[ - "JSON", - "SOURCE" - ] - }, - "SchemaRegistryUri":{ - "type":"string", - "max":10000, - "min":1, - "pattern":"[a-zA-Z0-9-\\/*:_+=.@-]*" - }, - "SecurityGroupId":{"type":"string"}, - "SecurityGroupIds":{ - "type":"list", - "member":{"shape":"SecurityGroupId"}, - "max":5, - "min":0 - }, - "SelfManagedEventSource":{ - "type":"structure", - "members":{ - "Endpoints":{ - "shape":"Endpoints", - "documentation":"

The list of bootstrap servers for your Kafka brokers in the following format: \"KAFKA_BOOTSTRAP_SERVERS\": [\"abc.xyz.com:xxxx\",\"abc2.xyz.com:xxxx\"].

" - } - }, - "documentation":"

The self-managed Apache Kafka cluster for your event source.

" - }, - "SelfManagedKafkaEventSourceConfig":{ - "type":"structure", - "members":{ - "ConsumerGroupId":{ - "shape":"URI", - "documentation":"

The identifier for the Kafka consumer group to join. The consumer group ID must be unique among all your Kafka event sources. After creating a Kafka event source mapping with the consumer group ID specified, you cannot update this value. For more information, see Customizable consumer group ID.

" - }, - "SchemaRegistryConfig":{ - "shape":"KafkaSchemaRegistryConfig", - "documentation":"

Specific configuration settings for a Kafka schema registry.

" - } - }, - "documentation":"

Specific configuration settings for a self-managed Apache Kafka event source.

" - }, - "SendDurableExecutionCallbackFailureRequest":{ - "type":"structure", - "required":["CallbackId"], - "members":{ - "CallbackId":{ - "shape":"CallbackId", - "location":"uri", - "locationName":"CallbackId" - }, - "Error":{"shape":"ErrorObject"} - }, - "payload":"Error" - }, - "SendDurableExecutionCallbackFailureResponse":{ - "type":"structure", - "members":{} - }, - "SendDurableExecutionCallbackHeartbeatRequest":{ - "type":"structure", - "required":["CallbackId"], - "members":{ - "CallbackId":{ - "shape":"CallbackId", - "location":"uri", - "locationName":"CallbackId" - } - } - }, - "SendDurableExecutionCallbackHeartbeatResponse":{ - "type":"structure", - "members":{} - }, - "SendDurableExecutionCallbackSuccessRequest":{ - "type":"structure", - "required":["CallbackId"], - "members":{ - "CallbackId":{ - "shape":"CallbackId", - "location":"uri", - "locationName":"CallbackId" - }, - "Result":{"shape":"BinaryOperationPayload"} - }, - "payload":"Result" - }, - "SendDurableExecutionCallbackSuccessResponse":{ - "type":"structure", - "members":{} - }, - "SensitiveString":{ - "type":"string", - "sensitive":true - }, - "ServiceException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

The Lambda service encountered an internal error.

", - "error":{"httpStatusCode":500}, - "exception":true, - "fault":true - }, - "SigningProfileVersionArns":{ - "type":"list", - "member":{"shape":"Arn"}, - "max":20, - "min":1 - }, - "SnapStart":{ - "type":"structure", - "members":{ - "ApplyOn":{ - "shape":"SnapStartApplyOn", - "documentation":"

Set to PublishedVersions to create a snapshot of the initialized execution environment when you publish a function version.

" - } - }, - "documentation":"

The function's Lambda SnapStart setting. Set ApplyOn to PublishedVersions to create a snapshot of the initialized execution environment when you publish a function version.
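
An illustrative boto3 sketch (the function name is a placeholder) that turns on SnapStart snapshots for future published versions:

    import boto3

    lambda_client = boto3.client("lambda")
    lambda_client.update_function_configuration(
        FunctionName="my-function",  # placeholder
        SnapStart={"ApplyOn": "PublishedVersions"},
    )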

" - }, - "SnapStartApplyOn":{ - "type":"string", - "enum":[ - "PublishedVersions", - "None" - ] - }, - "SnapStartException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

The afterRestore() runtime hook encountered an error. For more information, check the Amazon CloudWatch logs.

", - "error":{ - "httpStatusCode":400, - "senderFault":true - }, - "exception":true - }, - "SnapStartNotReadyException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

Lambda is initializing your function. You can invoke the function when the function state becomes Active.

", - "error":{ - "httpStatusCode":409, - "senderFault":true - }, - "exception":true - }, - "SnapStartOptimizationStatus":{ - "type":"string", - "enum":[ - "On", - "Off" - ] - }, - "SnapStartResponse":{ - "type":"structure", - "members":{ - "ApplyOn":{ - "shape":"SnapStartApplyOn", - "documentation":"

When set to PublishedVersions, Lambda creates a snapshot of the execution environment when you publish a function version.

" - }, - "OptimizationStatus":{ - "shape":"SnapStartOptimizationStatus", - "documentation":"

When you provide a qualified Amazon Resource Name (ARN), this response element indicates whether SnapStart is activated for the specified function version.

" - } - }, - "documentation":"

The function's SnapStart setting.

" - }, - "SnapStartTimeoutException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

Lambda couldn't restore the snapshot within the timeout limit.

", - "error":{ - "httpStatusCode":408, - "senderFault":true - }, - "exception":true - }, - "SourceAccessConfiguration":{ - "type":"structure", - "members":{ - "Type":{ - "shape":"SourceAccessType", - "documentation":"

The type of authentication protocol, VPC components, or virtual host for your event source. For example: \"Type\":\"SASL_SCRAM_512_AUTH\".

" - }, - "URI":{ - "shape":"URI", - "documentation":"

The value for your chosen configuration in Type. For example: \"URI\": \"arn:aws:secretsmanager:us-east-1:01234567890:secret:MyBrokerSecretName\".

" - } - }, - "documentation":"

To secure and define access to your event source, you can specify the authentication protocol, VPC components, or virtual host.

" - }, - "SourceAccessConfigurations":{ - "type":"list", - "member":{"shape":"SourceAccessConfiguration"}, - "max":22, - "min":0 - }, - "SourceAccessType":{ - "type":"string", - "enum":[ - "BASIC_AUTH", - "VPC_SUBNET", - "VPC_SECURITY_GROUP", - "SASL_SCRAM_512_AUTH", - "SASL_SCRAM_256_AUTH", - "VIRTUAL_HOST", - "CLIENT_CERTIFICATE_TLS_AUTH", - "SERVER_ROOT_CA_CERTIFICATE" - ] - }, - "SourceOwner":{ - "type":"string", - "max":12, - "min":0, - "pattern":"\\d{12}" - }, - "StackTraceEntries":{ - "type":"list", - "member":{"shape":"StackTraceEntry"} - }, - "StackTraceEntry":{ - "type":"string", - "sensitive":true - }, - "State":{ - "type":"string", - "enum":[ - "Pending", - "Active", - "Inactive", - "Failed" - ] - }, - "StateReason":{"type":"string"}, - "StateReasonCode":{ - "type":"string", - "enum":[ - "Idle", - "Creating", - "Restoring", - "EniLimitExceeded", - "InsufficientRolePermissions", - "InvalidConfiguration", - "InternalError", - "SubnetOutOfIPAddresses", - "InvalidSubnet", - "InvalidSecurityGroup", - "ImageDeleted", - "ImageAccessDenied", - "InvalidImage", - "KMSKeyAccessDenied", - "KMSKeyNotFound", - "InvalidStateKMSKey", - "DisabledKMSKey", - "EFSIOError", - "EFSMountConnectivityError", - "EFSMountFailure", - "EFSMountTimeout", - "InvalidRuntime", - "InvalidZipFileException", - "FunctionError", - "DrainingDurableExecutions" - ] - }, - "StatementId":{ - "type":"string", - "max":100, - "min":1, - "pattern":"([a-zA-Z0-9-_]+)" - }, - "StepDetails":{ - "type":"structure", - "members":{ - "Attempt":{"shape":"AttemptCount"}, - "NextAttemptTimestamp":{"shape":"ExecutionTimestamp"}, - "Result":{"shape":"OperationPayload"}, - "Error":{"shape":"ErrorObject"} - } - }, - "StepFailedDetails":{ - "type":"structure", - "required":[ - "Error", - "RetryDetails" - ], - "members":{ - "Error":{"shape":"EventError"}, - "RetryDetails":{"shape":"RetryDetails"} - } - }, - "StepOptions":{ - "type":"structure", - "members":{ - "NextAttemptDelaySeconds":{"shape":"StepOptionsNextAttemptDelaySecondsInteger"} - } - }, - "StepOptionsNextAttemptDelaySecondsInteger":{ - "type":"integer", - "box":true, - "max":31622400, - "min":1 - }, - "StepStartedDetails":{ - "type":"structure", - "members":{} - }, - "StepSucceededDetails":{ - "type":"structure", - "required":[ - "Result", - "RetryDetails" - ], - "members":{ - "Result":{"shape":"EventResult"}, - "RetryDetails":{"shape":"RetryDetails"} - } - }, - "StopDurableExecutionRequest":{ - "type":"structure", - "required":["DurableExecutionArn"], - "members":{ - "DurableExecutionArn":{ - "shape":"DurableExecutionArn", - "location":"uri", - "locationName":"DurableExecutionArn" - }, - "Error":{"shape":"ErrorObject"} - }, - "payload":"Error" - }, - "StopDurableExecutionResponse":{ - "type":"structure", - "required":["StopTimestamp"], - "members":{ - "StopTimestamp":{"shape":"ExecutionTimestamp"} - } - }, - "String":{"type":"string"}, - "StringList":{ - "type":"list", - "member":{"shape":"String"}, - "max":1500, - "min":0 - }, - "SubnetIPAddressLimitReachedException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "Message":{"shape":"String"} - }, - "documentation":"

Lambda couldn't set up VPC access for the Lambda function because one or more configured subnets has no available IP addresses.

", - "error":{"httpStatusCode":502}, - "exception":true, - "fault":true - }, - "SubnetId":{"type":"string"}, - "SubnetIds":{ - "type":"list", - "member":{"shape":"SubnetId"}, - "max":16, - "min":0 - }, - "SystemLogLevel":{ - "type":"string", - "enum":[ - "DEBUG", - "INFO", - "WARN" - ] - }, - "TagKey":{"type":"string"}, - "TagKeyList":{ - "type":"list", - "member":{"shape":"TagKey"} - }, - "TagResourceRequest":{ - "type":"structure", - "required":[ - "Resource", - "Tags" - ], - "members":{ - "Resource":{ - "shape":"TaggableResource", - "documentation":"

The resource's Amazon Resource Name (ARN).

", - "location":"uri", - "locationName":"Resource" - }, - "Tags":{ - "shape":"Tags", - "documentation":"

A list of tags to apply to the resource.

" - } - } - }, - "TagValue":{"type":"string"}, - "TaggableResource":{ - "type":"string", - "max":256, - "min":1, - "pattern":"arn:(aws[a-zA-Z-]*):lambda:[a-z]{2}((-gov)|(-iso([a-z]?)))?-[a-z]+-\\d{1}:\\d{12}:(function:[a-zA-Z0-9-_]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?|code-signing-config:csc-[a-z0-9]{17}|event-source-mapping:[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})" - }, - "Tags":{ - "type":"map", - "key":{"shape":"TagKey"}, - "value":{"shape":"TagValue"} - }, - "TagsError":{ - "type":"structure", - "required":[ - "ErrorCode", - "Message" - ], - "members":{ - "ErrorCode":{ - "shape":"TagsErrorCode", - "documentation":"

The error code.

" - }, - "Message":{ - "shape":"TagsErrorMessage", - "documentation":"

The error message.

" - } - }, - "documentation":"

An object that contains details about an error related to retrieving tags.

" - }, - "TagsErrorCode":{ - "type":"string", - "max":21, - "min":10, - "pattern":"[A-Za-z]+Exception" - }, - "TagsErrorMessage":{ - "type":"string", - "max":1000, - "min":84, - "pattern":".*" - }, - "ThrottleReason":{ - "type":"string", - "enum":[ - "ConcurrentInvocationLimitExceeded", - "FunctionInvocationRateLimitExceeded", - "ReservedFunctionConcurrentInvocationLimitExceeded", - "ReservedFunctionInvocationRateLimitExceeded", - "CallerRateLimitExceeded", - "ConcurrentSnapshotCreateLimitExceeded" - ] - }, - "Timeout":{ - "type":"integer", - "box":true, - "min":1 - }, - "Timestamp":{"type":"string"}, - "TooManyRequestsException":{ - "type":"structure", - "members":{ - "retryAfterSeconds":{ - "shape":"String", - "documentation":"

The number of seconds the caller should wait before retrying.

", - "location":"header", - "locationName":"Retry-After" - }, - "Type":{"shape":"String"}, - "message":{"shape":"String"}, - "Reason":{"shape":"ThrottleReason"} - }, - "documentation":"

The request throughput limit was exceeded. For more information, see Lambda quotas.

", - "error":{ - "httpStatusCode":429, - "senderFault":true - }, - "exception":true - }, - "Topic":{ - "type":"string", - "max":249, - "min":1, - "pattern":"[^.]([a-zA-Z0-9\\-_.]+)" - }, - "Topics":{ - "type":"list", - "member":{"shape":"Topic"}, - "max":1, - "min":1 - }, - "TracingConfig":{ - "type":"structure", - "members":{ - "Mode":{ - "shape":"TracingMode", - "documentation":"

The tracing mode.

" - } - }, - "documentation":"

The function's X-Ray tracing configuration. To sample and record incoming requests, set Mode to Active.

" - }, - "TracingConfigResponse":{ - "type":"structure", - "members":{ - "Mode":{ - "shape":"TracingMode", - "documentation":"

The tracing mode.

" - } - }, - "documentation":"

The function's X-Ray tracing configuration.

" - }, - "TracingMode":{ - "type":"string", - "enum":[ - "Active", - "PassThrough" - ] - }, - "Truncated":{ - "type":"boolean", - "box":true - }, - "TumblingWindowInSeconds":{ - "type":"integer", - "box":true, - "max":900, - "min":0 - }, - "URI":{ - "type":"string", - "max":200, - "min":1, - "pattern":"[a-zA-Z0-9-\\/*:_+=.@-]*" - }, - "UnqualifiedFunctionName":{ - "type":"string", - "max":140, - "min":1, - "pattern":"(arn:(aws[a-zA-Z-]*)?:lambda:)?([a-z]{2}((-gov)|(-iso([a-z]?)))?-[a-z]+-\\d{1}:)?(\\d{12}:)?(function:)?([a-zA-Z0-9-_]+)" - }, - "UnreservedConcurrentExecutions":{ - "type":"integer", - "box":true, - "min":0 - }, - "UnsupportedMediaTypeException":{ - "type":"structure", - "members":{ - "Type":{"shape":"String"}, - "message":{"shape":"String"} - }, - "documentation":"

The content type of the Invoke request body is not JSON.

", - "error":{ - "httpStatusCode":415, - "senderFault":true - }, - "exception":true - }, - "UntagResourceRequest":{ - "type":"structure", - "required":[ - "Resource", - "TagKeys" - ], - "members":{ - "Resource":{ - "shape":"TaggableResource", - "documentation":"

The resource's Amazon Resource Name (ARN).

", - "location":"uri", - "locationName":"Resource" - }, - "TagKeys":{ - "shape":"TagKeyList", - "documentation":"

A list of tag keys to remove from the resource.

", - "location":"querystring", - "locationName":"tagKeys" - } - } - }, - "UpdateAliasRequest":{ - "type":"structure", - "required":[ - "FunctionName", - "Name" - ], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Name":{ - "shape":"Alias", - "documentation":"

The name of the alias.

", - "location":"uri", - "locationName":"Name" - }, - "FunctionVersion":{ - "shape":"Version", - "documentation":"

The function version that the alias invokes.

" - }, - "Description":{ - "shape":"Description", - "documentation":"

A description of the alias.

" - }, - "RoutingConfig":{ - "shape":"AliasRoutingConfiguration", - "documentation":"

The routing configuration of the alias.
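
For illustration (names and version numbers are placeholders), a boto3 sketch of weighted alias routing that shifts 10% of traffic to a newer version:

    import boto3

    lambda_client = boto3.client("lambda")
    lambda_client.update_alias(
        FunctionName="my-function",  # placeholder
        Name="live",                 # placeholder alias
        FunctionVersion="2",
        RoutingConfig={"AdditionalVersionWeights": {"3": 0.1}},
    )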

" - }, - "RevisionId":{ - "shape":"String", - "documentation":"

Only update the alias if the revision ID matches the ID that's specified. Use this option to avoid modifying an alias that has changed since you last read it.

" - } - } - }, - "UpdateCodeSigningConfigRequest":{ - "type":"structure", - "required":["CodeSigningConfigArn"], - "members":{ - "CodeSigningConfigArn":{ - "shape":"CodeSigningConfigArn", - "documentation":"

The Amazon Resource Name (ARN) of the code signing configuration.

", - "location":"uri", - "locationName":"CodeSigningConfigArn" - }, - "Description":{ - "shape":"Description", - "documentation":"

Descriptive name for this code signing configuration.

" - }, - "AllowedPublishers":{ - "shape":"AllowedPublishers", - "documentation":"

Signing profiles for this code signing configuration.

" - }, - "CodeSigningPolicies":{ - "shape":"CodeSigningPolicies", - "documentation":"

The code signing policy.

" - } - } - }, - "UpdateCodeSigningConfigResponse":{ - "type":"structure", - "required":["CodeSigningConfig"], - "members":{ - "CodeSigningConfig":{ - "shape":"CodeSigningConfig", - "documentation":"

The code signing configuration.

" - } - } - }, - "UpdateEventSourceMappingRequest":{ - "type":"structure", - "required":["UUID"], - "members":{ - "UUID":{ - "shape":"String", - "documentation":"

The identifier of the event source mapping.

", - "location":"uri", - "locationName":"UUID" - }, - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it's limited to 64 characters in length.

" - }, - "Enabled":{ - "shape":"Enabled", - "documentation":"

When true, the event source mapping is active. When false, Lambda pauses polling and invocation.

Default: True

" - }, - "BatchSize":{ - "shape":"BatchSize", - "documentation":"

The maximum number of records in each batch that Lambda pulls from your stream or queue and sends to your function. Lambda passes all of the records in the batch to the function in a single call, up to the payload limit for synchronous invocation (6 MB).

" - }, - "FilterCriteria":{ - "shape":"FilterCriteria", - "documentation":"

An object that defines the filter criteria that determine whether Lambda should process an event. For more information, see Lambda event filtering.

" - }, - "MaximumBatchingWindowInSeconds":{ - "shape":"MaximumBatchingWindowInSeconds", - "documentation":"

The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function. You can configure MaximumBatchingWindowInSeconds to any value from 0 seconds to 300 seconds in increments of seconds.

For Kinesis, DynamoDB, and Amazon SQS event sources, the default batching window is 0 seconds. For Amazon MSK, self-managed Apache Kafka, Amazon MQ, and DocumentDB event sources, the default batching window is 500 ms. Note that because you can only change MaximumBatchingWindowInSeconds in increments of seconds, you cannot revert to the 500 ms default batching window after you have changed it. To restore the default batching window, you must create a new event source mapping.

Related setting: For Kinesis, DynamoDB, and Amazon SQS event sources, when you set BatchSize to a value greater than 10, you must set MaximumBatchingWindowInSeconds to at least 1.
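
A hedged boto3 sketch of that related setting (the mapping UUID is a placeholder): a BatchSize above 10 is paired with a batching window of at least 1 second:

    import boto3

    lambda_client = boto3.client("lambda")
    lambda_client.update_event_source_mapping(
        UUID="<event-source-mapping-uuid>",  # placeholder
        BatchSize=20,
        MaximumBatchingWindowInSeconds=1,
    )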

" - }, - "DestinationConfig":{ - "shape":"DestinationConfig", - "documentation":"

(Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Kafka only) A configuration object that specifies the destination of an event after Lambda processes it.

" - }, - "MaximumRecordAgeInSeconds":{ - "shape":"MaximumRecordAgeInSeconds", - "documentation":"

(Kinesis and DynamoDB Streams only) Discard records older than the specified age. The default value is infinite (-1).

" - }, - "BisectBatchOnFunctionError":{ - "shape":"BisectBatchOnFunctionError", - "documentation":"

(Kinesis and DynamoDB Streams only) If the function returns an error, split the batch in two and retry.

" - }, - "MaximumRetryAttempts":{ - "shape":"MaximumRetryAttemptsEventSourceMapping", - "documentation":"

(Kinesis and DynamoDB Streams only) Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.

" - }, - "ParallelizationFactor":{ - "shape":"ParallelizationFactor", - "documentation":"

(Kinesis and DynamoDB Streams only) The number of batches to process from each shard concurrently.

" - }, - "SourceAccessConfigurations":{ - "shape":"SourceAccessConfigurations", - "documentation":"

An array of authentication protocols or VPC components required to secure your event source.

" - }, - "TumblingWindowInSeconds":{ - "shape":"TumblingWindowInSeconds", - "documentation":"

(Kinesis and DynamoDB Streams only) The duration in seconds of a processing window for DynamoDB and Kinesis Streams event sources. A value of 0 seconds indicates no tumbling window.

" - }, - "FunctionResponseTypes":{ - "shape":"FunctionResponseTypeList", - "documentation":"

(Kinesis, DynamoDB Streams, and Amazon SQS) A list of current response type enums applied to the event source mapping.

" - }, - "ScalingConfig":{ - "shape":"ScalingConfig", - "documentation":"

(Amazon SQS only) The scaling configuration for the event source. For more information, see Configuring maximum concurrency for Amazon SQS event sources.

" - }, - "AmazonManagedKafkaEventSourceConfig":{"shape":"AmazonManagedKafkaEventSourceConfig"}, - "SelfManagedKafkaEventSourceConfig":{"shape":"SelfManagedKafkaEventSourceConfig"}, - "DocumentDBEventSourceConfig":{ - "shape":"DocumentDBEventSourceConfig", - "documentation":"

Specific configuration settings for a DocumentDB event source.

" - }, - "KMSKeyArn":{ - "shape":"KMSKeyArn", - "documentation":"

The ARN of the Key Management Service (KMS) customer managed key that Lambda uses to encrypt your function's filter criteria. By default, Lambda does not encrypt your filter criteria object. Specify this property to encrypt data using your own customer managed key.

" - }, - "MetricsConfig":{ - "shape":"EventSourceMappingMetricsConfig", - "documentation":"

The metrics configuration for your event source. For more information, see Event source mapping metrics.

" - }, - "ProvisionedPollerConfig":{ - "shape":"ProvisionedPollerConfig", - "documentation":"

(Amazon MSK and self-managed Apache Kafka only) The provisioned mode configuration for the event source. For more information, see provisioned mode.

" - } - } - }, - "UpdateFunctionCodeRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "ZipFile":{ - "shape":"Blob", - "documentation":"

The base64-encoded contents of the deployment package. Amazon Web Services SDK and CLI clients handle the encoding for you. Use only with a function defined with a .zip file archive deployment package.
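
For illustration only (function name and file path are placeholders), uploading a .zip deployment package with boto3, which handles the base64 encoding:

    import boto3

    lambda_client = boto3.client("lambda")
    with open("function.zip", "rb") as f:  # placeholder path
        lambda_client.update_function_code(
            FunctionName="my-function",  # placeholder
            ZipFile=f.read(),
            Publish=True,  # optionally publish a new version
        )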

" - }, - "S3Bucket":{ - "shape":"S3Bucket", - "documentation":"

An Amazon S3 bucket in the same Amazon Web Services Region as your function. The bucket can be in a different Amazon Web Services account. Use only with a function defined with a .zip file archive deployment package.

" - }, - "S3Key":{ - "shape":"S3Key", - "documentation":"

The Amazon S3 key of the deployment package. Use only with a function defined with a .zip file archive deployment package.

" - }, - "S3ObjectVersion":{ - "shape":"S3ObjectVersion", - "documentation":"

For versioned objects, the version of the deployment package object to use.

" - }, - "ImageUri":{ - "shape":"String", - "documentation":"

URI of a container image in the Amazon ECR registry. Do not use for a function defined with a .zip file archive.

" - }, - "Publish":{ - "shape":"Boolean", - "documentation":"

Set to true to publish a new version of the function after updating the code. This has the same effect as calling PublishVersion separately.

" - }, - "DryRun":{ - "shape":"Boolean", - "documentation":"

Set to true to validate the request parameters and access permissions without modifying the function code.

" - }, - "RevisionId":{ - "shape":"String", - "documentation":"

Update the function only if the revision ID matches the ID that's specified. Use this option to avoid modifying a function that has changed since you last read it.

" - }, - "Architectures":{ - "shape":"ArchitecturesList", - "documentation":"

The instruction set architecture that the function supports. Enter a string array with one of the valid values (arm64 or x86_64). The default value is x86_64.

" - }, - "SourceKMSKeyArn":{ - "shape":"KMSKeyArn", - "documentation":"

The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt your function's .zip deployment package. If you don't provide a customer managed key, Lambda uses an Amazon Web Services managed key.

" - } - } - }, - "UpdateFunctionConfigurationRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Role":{ - "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the function's execution role.

" - }, - "Handler":{ - "shape":"Handler", - "documentation":"

The name of the method within your code that Lambda calls to run your function. Handler is required if the deployment package is a .zip file archive. The format includes the file name. It can also include namespaces and other qualifiers, depending on the runtime. For more information, see Lambda programming model.

" - }, - "Description":{ - "shape":"Description", - "documentation":"

A description of the function.

" - }, - "Timeout":{ - "shape":"Timeout", - "documentation":"

The amount of time (in seconds) that Lambda allows a function to run before stopping it. The default is 3 seconds. The maximum allowed value is 900 seconds. For more information, see Lambda execution environment.

" - }, - "MemorySize":{ - "shape":"MemorySize", - "documentation":"

The amount of memory available to the function at runtime. Increasing the function memory also increases its CPU allocation. The default value is 128 MB. The value can be any multiple of 1 MB.
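
An illustrative boto3 sketch (the function name is a placeholder) that raises memory, and with it CPU, while setting a 30-second timeout:

    import boto3

    lambda_client = boto3.client("lambda")
    lambda_client.update_function_configuration(
        FunctionName="my-function",  # placeholder
        MemorySize=512,  # MB; also scales CPU
        Timeout=30,      # seconds, 1-900
    )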

" - }, - "VpcConfig":{ - "shape":"VpcConfig", - "documentation":"

For network connectivity to Amazon Web Services resources in a VPC, specify a list of security groups and subnets in the VPC. When you connect a function to a VPC, it can access resources and the internet only through that VPC. For more information, see Configuring a Lambda function to access resources in a VPC.

" - }, - "Environment":{ - "shape":"Environment", - "documentation":"

Environment variables that are accessible from function code during execution.

" - }, - "Runtime":{ - "shape":"Runtime", - "documentation":"

The identifier of the function's runtime. Runtime is required if the deployment package is a .zip file archive. Specifying a runtime results in an error if you're deploying a function using a container image.

The following list includes deprecated runtimes. Lambda blocks creating new functions and updating existing functions shortly after each runtime is deprecated. For more information, see Runtime use after deprecation.

For a list of all currently supported runtimes, see Supported runtimes.

" - }, - "DeadLetterConfig":{ - "shape":"DeadLetterConfig", - "documentation":"

A dead-letter queue configuration that specifies the queue or topic where Lambda sends asynchronous events when they fail processing. For more information, see Dead-letter queues.

" - }, - "KMSKeyArn":{ - "shape":"KMSKeyArn", - "documentation":"

The ARN of the Key Management Service (KMS) customer managed key that's used to encrypt the following resources:

If you don't provide a customer managed key, Lambda uses an Amazon Web Services owned key or an Amazon Web Services managed key.

" - }, - "TracingConfig":{ - "shape":"TracingConfig", - "documentation":"

Set Mode to Active to sample and trace a subset of incoming requests with X-Ray.

" - }, - "RevisionId":{ - "shape":"String", - "documentation":"

Update the function only if the revision ID matches the ID that's specified. Use this option to avoid modifying a function that has changed since you last read it.

" - }, - "Layers":{ - "shape":"LayerList", - "documentation":"

A list of function layers to add to the function's execution environment. Specify each layer by its ARN, including the version.

" - }, - "FileSystemConfigs":{ - "shape":"FileSystemConfigList", - "documentation":"

Connection settings for an Amazon EFS file system.

" - }, - "ImageConfig":{ - "shape":"ImageConfig", - "documentation":"

Container image configuration values that override the values in the container image Docker file.

" - }, - "EphemeralStorage":{ - "shape":"EphemeralStorage", - "documentation":"

The size of the function's /tmp directory in MB. The default value is 512, but it can be any whole number between 512 and 10,240 MB. For more information, see Configuring ephemeral storage (console).

" - }, - "SnapStart":{ - "shape":"SnapStart", - "documentation":"

The function's SnapStart setting.

" - }, - "LoggingConfig":{ - "shape":"LoggingConfig", - "documentation":"

The function's Amazon CloudWatch Logs configuration settings.

" - }, - "DurableConfig":{"shape":"DurableConfig"} - } - }, - "UpdateFunctionEventInvokeConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function, version, or alias.

Name formats

You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"Qualifier", - "documentation":"

A version number or alias name.

", - "location":"querystring", - "locationName":"Qualifier" - }, - "MaximumRetryAttempts":{ - "shape":"MaximumRetryAttempts", - "documentation":"

The maximum number of times to retry when the function returns an error.

" - }, - "MaximumEventAgeInSeconds":{ - "shape":"MaximumEventAgeInSeconds", - "documentation":"

The maximum age of a request that Lambda sends to a function for processing.

" - }, - "DestinationConfig":{ - "shape":"DestinationConfig", - "documentation":"

A destination for events after they have been sent to a function for processing.

Destinations

S3 buckets are supported only for on-failure destinations. To retain records of successful invocations, use another destination type.
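
For illustration (function name and queue ARN are placeholders), a boto3 sketch that routes failed asynchronous invocations to an SQS queue:

    import boto3

    lambda_client = boto3.client("lambda")
    lambda_client.update_function_event_invoke_config(
        FunctionName="my-function",  # placeholder
        MaximumRetryAttempts=1,
        MaximumEventAgeInSeconds=3600,
        DestinationConfig={
            "OnFailure": {"Destination": "arn:aws:sqs:us-east-1:123456789012:failed-events"}  # placeholder
        },
    )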

" - } - } - }, - "UpdateFunctionUrlConfigRequest":{ - "type":"structure", - "required":["FunctionName"], - "members":{ - "FunctionName":{ - "shape":"FunctionName", - "documentation":"

The name or ARN of the Lambda function.

Name formats

The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.

", - "location":"uri", - "locationName":"FunctionName" - }, - "Qualifier":{ - "shape":"FunctionUrlQualifier", - "documentation":"

The alias name.

", - "location":"querystring", - "locationName":"Qualifier" - }, - "AuthType":{ - "shape":"FunctionUrlAuthType", - "documentation":"

The type of authentication that your function URL uses. Set to AWS_IAM if you want to restrict access to authenticated users only. Set to NONE if you want to bypass IAM authentication to create a public endpoint. For more information, see Security and auth model for Lambda function URLs.

" - }, - "Cors":{ - "shape":"Cors", - "documentation":"

The cross-origin resource sharing (CORS) settings for your function URL.
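
A hedged boto3 sketch (function name and origin are placeholders) that requires IAM auth on a function URL and allows a single origin:

    import boto3

    lambda_client = boto3.client("lambda")
    lambda_client.update_function_url_config(
        FunctionName="my-function",  # placeholder
        AuthType="AWS_IAM",
        Cors={"AllowOrigins": ["https://example.com"], "AllowMethods": ["GET"]},
    )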

" - }, - "InvokeMode":{ - "shape":"InvokeMode", - "documentation":"

Use one of the following options: BUFFERED (the default), where Lambda invokes your function using the Invoke API operation and invocation results are available when the payload is complete, or RESPONSE_STREAM, where your function streams payload results as they become available and Lambda invokes it using the InvokeWithResponseStream API operation.

" - } - } - }, - "UpdateFunctionUrlConfigResponse":{ - "type":"structure", - "required":[ - "FunctionUrl", - "FunctionArn", - "AuthType", - "CreationTime", - "LastModifiedTime" - ], - "members":{ - "FunctionUrl":{ - "shape":"FunctionUrl", - "documentation":"

The HTTP URL endpoint for your function.

" - }, - "FunctionArn":{ - "shape":"FunctionArn", - "documentation":"

The Amazon Resource Name (ARN) of your function.

" - }, - "AuthType":{ - "shape":"FunctionUrlAuthType", - "documentation":"

The type of authentication that your function URL uses. Set to AWS_IAM if you want to restrict access to authenticated users only. Set to NONE if you want to bypass IAM authentication to create a public endpoint. For more information, see Security and auth model for Lambda function URLs.

" - }, - "Cors":{ - "shape":"Cors", - "documentation":"

The cross-origin resource sharing (CORS) settings for your function URL.

" - }, - "CreationTime":{ - "shape":"Timestamp", - "documentation":"

When the function URL was created, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" - }, - "LastModifiedTime":{ - "shape":"Timestamp", - "documentation":"

When the function URL configuration was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" - }, - "InvokeMode":{ - "shape":"InvokeMode", - "documentation":"

Use one of the following options: BUFFERED (the default), where Lambda invokes your function using the Invoke API operation and invocation results are available when the payload is complete, or RESPONSE_STREAM, where your function streams payload results as they become available and Lambda invokes it using the InvokeWithResponseStream API operation.

" - } - } - }, - "UpdateRuntimeOn":{ - "type":"string", - "enum":[ - "Auto", - "Manual", - "FunctionUpdate" - ] - }, - "Version":{ - "type":"string", - "max":1024, - "min":1, - "pattern":"(\\$LATEST|[0-9]+)" - }, - "VpcConfig":{ - "type":"structure", - "members":{ - "SubnetIds":{ - "shape":"SubnetIds", - "documentation":"

A list of VPC subnet IDs.

" - }, - "SecurityGroupIds":{ - "shape":"SecurityGroupIds", - "documentation":"

A list of VPC security group IDs.

" - }, - "Ipv6AllowedForDualStack":{ - "shape":"NullableBoolean", - "documentation":"

Allows outbound IPv6 traffic on VPC functions that are connected to dual-stack subnets.

" - } - }, - "documentation":"

The VPC security groups and subnets that are attached to a Lambda function. For more information, see Configuring a Lambda function to access resources in a VPC.
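
For illustration (subnet and security group IDs are placeholders), attaching a function to a VPC with boto3:

    import boto3

    lambda_client = boto3.client("lambda")
    lambda_client.update_function_configuration(
        FunctionName="my-function",  # placeholder
        VpcConfig={
            "SubnetIds": ["subnet-0abc1234def567890"],     # placeholder
            "SecurityGroupIds": ["sg-0abc1234def567890"],  # placeholder
            "Ipv6AllowedForDualStack": False,
        },
    )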

" - }, - "VpcConfigResponse":{ - "type":"structure", - "members":{ - "SubnetIds":{ - "shape":"SubnetIds", - "documentation":"

A list of VPC subnet IDs.

" - }, - "SecurityGroupIds":{ - "shape":"SecurityGroupIds", - "documentation":"

A list of VPC security group IDs.

" - }, - "VpcId":{ - "shape":"VpcId", - "documentation":"

The ID of the VPC.

" - }, - "Ipv6AllowedForDualStack":{ - "shape":"NullableBoolean", - "documentation":"

Allows outbound IPv6 traffic on VPC functions that are connected to dual-stack subnets.

" - } - }, - "documentation":"

The VPC security groups and subnets that are attached to a Lambda function.

" - }, - "VpcId":{"type":"string"}, - "WaitCancelledDetails":{ - "type":"structure", - "members":{ - "Error":{"shape":"EventError"} - } - }, - "WaitDetails":{ - "type":"structure", - "members":{ - "ScheduledEndTimestamp":{"shape":"ExecutionTimestamp"} - } - }, - "WaitOptions":{ - "type":"structure", - "members":{ - "WaitSeconds":{"shape":"WaitOptionsWaitSecondsInteger"} - } - }, - "WaitOptionsWaitSecondsInteger":{ - "type":"integer", - "box":true, - "max":31622400, - "min":1 - }, - "WaitStartedDetails":{ - "type":"structure", - "required":[ - "Duration", - "ScheduledEndTimestamp" - ], - "members":{ - "Duration":{"shape":"DurationSeconds"}, - "ScheduledEndTimestamp":{"shape":"ExecutionTimestamp"} - } - }, - "WaitSucceededDetails":{ - "type":"structure", - "members":{ - "Duration":{"shape":"DurationSeconds"} - } - }, - "Weight":{ - "type":"double", - "max":1.0, - "min":0.0 - }, - "WorkingDirectory":{ - "type":"string", - "max":1000, - "min":0 - } - }, - "documentation":"

Lambda

Overview

Lambda is a compute service that lets you run code without provisioning or managing servers. Lambda runs your code on a high-availability compute infrastructure and performs all of the administration of the compute resources, including server and operating system maintenance, capacity provisioning and automatic scaling, code monitoring and logging. With Lambda, you can run code for virtually any type of application or backend service. For more information about the Lambda service, see What is Lambda in the Lambda Developer Guide.

The Lambda API Reference provides information about each of the API methods, including details about the parameters in each API request and response.

You can use Software Development Kits (SDKs), Integrated Development Environment (IDE) Toolkits, and command line tools to access the API. For installation instructions, see Tools for Amazon Web Services.

For a list of Region-specific endpoints that Lambda supports, see Lambda endpoints and quotas in the Amazon Web Services General Reference.

When making the API calls, you will need to authenticate your request by providing a signature. Lambda supports Signature Version 4. For more information, see Signature Version 4 signing process in the Amazon Web Services General Reference.

CA certificates

Because Amazon Web Services SDKs use the CA certificates from your computer, changes to the certificates on the Amazon Web Services servers can cause connection failures when you attempt to use an SDK. You can prevent these failures by keeping your computer's CA certificates and operating system up to date. If you encounter this issue in a corporate environment and do not manage your own computer, you might need to ask an administrator to assist with the update process. The following list shows minimum operating system and Java versions:

When accessing the Lambda management console or Lambda API endpoints, whether through browsers or programmatically, you will need to ensure your client machines support any of the following CAs: Amazon Root CA 1, Starfield Services Root Certificate Authority - G2, and Starfield Class 2 Certification Authority.

Root certificates from the first two authorities are available from Amazon Trust Services, but keeping your computer up to date is the more straightforward solution. To learn more about ACM-provided certificates, see Amazon Web Services Certificate Manager FAQs.

" -} diff --git a/src/aws_durable_execution_sdk_python/concurrency.py b/src/aws_durable_execution_sdk_python/concurrency.py deleted file mode 100644 index 4797d05..0000000 --- a/src/aws_durable_execution_sdk_python/concurrency.py +++ /dev/null @@ -1,862 +0,0 @@ -"""Concurrent executor for parallel and map operations.""" - -from __future__ import annotations - -import heapq -import logging -import threading -import time -from abc import ABC, abstractmethod -from collections import Counter -from concurrent.futures import Future, ThreadPoolExecutor -from dataclasses import dataclass -from enum import Enum -from typing import TYPE_CHECKING, Generic, Self, TypeVar - -from aws_durable_execution_sdk_python.config import ChildConfig -from aws_durable_execution_sdk_python.exceptions import ( - InvalidStateError, - SuspendExecution, - TimedSuspendExecution, -) -from aws_durable_execution_sdk_python.identifier import OperationIdentifier -from aws_durable_execution_sdk_python.lambda_service import ErrorObject -from aws_durable_execution_sdk_python.operation.child import child_handler -from aws_durable_execution_sdk_python.types import BatchResult as BatchResultProtocol - -if TYPE_CHECKING: - from collections.abc import Callable - - from aws_durable_execution_sdk_python.config import CompletionConfig - from aws_durable_execution_sdk_python.context import DurableContext - from aws_durable_execution_sdk_python.lambda_service import OperationSubType - from aws_durable_execution_sdk_python.serdes import SerDes - from aws_durable_execution_sdk_python.state import ExecutionState - from aws_durable_execution_sdk_python.types import SummaryGenerator - - -logger = logging.getLogger(__name__) - -T = TypeVar("T") -R = TypeVar("R") - -CallableType = TypeVar("CallableType") -ResultType = TypeVar("ResultType") - - -# region Result models -class BatchItemStatus(Enum): - SUCCEEDED = "SUCCEEDED" - FAILED = "FAILED" - STARTED = "STARTED" - - -class CompletionReason(Enum): - ALL_COMPLETED = "ALL_COMPLETED" - MIN_SUCCESSFUL_REACHED = "MIN_SUCCESSFUL_REACHED" - FAILURE_TOLERANCE_EXCEEDED = "FAILURE_TOLERANCE_EXCEEDED" - - -@dataclass(frozen=True) -class SuspendResult: - should_suspend: bool - exception: SuspendExecution | None = None - - @staticmethod - def do_not_suspend() -> SuspendResult: - return SuspendResult(should_suspend=False) - - @staticmethod - def suspend(exception: SuspendExecution) -> SuspendResult: - return SuspendResult(should_suspend=True, exception=exception) - - -@dataclass(frozen=True) -class BatchItem(Generic[R]): - index: int - status: BatchItemStatus - result: R | None = None - error: ErrorObject | None = None - - def to_dict(self) -> dict: - return { - "index": self.index, - "status": self.status.value, - "result": self.result, - "error": self.error.to_dict() if self.error else None, - } - - @classmethod - def from_dict(cls, data: dict) -> BatchItem[R]: - return cls( - index=data["index"], - status=BatchItemStatus(data["status"]), - result=data.get("result"), - error=ErrorObject.from_dict(data["error"]) if data.get("error") else None, - ) - - -@dataclass(frozen=True) -class BatchResult(Generic[R], BatchResultProtocol[R]): # noqa: PYI059 - all: list[BatchItem[R]] - completion_reason: CompletionReason - - @classmethod - def from_dict( - cls, data: dict, completion_config: CompletionConfig | None = None - ) -> BatchResult[R]: - batch_items: list[BatchItem[R]] = [ - BatchItem.from_dict(item) for item in data["all"] - ] - - completion_reason_value = data.get("completionReason") - if 
completion_reason_value is None: - # Infer completion reason from batch item statuses and completion config - # This aligns with the TypeScript implementation that uses completion config - # to accurately reconstruct the completion reason during replay - result = cls.from_items(batch_items, completion_config) - logger.warning( - "Missing completionReason in BatchResult deserialization, " - "inferred '%s' from batch item statuses. " - "This may indicate incomplete serialization data.", - result.completion_reason.value, - ) - return result - - completion_reason = CompletionReason(completion_reason_value) - return cls(batch_items, completion_reason) - - @classmethod - def from_items( - cls, - items: list[BatchItem[R]], - completion_config: CompletionConfig | None = None, - ): - """ - Infer completion reason based on batch item statuses and completion config. - - This follows the same logic as the TypeScript implementation: - - If all items completed: ALL_COMPLETED - - If minSuccessful threshold met and not all completed: MIN_SUCCESSFUL_REACHED - - Otherwise: FAILURE_TOLERANCE_EXCEEDED - """ - - statuses = (item.status for item in items) - counts = Counter(statuses) - succeeded_count = counts.get(BatchItemStatus.SUCCEEDED, 0) - failed_count = counts.get(BatchItemStatus.FAILED, 0) - started_count = counts.get(BatchItemStatus.STARTED, 0) - - completed_count = succeeded_count + failed_count - total_count = started_count + completed_count - - # If all items completed (no started items), it's ALL_COMPLETED - if completed_count == total_count: - completion_reason = CompletionReason.ALL_COMPLETED - elif ( # If we have completion config and minSuccessful threshold is met - completion_config - and (min_successful := completion_config.min_successful) is not None - and succeeded_count >= min_successful - ): - completion_reason = CompletionReason.MIN_SUCCESSFUL_REACHED - else: - # Otherwise, assume failure tolerance was exceeded - completion_reason = CompletionReason.FAILURE_TOLERANCE_EXCEEDED - - return cls(items, completion_reason) - - def to_dict(self) -> dict: - return { - "all": [item.to_dict() for item in self.all], - "completionReason": self.completion_reason.value, - } - - def succeeded(self) -> list[BatchItem[R]]: - return [ - item - for item in self.all - if item.status is BatchItemStatus.SUCCEEDED and item.result is not None - ] - - def failed(self) -> list[BatchItem[R]]: - return [ - item - for item in self.all - if item.status is BatchItemStatus.FAILED and item.error is not None - ] - - def started(self) -> list[BatchItem[R]]: - return [item for item in self.all if item.status is BatchItemStatus.STARTED] - - @property - def status(self) -> BatchItemStatus: - return BatchItemStatus.FAILED if self.has_failure else BatchItemStatus.SUCCEEDED - - @property - def has_failure(self) -> bool: - return any(item.status is BatchItemStatus.FAILED for item in self.all) - - def throw_if_error(self) -> None: - first_error = next( - (item.error for item in self.all if item.status is BatchItemStatus.FAILED), - None, - ) - if first_error: - raise first_error.to_callable_runtime_error() - - def get_results(self) -> list[R]: - return [ - item.result - for item in self.all - if item.status is BatchItemStatus.SUCCEEDED and item.result is not None - ] - - def get_errors(self) -> list[ErrorObject]: - return [ - item.error - for item in self.all - if item.status is BatchItemStatus.FAILED and item.error is not None - ] - - @property - def success_count(self) -> int: - return sum(1 for item in self.all if item.status is 
BatchItemStatus.SUCCEEDED) - - @property - def failure_count(self) -> int: - return sum(1 for item in self.all if item.status is BatchItemStatus.FAILED) - - @property - def started_count(self) -> int: - return sum(1 for item in self.all if item.status is BatchItemStatus.STARTED) - - @property - def total_count(self) -> int: - return len(self.all) - - -# endregion Result models - - -# region concurrency models -@dataclass(frozen=True) -class Executable(Generic[CallableType]): - index: int - func: CallableType - - -class BranchStatus(Enum): - PENDING = "pending" - RUNNING = "running" - COMPLETED = "completed" - SUSPENDED = "suspended" - SUSPENDED_WITH_TIMEOUT = "suspended_with_timeout" - FAILED = "failed" - - -class ExecutableWithState(Generic[CallableType, ResultType]): - """Manages the execution state and lifecycle of an executable.""" - - def __init__(self, executable: Executable[CallableType]): - self.executable = executable - self._status = BranchStatus.PENDING - self._future: Future | None = None - self._suspend_until: float | None = None - self._result: ResultType = None # type: ignore[assignment] - self._is_result_set: bool = False - self._error: Exception | None = None - - @property - def future(self) -> Future: - """Get the future, raising error if not available.""" - if self._future is None: - msg = f"ExecutableWithState was never started. {self.executable.index}" - raise InvalidStateError(msg) - return self._future - - @property - def status(self) -> BranchStatus: - """Get current status.""" - return self._status - - @property - def result(self) -> ResultType: - """Get result if completed.""" - if not self._is_result_set or self._status != BranchStatus.COMPLETED: - msg = f"result not available in status {self._status}" - raise InvalidStateError(msg) - return self._result - - @property - def error(self) -> Exception: - """Get error if failed.""" - if self._error is None or self._status != BranchStatus.FAILED: - msg = f"error not available in status {self._status}" - raise InvalidStateError(msg) - return self._error - - @property - def suspend_until(self) -> float | None: - """Get suspend timestamp.""" - return self._suspend_until - - @property - def is_running(self) -> bool: - """Check if currently running.""" - return self._status is BranchStatus.RUNNING - - @property - def can_resume(self) -> bool: - """Check if can resume from suspension.""" - return self._status is BranchStatus.SUSPENDED or ( - self._status is BranchStatus.SUSPENDED_WITH_TIMEOUT - and self._suspend_until is not None - and time.time() >= self._suspend_until - ) - - @property - def index(self) -> int: - return self.executable.index - - @property - def callable(self) -> CallableType: - return self.executable.func - - # region State transitions - def run(self, future: Future) -> None: - """Transition to RUNNING state with a future.""" - if self._status != BranchStatus.PENDING: - msg = f"Cannot start running from {self._status}" - raise InvalidStateError(msg) - self._status = BranchStatus.RUNNING - self._future = future - - def suspend(self) -> None: - """Transition to SUSPENDED state (indefinite).""" - self._status = BranchStatus.SUSPENDED - self._suspend_until = None - - def suspend_with_timeout(self, timestamp: float) -> None: - """Transition to SUSPENDED_WITH_TIMEOUT state.""" - self._status = BranchStatus.SUSPENDED_WITH_TIMEOUT - self._suspend_until = timestamp - - def complete(self, result: ResultType) -> None: - """Transition to COMPLETED state.""" - self._status = BranchStatus.COMPLETED - self._result = 
result - self._is_result_set = True - - def fail(self, error: Exception) -> None: - """Transition to FAILED state.""" - self._status = BranchStatus.FAILED - self._error = error - - def reset_to_pending(self) -> None: - """Reset to PENDING state for resubmission.""" - self._status = BranchStatus.PENDING - self._future = None - self._suspend_until = None - - # endregion State transitions - - -class ExecutionCounters: - """Thread-safe counters for tracking execution state.""" - - def __init__( - self, - total_tasks: int, - min_successful: int, - tolerated_failure_count: int | None, - tolerated_failure_percentage: float | None, - ): - self.total_tasks: int = total_tasks - self.min_successful: int = min_successful - self.tolerated_failure_count: int | None = tolerated_failure_count - self.tolerated_failure_percentage: float | None = tolerated_failure_percentage - self.success_count: int = 0 - self.failure_count: int = 0 - self._lock = threading.Lock() - - def complete_task(self) -> None: - """Task completed successfully.""" - with self._lock: - self.success_count += 1 - - def fail_task(self) -> None: - """Task failed.""" - with self._lock: - self.failure_count += 1 - - def should_continue(self) -> bool: - """ - Check if we should continue starting new tasks (based on failure tolerance). - Matches TypeScript shouldContinue() logic. - """ - with self._lock: - # If no completion config, only continue if no failures - if ( - self.tolerated_failure_count is None - and self.tolerated_failure_percentage is None - ): - return self.failure_count == 0 - - # Check failure count tolerance - if ( - self.tolerated_failure_count is not None - and self.failure_count > self.tolerated_failure_count - ): - return False - - # Check failure percentage tolerance - if self.tolerated_failure_percentage is not None and self.total_tasks > 0: - failure_percentage = (self.failure_count / self.total_tasks) * 100 - if failure_percentage > self.tolerated_failure_percentage: - return False - - return True - - def is_complete(self) -> bool: - """ - Check if execution should complete (based on completion criteria). - Matches TypeScript isComplete() logic. - """ - with self._lock: - completed_count = self.success_count + self.failure_count - - # All tasks completed - if completed_count == self.total_tasks: - # Complete if no failure tolerance OR no failures OR min successful reached - return ( - ( - self.tolerated_failure_count is None - and self.tolerated_failure_percentage is None - ) - or self.failure_count == 0 - or self.success_count >= self.min_successful - ) - - # when we breach min successful, we've completed - return self.success_count >= self.min_successful - - def should_complete(self) -> bool: - """ - Check if execution should complete. - Combines TypeScript shouldContinue() and isComplete() logic. 
- """ - return self.is_complete() or not self.should_continue() - - def is_all_completed(self) -> bool: - """True if all tasks completed successfully.""" - with self._lock: - return self.success_count == self.total_tasks - - def is_min_successful_reached(self) -> bool: - """True if minimum successful tasks reached.""" - with self._lock: - return self.success_count >= self.min_successful - - def is_failure_tolerance_exceeded(self) -> bool: - """True if failure tolerance was exceeded.""" - with self._lock: - return self._is_failure_condition_reached( - tolerated_count=self.tolerated_failure_count, - tolerated_percentage=self.tolerated_failure_percentage, - failure_count=self.failure_count, - ) - - def _is_failure_condition_reached( - self, - tolerated_count: int | None, - tolerated_percentage: float | None, - failure_count: int, - ) -> bool: - """True if failure conditions are reached (no locking - caller must lock).""" - # Failure count condition - if tolerated_count is not None and failure_count > tolerated_count: - return True - - # Failure percentage condition - if tolerated_percentage is not None and self.total_tasks > 0: - failure_percentage = (failure_count / self.total_tasks) * 100 - if failure_percentage > tolerated_percentage: - return True - - return False - - -# endegion concurrency models - - -# region concurrency logic -class TimerScheduler: - """Manage timed suspend tasks with a background timer thread.""" - - def __init__( - self, resubmit_callback: Callable[[ExecutableWithState], None] - ) -> None: - self.resubmit_callback = resubmit_callback - self._pending_resumes: list[tuple[float, ExecutableWithState]] = [] - self._lock = threading.Lock() - self._shutdown = threading.Event() - self._timer_thread = threading.Thread(target=self._timer_loop, daemon=True) - self._timer_thread.start() - - def __enter__(self) -> Self: - return self - - def __exit__(self, exc_type, exc_val, exc_tb) -> None: - self.shutdown() - - def schedule_resume( - self, exe_state: ExecutableWithState, resume_time: float - ) -> None: - """Schedule a task to resume at the specified time.""" - with self._lock: - heapq.heappush(self._pending_resumes, (resume_time, exe_state)) - - def shutdown(self) -> None: - """Shutdown the timer thread and cancel all pending resumes.""" - self._shutdown.set() - self._timer_thread.join(timeout=1.0) - with self._lock: - self._pending_resumes.clear() - - def _timer_loop(self) -> None: - """Background thread that processes timed resumes.""" - while not self._shutdown.is_set(): - next_resume_time = None - - with self._lock: - if self._pending_resumes: - next_resume_time = self._pending_resumes[0][0] - - if next_resume_time is None: - # No pending resumes, wait a bit and check again - self._shutdown.wait(timeout=0.1) - continue - - current_time = time.time() - if current_time >= next_resume_time: - # Time to resume - with self._lock: - # no branch cover because hard to test reliably - this is a double-safety check if heap mutated - # since the first peek on next_resume_time further up - if ( # pragma: no branch - self._pending_resumes - and self._pending_resumes[0][0] <= current_time - ): - _, exe_state = heapq.heappop(self._pending_resumes) - if exe_state.can_resume: - exe_state.reset_to_pending() - self.resubmit_callback(exe_state) - else: - # Wait until next resume time - wait_time = min(next_resume_time - current_time, 0.1) - self._shutdown.wait(timeout=wait_time) - - -class ConcurrentExecutor(ABC, Generic[CallableType, ResultType]): - """Execute durable operations 
concurrently. This contains the execution logic for Map and Parallel.""" - - def __init__( - self, - executables: list[Executable[CallableType]], - max_concurrency: int | None, - completion_config: CompletionConfig, - sub_type_top: OperationSubType, - sub_type_iteration: OperationSubType, - name_prefix: str, - serdes: SerDes | None, - item_serdes: SerDes | None = None, - summary_generator: SummaryGenerator | None = None, - ): - """Initialize ConcurrentExecutor. - - Args: - summary_generator: Optional function to generate compact summaries for large results. - When the serialized result exceeds 256KB, this generator creates a JSON summary - instead of checkpointing the full result. Used by map/parallel operations to - handle large BatchResult payloads efficiently. Matches TypeScript behavior in - run-in-child-context-handler.ts. - """ - self.executables = executables - self.max_concurrency = max_concurrency - self.completion_config = completion_config - self.sub_type_top = sub_type_top - self.sub_type_iteration = sub_type_iteration - self.name_prefix = name_prefix - self.summary_generator = summary_generator - - # Event-driven state tracking for when the executor is done - self._completion_event = threading.Event() - self._suspend_exception: SuspendExecution | None = None - - # ExecutionCounters will keep track of completion criteria and on-going counters - min_successful = self.completion_config.min_successful or len(self.executables) - tolerated_failure_count = self.completion_config.tolerated_failure_count - tolerated_failure_percentage = ( - self.completion_config.tolerated_failure_percentage - ) - - self.counters: ExecutionCounters = ExecutionCounters( - len(executables), - min_successful, - tolerated_failure_count, - tolerated_failure_percentage, - ) - self.executables_with_state: list[ExecutableWithState] = [] - self.serdes = serdes - self.item_serdes = item_serdes - - @abstractmethod - def execute_item( - self, child_context: DurableContext, executable: Executable[CallableType] - ) -> ResultType: - """Execute a single executable in a child context and return the result.""" - raise NotImplementedError - - def execute( - self, execution_state: ExecutionState, executor_context: DurableContext - ) -> BatchResult[ResultType]: - """Execute items concurrently with event-driven state management.""" - logger.debug( - "▶️ Executing concurrent operation, items: %d", len(self.executables) - ) - - max_workers = self.max_concurrency or len(self.executables) - - self.executables_with_state = [ - ExecutableWithState(executable=exe) for exe in self.executables - ] - self._completion_event.clear() - self._suspend_exception = None - - def resubmitter(executable_with_state: ExecutableWithState) -> None: - """Resubmit a timed suspended task.""" - execution_state.create_checkpoint() - submit_task(executable_with_state) - - with ( - TimerScheduler(resubmitter) as scheduler, - ThreadPoolExecutor(max_workers=max_workers) as thread_executor, - ): - - def submit_task(executable_with_state: ExecutableWithState) -> None: - """Submit task to the thread executor and mark its state as started.""" - future = thread_executor.submit( - self._execute_item_in_child_context, - executor_context, - executable_with_state.executable, - ) - executable_with_state.run(future) - - def on_done(future: Future) -> None: - self._on_task_complete(executable_with_state, future, scheduler) - - future.add_done_callback(on_done) - - # Submit initial tasks - for exe_state in self.executables_with_state: - submit_task(exe_state) - - # 
Wait for completion - self._completion_event.wait() - - # Suspend execution if everything done and at least one of the tasks raised a suspend exception. - if self._suspend_exception: - raise self._suspend_exception - - # Build final result - return self._create_result() - - def should_execution_suspend(self) -> SuspendResult: - """Check if execution should suspend.""" - earliest_timestamp: float = float("inf") - indefinite_suspend_task: ( - ExecutableWithState[CallableType, ResultType] | None - ) = None - - for exe_state in self.executables_with_state: - if exe_state.status in {BranchStatus.PENDING, BranchStatus.RUNNING}: - # Exit here! Still have tasks that can make progress, don't suspend. - return SuspendResult.do_not_suspend() - if exe_state.status is BranchStatus.SUSPENDED_WITH_TIMEOUT: - if ( - exe_state.suspend_until - and exe_state.suspend_until < earliest_timestamp - ): - earliest_timestamp = exe_state.suspend_until - elif exe_state.status is BranchStatus.SUSPENDED: - indefinite_suspend_task = exe_state - - # All tasks are in final states and at least one of them is a suspend. - if earliest_timestamp != float("inf"): - return SuspendResult.suspend( - TimedSuspendExecution( - "All concurrent work complete or suspended pending retry.", - earliest_timestamp, - ) - ) - if indefinite_suspend_task: - return SuspendResult.suspend( - SuspendExecution( - "All concurrent work complete or suspended and pending external callback." - ) - ) - - return SuspendResult.do_not_suspend() - - def _on_task_complete( - self, - exe_state: ExecutableWithState, - future: Future, - scheduler: TimerScheduler, - ) -> None: - """Handle task completion, suspension, or failure.""" - try: - result = future.result() - exe_state.complete(result) - self.counters.complete_task() - except TimedSuspendExecution as tse: - exe_state.suspend_with_timeout(tse.scheduled_timestamp) - scheduler.schedule_resume(exe_state, tse.scheduled_timestamp) - except SuspendExecution: - exe_state.suspend() - # For indefinite suspend, don't schedule resume - except Exception as e: # noqa: BLE001 - exe_state.fail(e) - self.counters.fail_task() - - # Check if execution should complete or suspend - if self.counters.should_complete(): - self._completion_event.set() - else: - suspend_result = self.should_execution_suspend() - if suspend_result.should_suspend: - self._suspend_exception = suspend_result.exception - self._completion_event.set() - - def _create_result(self) -> BatchResult[ResultType]: - """ - Build the final BatchResult. - - When this function executes, we've terminated the upper/parent context for whatever reason. - It follows that our items can be only in 3 states, Completed, Failed and Started (in all of the possible forms). - We tag each branch based on its observed value at the time of completion of the parent / upper context, and pass the - results to BatchResult. - - Any inference wrt completion reason is left up to BatchResult, keeping the logic inference isolated. 
- """ - batch_items: list[BatchItem[ResultType]] = [] - for executable in self.executables_with_state: - match executable.status: - case BranchStatus.COMPLETED: - batch_items.append( - BatchItem( - executable.index, - BatchItemStatus.SUCCEEDED, - executable.result, - ) - ) - case BranchStatus.FAILED: - batch_items.append( - BatchItem( - executable.index, - BatchItemStatus.FAILED, - error=ErrorObject.from_exception(executable.error), - ) - ) - case ( - BranchStatus.PENDING - | BranchStatus.RUNNING - | BranchStatus.SUSPENDED - | BranchStatus.SUSPENDED_WITH_TIMEOUT - ): - batch_items.append( - BatchItem(executable.index, BatchItemStatus.STARTED) - ) - - return BatchResult.from_items(batch_items, self.completion_config) - - def _execute_item_in_child_context( - self, - executor_context: DurableContext, - executable: Executable[CallableType], - ) -> ResultType: - """ - Execute a single item in a derived child context. - - instead of relying on `executor_context.run_in_child_context` - we generate an operation_id for the child, and then call `child_handler` - directly. This avoids the hidden mutation of the context's internal counter. - we can do this because we explicitly control the generation of step_id and do it - using executable.index. - - - invariant: `operation_id` for a given executable is deterministic, - and execution order invariant. - """ - - operation_id = executor_context._create_step_id_for_logical_step( # noqa: SLF001 - executable.index - ) - name = f"{self.name_prefix}{executable.index}" - child_context = executor_context.create_child_context(operation_id) - operation_identifier = OperationIdentifier( - operation_id, - executor_context._parent_id, # noqa: SLF001 - name, - ) - - def run_in_child_handler(): - return self.execute_item(child_context, executable) - - return child_handler( - run_in_child_handler, - child_context.state, - operation_identifier=operation_identifier, - config=ChildConfig( - serdes=self.item_serdes or self.serdes, - sub_type=self.sub_type_iteration, - summary_generator=self.summary_generator, - ), - ) - - def replay(self, execution_state: ExecutionState, executor_context: DurableContext): - """ - Replay rather than re-run children. - - if we are here, then we are in replay_children. - This will pre-generate all the operation ids for the children and collect the checkpointed - results. 
- """ - items: list[BatchItem[ResultType]] = [] - for executable in self.executables: - operation_id = executor_context._create_step_id_for_logical_step( # noqa: SLF001 - executable.index - ) - checkpoint = execution_state.get_checkpoint_result(operation_id) - - result: ResultType | None = None - error = None - status: BatchItemStatus - if checkpoint.is_succeeded(): - status = BatchItemStatus.SUCCEEDED - result = self._execute_item_in_child_context( - executor_context, executable - ) - - elif checkpoint.is_failed(): - error = checkpoint.error - status = BatchItemStatus.FAILED - else: - status = BatchItemStatus.STARTED - - batch_item = BatchItem(executable.index, status, result=result, error=error) - items.append(batch_item) - return BatchResult.from_items(items, self.completion_config) - - -# endregion concurrency logic diff --git a/src/aws_durable_execution_sdk_python/concurrency/__init__.py b/src/aws_durable_execution_sdk_python/concurrency/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/aws_durable_execution_sdk_python/concurrency/executor.py b/src/aws_durable_execution_sdk_python/concurrency/executor.py new file mode 100644 index 0000000..da1a5cd --- /dev/null +++ b/src/aws_durable_execution_sdk_python/concurrency/executor.py @@ -0,0 +1,451 @@ +"""Concurrent executor for parallel and map operations.""" + +from __future__ import annotations + +import heapq +import logging +import threading +import time +from abc import ABC, abstractmethod +from concurrent.futures import Future, ThreadPoolExecutor +from typing import TYPE_CHECKING, Generic, Self, TypeVar + +from aws_durable_execution_sdk_python.concurrency.models import ( + BatchItem, + BatchItemStatus, + BatchResult, + BranchStatus, + Executable, + ExecutableWithState, + ExecutionCounters, + SuspendResult, +) +from aws_durable_execution_sdk_python.config import ChildConfig +from aws_durable_execution_sdk_python.exceptions import ( + OrphanedChildException, + SuspendExecution, + TimedSuspendExecution, +) +from aws_durable_execution_sdk_python.identifier import OperationIdentifier +from aws_durable_execution_sdk_python.lambda_service import ErrorObject +from aws_durable_execution_sdk_python.operation.child import child_handler + +if TYPE_CHECKING: + from collections.abc import Callable + + from aws_durable_execution_sdk_python.config import CompletionConfig + from aws_durable_execution_sdk_python.context import DurableContext + from aws_durable_execution_sdk_python.lambda_service import OperationSubType + from aws_durable_execution_sdk_python.serdes import SerDes + from aws_durable_execution_sdk_python.state import ExecutionState + from aws_durable_execution_sdk_python.types import SummaryGenerator + + +logger = logging.getLogger(__name__) + +T = TypeVar("T") +R = TypeVar("R") + +CallableType = TypeVar("CallableType") +ResultType = TypeVar("ResultType") + + +# region concurrency logic +class TimerScheduler: + """Manage timed suspend tasks with a background timer thread.""" + + def __init__( + self, resubmit_callback: Callable[[ExecutableWithState], None] + ) -> None: + self.resubmit_callback = resubmit_callback + self._pending_resumes: list[tuple[float, ExecutableWithState]] = [] + self._lock = threading.Lock() + self._shutdown = threading.Event() + self._timer_thread = threading.Thread(target=self._timer_loop, daemon=True) + self._timer_thread.start() + + def __enter__(self) -> Self: + return self + + def __exit__(self, exc_type, exc_val, exc_tb) -> None: + self.shutdown() + + def schedule_resume( + self, 
exe_state: ExecutableWithState, resume_time: float + ) -> None: + """Schedule a task to resume at the specified time.""" + with self._lock: + heapq.heappush(self._pending_resumes, (resume_time, exe_state)) + + def shutdown(self) -> None: + """Shutdown the timer thread and cancel all pending resumes.""" + self._shutdown.set() + self._timer_thread.join(timeout=1.0) + with self._lock: + self._pending_resumes.clear() + + def _timer_loop(self) -> None: + """Background thread that processes timed resumes.""" + while not self._shutdown.is_set(): + next_resume_time = None + + with self._lock: + if self._pending_resumes: + next_resume_time = self._pending_resumes[0][0] + + if next_resume_time is None: + # No pending resumes, wait a bit and check again + self._shutdown.wait(timeout=0.1) + continue + + current_time = time.time() + if current_time >= next_resume_time: + # Time to resume + with self._lock: + # no branch cover because hard to test reliably - this is a double-safety check if heap mutated + # since the first peek on next_resume_time further up + if ( # pragma: no branch + self._pending_resumes + and self._pending_resumes[0][0] <= current_time + ): + _, exe_state = heapq.heappop(self._pending_resumes) + if exe_state.can_resume: + exe_state.reset_to_pending() + self.resubmit_callback(exe_state) + else: + # Wait until next resume time + wait_time = min(next_resume_time - current_time, 0.1) + self._shutdown.wait(timeout=wait_time) + + +class ConcurrentExecutor(ABC, Generic[CallableType, ResultType]): + """Execute durable operations concurrently. This contains the execution logic for Map and Parallel.""" + + def __init__( + self, + executables: list[Executable[CallableType]], + max_concurrency: int | None, + completion_config: CompletionConfig, + sub_type_top: OperationSubType, + sub_type_iteration: OperationSubType, + name_prefix: str, + serdes: SerDes | None, + item_serdes: SerDes | None = None, + summary_generator: SummaryGenerator | None = None, + ): + """Initialize ConcurrentExecutor. + + Args: + summary_generator: Optional function to generate compact summaries for large results. + When the serialized result exceeds 256KB, this generator creates a JSON summary + instead of checkpointing the full result. Used by map/parallel operations to + handle large BatchResult payloads efficiently. Matches TypeScript behavior in + run-in-child-context-handler.ts. 
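+
+                A minimal illustrative sketch (hypothetical; see the
+                SummaryGenerator protocol in types.py for the real signature),
+                assuming the generator receives the BatchResult and returns a
+                compact JSON string:
+
+                    def summarize(batch_result):
+                        return json.dumps({
+                            "completionReason": batch_result.completion_reason.value,
+                            "successCount": batch_result.success_count,
+                            "failureCount": batch_result.failure_count,
+                        })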
+ """ + self.executables = executables + self.max_concurrency = max_concurrency + self.completion_config = completion_config + self.sub_type_top = sub_type_top + self.sub_type_iteration = sub_type_iteration + self.name_prefix = name_prefix + self.summary_generator = summary_generator + + # Event-driven state tracking for when the executor is done + self._completion_event = threading.Event() + self._suspend_exception: SuspendExecution | None = None + + # ExecutionCounters will keep track of completion criteria and on-going counters + min_successful = self.completion_config.min_successful or len(self.executables) + tolerated_failure_count = self.completion_config.tolerated_failure_count + tolerated_failure_percentage = ( + self.completion_config.tolerated_failure_percentage + ) + + self.counters: ExecutionCounters = ExecutionCounters( + len(executables), + min_successful, + tolerated_failure_count, + tolerated_failure_percentage, + ) + self.executables_with_state: list[ExecutableWithState] = [] + self.serdes = serdes + self.item_serdes = item_serdes + + @abstractmethod + def execute_item( + self, child_context: DurableContext, executable: Executable[CallableType] + ) -> ResultType: + """Execute a single executable in a child context and return the result.""" + raise NotImplementedError + + def execute( + self, execution_state: ExecutionState, executor_context: DurableContext + ) -> BatchResult[ResultType]: + """Execute items concurrently with event-driven state management.""" + logger.debug( + "▶️ Executing concurrent operation, items: %d", len(self.executables) + ) + + max_workers = self.max_concurrency or len(self.executables) + + self.executables_with_state = [ + ExecutableWithState(executable=exe) for exe in self.executables + ] + self._completion_event.clear() + self._suspend_exception = None + + def resubmitter(executable_with_state: ExecutableWithState) -> None: + """Resubmit a timed suspended task.""" + execution_state.create_checkpoint() + submit_task(executable_with_state) + + thread_executor = ThreadPoolExecutor(max_workers=max_workers) + try: + with TimerScheduler(resubmitter) as scheduler: + + def submit_task(executable_with_state: ExecutableWithState) -> Future: + """Submit task to the thread executor and mark its state as started.""" + future = thread_executor.submit( + self._execute_item_in_child_context, + executor_context, + executable_with_state.executable, + ) + executable_with_state.run(future) + + def on_done(future: Future) -> None: + self._on_task_complete(executable_with_state, future, scheduler) + + future.add_done_callback(on_done) + return future + + # Submit initial tasks + futures = [ + submit_task(exe_state) for exe_state in self.executables_with_state + ] + + # Wait for completion + self._completion_event.wait() + + # Cancel futures that haven't started yet + for future in futures: + future.cancel() + + # Suspend execution if everything done and at least one of the tasks raised a suspend exception. + if self._suspend_exception: + raise self._suspend_exception + + finally: + # Shutdown without waiting for running threads for early return when + # completion criteria are met (e.g., min_successful). + # Running threads will continue in background but they raise OrphanedChildException + # on the next attempt to checkpoint. 
+            thread_executor.shutdown(wait=False, cancel_futures=True)
+
+        # Build final result
+        return self._create_result()
+
+    def should_execution_suspend(self) -> SuspendResult:
+        """Check if execution should suspend."""
+        earliest_timestamp: float = float("inf")
+        indefinite_suspend_task: (
+            ExecutableWithState[CallableType, ResultType] | None
+        ) = None
+
+        for exe_state in self.executables_with_state:
+            if exe_state.status in {BranchStatus.PENDING, BranchStatus.RUNNING}:
+                # Exit here! Still have tasks that can make progress, don't suspend.
+                return SuspendResult.do_not_suspend()
+            if exe_state.status is BranchStatus.SUSPENDED_WITH_TIMEOUT:
+                if (
+                    exe_state.suspend_until
+                    and exe_state.suspend_until < earliest_timestamp
+                ):
+                    earliest_timestamp = exe_state.suspend_until
+            elif exe_state.status is BranchStatus.SUSPENDED:
+                indefinite_suspend_task = exe_state
+
+        # All tasks are in final states and at least one of them is a suspend.
+        if earliest_timestamp != float("inf"):
+            return SuspendResult.suspend(
+                TimedSuspendExecution(
+                    "All concurrent work complete or suspended pending retry.",
+                    earliest_timestamp,
+                )
+            )
+        if indefinite_suspend_task:
+            return SuspendResult.suspend(
+                SuspendExecution(
+                    "All concurrent work complete or suspended and pending external callback."
+                )
+            )
+
+        return SuspendResult.do_not_suspend()
+
+    def _on_task_complete(
+        self,
+        exe_state: ExecutableWithState,
+        future: Future,
+        scheduler: TimerScheduler,
+    ) -> None:
+        """Handle task completion, suspension, or failure."""
+
+        if future.cancelled():
+            exe_state.suspend()
+            return
+
+        try:
+            result = future.result()
+            exe_state.complete(result)
+            self.counters.complete_task()
+        except OrphanedChildException:
+            # Parent already completed and returned.
+            # State is already RUNNING, which _create_result() maps to STARTED.
+            # Just log and exit - no state change needed.
+            logger.debug(
+                "Terminating orphaned branch %s without error because parent has completed already",
+                exe_state.index,
+            )
+            return
+        except TimedSuspendExecution as tse:
+            exe_state.suspend_with_timeout(tse.scheduled_timestamp)
+            scheduler.schedule_resume(exe_state, tse.scheduled_timestamp)
+        except SuspendExecution:
+            exe_state.suspend()
+            # For indefinite suspend, don't schedule resume
+        except Exception as e:  # noqa: BLE001
+            exe_state.fail(e)
+            self.counters.fail_task()
+
+        # Check if execution should complete or suspend
+        if self.counters.should_complete():
+            self._completion_event.set()
+        else:
+            suspend_result = self.should_execution_suspend()
+            if suspend_result.should_suspend:
+                self._suspend_exception = suspend_result.exception
+                self._completion_event.set()
+
+    def _create_result(self) -> BatchResult[ResultType]:
+        """
+        Build the final BatchResult.
+
+        When this function executes, we've terminated the upper/parent context for whatever reason.
+        It follows that our items can only be in three states: Completed, Failed, or Started
+        (the latter covering every non-terminal form).
+        We tag each branch based on its observed state at the time the parent / upper
+        context completed, and pass the results to BatchResult.
+
+        Any inference about the completion reason is left to BatchResult, keeping the
+        inference logic isolated.
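+
+        Status mapping: COMPLETED -> SUCCEEDED, FAILED -> FAILED; every
+        non-terminal state (PENDING, RUNNING, SUSPENDED, SUSPENDED_WITH_TIMEOUT)
+        is reported as STARTED.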
+ """ + batch_items: list[BatchItem[ResultType]] = [] + for executable in self.executables_with_state: + match executable.status: + case BranchStatus.COMPLETED: + batch_items.append( + BatchItem( + executable.index, + BatchItemStatus.SUCCEEDED, + executable.result, + ) + ) + case BranchStatus.FAILED: + batch_items.append( + BatchItem( + executable.index, + BatchItemStatus.FAILED, + error=ErrorObject.from_exception(executable.error), + ) + ) + case ( + BranchStatus.PENDING + | BranchStatus.RUNNING + | BranchStatus.SUSPENDED + | BranchStatus.SUSPENDED_WITH_TIMEOUT + ): + batch_items.append( + BatchItem(executable.index, BatchItemStatus.STARTED) + ) + + return BatchResult.from_items(batch_items, self.completion_config) + + def _execute_item_in_child_context( + self, + executor_context: DurableContext, + executable: Executable[CallableType], + ) -> ResultType: + """ + Execute a single item in a derived child context. + + instead of relying on `executor_context.run_in_child_context` + we generate an operation_id for the child, and then call `child_handler` + directly. This avoids the hidden mutation of the context's internal counter. + we can do this because we explicitly control the generation of step_id and do it + using executable.index. + + + invariant: `operation_id` for a given executable is deterministic, + and execution order invariant. + """ + + operation_id = executor_context._create_step_id_for_logical_step( # noqa: SLF001 + executable.index + ) + name = f"{self.name_prefix}{executable.index}" + child_context = executor_context.create_child_context(operation_id) + operation_identifier = OperationIdentifier( + operation_id, + executor_context._parent_id, # noqa: SLF001 + name, + ) + + def run_in_child_handler(): + return self.execute_item(child_context, executable) + + result: ResultType = child_handler( + run_in_child_handler, + child_context.state, + operation_identifier=operation_identifier, + config=ChildConfig( + serdes=self.item_serdes or self.serdes, + sub_type=self.sub_type_iteration, + summary_generator=self.summary_generator, + ), + ) + child_context.state.track_replay(operation_id=operation_id) + return result + + def replay(self, execution_state: ExecutionState, executor_context: DurableContext): + """ + Replay rather than re-run children. + + if we are here, then we are in replay_children. + This will pre-generate all the operation ids for the children and collect the checkpointed + results. 
+ """ + items: list[BatchItem[ResultType]] = [] + for executable in self.executables: + operation_id = executor_context._create_step_id_for_logical_step( # noqa: SLF001 + executable.index + ) + checkpoint = execution_state.get_checkpoint_result(operation_id) + + result: ResultType | None = None + error = None + status: BatchItemStatus + if checkpoint.is_succeeded(): + status = BatchItemStatus.SUCCEEDED + result = self._execute_item_in_child_context( + executor_context, executable + ) + + elif checkpoint.is_failed(): + error = checkpoint.error + status = BatchItemStatus.FAILED + else: + status = BatchItemStatus.STARTED + + batch_item = BatchItem(executable.index, status, result=result, error=error) + items.append(batch_item) + return BatchResult.from_items(items, self.completion_config) + + +# endregion concurrency logic diff --git a/src/aws_durable_execution_sdk_python/concurrency/models.py b/src/aws_durable_execution_sdk_python/concurrency/models.py new file mode 100644 index 0000000..29ffeaf --- /dev/null +++ b/src/aws_durable_execution_sdk_python/concurrency/models.py @@ -0,0 +1,469 @@ +"""Concurrent executor for parallel and map operations.""" + +from __future__ import annotations + +import logging +import threading +import time +from collections import Counter +from dataclasses import dataclass +from enum import Enum +from typing import TYPE_CHECKING, Generic, TypeVar + +from aws_durable_execution_sdk_python.exceptions import ( + InvalidStateError, + SuspendExecution, +) +from aws_durable_execution_sdk_python.lambda_service import ErrorObject +from aws_durable_execution_sdk_python.types import BatchResult as BatchResultProtocol + +if TYPE_CHECKING: + from concurrent.futures import Future + + from aws_durable_execution_sdk_python.config import CompletionConfig + + +logger = logging.getLogger(__name__) + +T = TypeVar("T") +R = TypeVar("R") + +CallableType = TypeVar("CallableType") +ResultType = TypeVar("ResultType") + + +# region Result models +class BatchItemStatus(Enum): + SUCCEEDED = "SUCCEEDED" + FAILED = "FAILED" + STARTED = "STARTED" + + +class CompletionReason(Enum): + ALL_COMPLETED = "ALL_COMPLETED" + MIN_SUCCESSFUL_REACHED = "MIN_SUCCESSFUL_REACHED" + FAILURE_TOLERANCE_EXCEEDED = "FAILURE_TOLERANCE_EXCEEDED" + + +@dataclass(frozen=True) +class SuspendResult: + should_suspend: bool + exception: SuspendExecution | None = None + + @staticmethod + def do_not_suspend() -> SuspendResult: + return SuspendResult(should_suspend=False) + + @staticmethod + def suspend(exception: SuspendExecution) -> SuspendResult: + return SuspendResult(should_suspend=True, exception=exception) + + +@dataclass(frozen=True) +class BatchItem(Generic[R]): + index: int + status: BatchItemStatus + result: R | None = None + error: ErrorObject | None = None + + def to_dict(self) -> dict: + return { + "index": self.index, + "status": self.status.value, + "result": self.result, + "error": self.error.to_dict() if self.error else None, + } + + @classmethod + def from_dict(cls, data: dict) -> BatchItem[R]: + return cls( + index=data["index"], + status=BatchItemStatus(data["status"]), + result=data.get("result"), + error=ErrorObject.from_dict(data["error"]) if data.get("error") else None, + ) + + +@dataclass(frozen=True) +class BatchResult(Generic[R], BatchResultProtocol[R]): # noqa: PYI059 + all: list[BatchItem[R]] + completion_reason: CompletionReason + + @classmethod + def from_dict( + cls, data: dict, completion_config: CompletionConfig | None = None + ) -> BatchResult[R]: + batch_items: list[BatchItem[R]] = [ + 
BatchItem.from_dict(item) for item in data["all"] + ] + + completion_reason_value = data.get("completionReason") + if completion_reason_value is None: + # Infer completion reason from batch item statuses and completion config + # This aligns with the TypeScript implementation that uses completion config + # to accurately reconstruct the completion reason during replay + result = cls.from_items(batch_items, completion_config) + logger.warning( + "Missing completionReason in BatchResult deserialization, " + "inferred '%s' from batch item statuses. " + "This may indicate incomplete serialization data.", + result.completion_reason.value, + ) + return result + + completion_reason = CompletionReason(completion_reason_value) + return cls(batch_items, completion_reason) + + @classmethod + def from_items( + cls, + items: list[BatchItem[R]], + completion_config: CompletionConfig | None = None, + ): + """ + Infer completion reason based on batch item statuses and completion config. + + This follows the same logic as the TypeScript implementation: + - If all items completed: ALL_COMPLETED + - If minSuccessful threshold met and not all completed: MIN_SUCCESSFUL_REACHED + - Otherwise: FAILURE_TOLERANCE_EXCEEDED + """ + + statuses = (item.status for item in items) + counts = Counter(statuses) + succeeded_count = counts.get(BatchItemStatus.SUCCEEDED, 0) + failed_count = counts.get(BatchItemStatus.FAILED, 0) + started_count = counts.get(BatchItemStatus.STARTED, 0) + + completed_count = succeeded_count + failed_count + total_count = started_count + completed_count + + # If all items completed (no started items), it's ALL_COMPLETED + if completed_count == total_count: + completion_reason = CompletionReason.ALL_COMPLETED + elif ( # If we have completion config and minSuccessful threshold is met + completion_config + and (min_successful := completion_config.min_successful) is not None + and succeeded_count >= min_successful + ): + completion_reason = CompletionReason.MIN_SUCCESSFUL_REACHED + else: + # Otherwise, assume failure tolerance was exceeded + completion_reason = CompletionReason.FAILURE_TOLERANCE_EXCEEDED + + return cls(items, completion_reason) + + def to_dict(self) -> dict: + return { + "all": [item.to_dict() for item in self.all], + "completionReason": self.completion_reason.value, + } + + def succeeded(self) -> list[BatchItem[R]]: + return [ + item + for item in self.all + if item.status is BatchItemStatus.SUCCEEDED and item.result is not None + ] + + def failed(self) -> list[BatchItem[R]]: + return [ + item + for item in self.all + if item.status is BatchItemStatus.FAILED and item.error is not None + ] + + def started(self) -> list[BatchItem[R]]: + return [item for item in self.all if item.status is BatchItemStatus.STARTED] + + @property + def status(self) -> BatchItemStatus: + return BatchItemStatus.FAILED if self.has_failure else BatchItemStatus.SUCCEEDED + + @property + def has_failure(self) -> bool: + return any(item.status is BatchItemStatus.FAILED for item in self.all) + + def throw_if_error(self) -> None: + first_error = next( + (item.error for item in self.all if item.status is BatchItemStatus.FAILED), + None, + ) + if first_error: + raise first_error.to_callable_runtime_error() + + def get_results(self) -> list[R]: + return [ + item.result + for item in self.all + if item.status is BatchItemStatus.SUCCEEDED and item.result is not None + ] + + def get_errors(self) -> list[ErrorObject]: + return [ + item.error + for item in self.all + if item.status is BatchItemStatus.FAILED and item.error 
is not None + ] + + @property + def success_count(self) -> int: + return sum(1 for item in self.all if item.status is BatchItemStatus.SUCCEEDED) + + @property + def failure_count(self) -> int: + return sum(1 for item in self.all if item.status is BatchItemStatus.FAILED) + + @property + def started_count(self) -> int: + return sum(1 for item in self.all if item.status is BatchItemStatus.STARTED) + + @property + def total_count(self) -> int: + return len(self.all) + + +# endregion Result models + + +# region concurrency models +@dataclass(frozen=True) +class Executable(Generic[CallableType]): + index: int + func: CallableType + + +class BranchStatus(Enum): + PENDING = "pending" + RUNNING = "running" + COMPLETED = "completed" + SUSPENDED = "suspended" + SUSPENDED_WITH_TIMEOUT = "suspended_with_timeout" + FAILED = "failed" + + +class ExecutableWithState(Generic[CallableType, ResultType]): + """Manages the execution state and lifecycle of an executable.""" + + def __init__(self, executable: Executable[CallableType]): + self.executable = executable + self._status = BranchStatus.PENDING + self._future: Future | None = None + self._suspend_until: float | None = None + self._result: ResultType = None # type: ignore[assignment] + self._is_result_set: bool = False + self._error: Exception | None = None + + @property + def future(self) -> Future: + """Get the future, raising error if not available.""" + if self._future is None: + msg = f"ExecutableWithState was never started. {self.executable.index}" + raise InvalidStateError(msg) + return self._future + + @property + def status(self) -> BranchStatus: + """Get current status.""" + return self._status + + @property + def result(self) -> ResultType: + """Get result if completed.""" + if not self._is_result_set or self._status != BranchStatus.COMPLETED: + msg = f"result not available in status {self._status}" + raise InvalidStateError(msg) + return self._result + + @property + def error(self) -> Exception: + """Get error if failed.""" + if self._error is None or self._status != BranchStatus.FAILED: + msg = f"error not available in status {self._status}" + raise InvalidStateError(msg) + return self._error + + @property + def suspend_until(self) -> float | None: + """Get suspend timestamp.""" + return self._suspend_until + + @property + def is_running(self) -> bool: + """Check if currently running.""" + return self._status is BranchStatus.RUNNING + + @property + def can_resume(self) -> bool: + """Check if can resume from suspension.""" + return self._status is BranchStatus.SUSPENDED or ( + self._status is BranchStatus.SUSPENDED_WITH_TIMEOUT + and self._suspend_until is not None + and time.time() >= self._suspend_until + ) + + @property + def index(self) -> int: + return self.executable.index + + @property + def callable(self) -> CallableType: + return self.executable.func + + # region State transitions + def run(self, future: Future) -> None: + """Transition to RUNNING state with a future.""" + if self._status != BranchStatus.PENDING: + msg = f"Cannot start running from {self._status}" + raise InvalidStateError(msg) + self._status = BranchStatus.RUNNING + self._future = future + + def suspend(self) -> None: + """Transition to SUSPENDED state (indefinite).""" + self._status = BranchStatus.SUSPENDED + self._suspend_until = None + + def suspend_with_timeout(self, timestamp: float) -> None: + """Transition to SUSPENDED_WITH_TIMEOUT state.""" + self._status = BranchStatus.SUSPENDED_WITH_TIMEOUT + self._suspend_until = timestamp + + def complete(self, result: 
ResultType) -> None: + """Transition to COMPLETED state.""" + self._status = BranchStatus.COMPLETED + self._result = result + self._is_result_set = True + + def fail(self, error: Exception) -> None: + """Transition to FAILED state.""" + self._status = BranchStatus.FAILED + self._error = error + + def reset_to_pending(self) -> None: + """Reset to PENDING state for resubmission.""" + self._status = BranchStatus.PENDING + self._future = None + self._suspend_until = None + + # endregion State transitions + + +class ExecutionCounters: + """Thread-safe counters for tracking execution state.""" + + def __init__( + self, + total_tasks: int, + min_successful: int, + tolerated_failure_count: int | None, + tolerated_failure_percentage: float | None, + ): + self.total_tasks: int = total_tasks + self.min_successful: int = min_successful + self.tolerated_failure_count: int | None = tolerated_failure_count + self.tolerated_failure_percentage: float | None = tolerated_failure_percentage + self.success_count: int = 0 + self.failure_count: int = 0 + self._lock = threading.Lock() + + def complete_task(self) -> None: + """Task completed successfully.""" + with self._lock: + self.success_count += 1 + + def fail_task(self) -> None: + """Task failed.""" + with self._lock: + self.failure_count += 1 + + def should_continue(self) -> bool: + """ + Check if we should continue starting new tasks (based on failure tolerance). + Matches TypeScript shouldContinue() logic. + """ + with self._lock: + # If no completion config, only continue if no failures + if ( + self.tolerated_failure_count is None + and self.tolerated_failure_percentage is None + ): + return self.failure_count == 0 + + # Check failure count tolerance + if ( + self.tolerated_failure_count is not None + and self.failure_count > self.tolerated_failure_count + ): + return False + + # Check failure percentage tolerance + if self.tolerated_failure_percentage is not None and self.total_tasks > 0: + failure_percentage = (self.failure_count / self.total_tasks) * 100 + if failure_percentage > self.tolerated_failure_percentage: + return False + + return True + + def is_complete(self) -> bool: + """ + Check if execution should complete (based on completion criteria). + Matches TypeScript isComplete() logic. + """ + with self._lock: + completed_count = self.success_count + self.failure_count + + # All tasks completed + if completed_count == self.total_tasks: + return True + + # when we breach min successful, we've completed + return self.success_count >= self.min_successful + + def should_complete(self) -> bool: + """ + Check if execution should complete. + Combines TypeScript shouldContinue() and isComplete() logic. 
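+
+        Illustrative walk-through: with total_tasks=4, min_successful=4 (the
+        executor defaults min_successful to the number of executables) and no
+        failure tolerance configured, a single failure makes should_continue()
+        return False, so should_complete() returns True immediately.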
+ """ + return self.is_complete() or not self.should_continue() + + def is_all_completed(self) -> bool: + """True if all tasks completed successfully.""" + with self._lock: + return self.success_count == self.total_tasks + + def is_min_successful_reached(self) -> bool: + """True if minimum successful tasks reached.""" + with self._lock: + return self.success_count >= self.min_successful + + def is_failure_tolerance_exceeded(self) -> bool: + """True if failure tolerance was exceeded.""" + with self._lock: + return self._is_failure_condition_reached( + tolerated_count=self.tolerated_failure_count, + tolerated_percentage=self.tolerated_failure_percentage, + failure_count=self.failure_count, + ) + + def _is_failure_condition_reached( + self, + tolerated_count: int | None, + tolerated_percentage: float | None, + failure_count: int, + ) -> bool: + """True if failure conditions are reached (no locking - caller must lock).""" + # Failure count condition + if tolerated_count is not None and failure_count > tolerated_count: + return True + + # Failure percentage condition + if tolerated_percentage is not None and self.total_tasks > 0: + failure_percentage = (failure_count / self.total_tasks) * 100 + if failure_percentage > tolerated_percentage: + return True + + return False + + +# endegion concurrency models diff --git a/src/aws_durable_execution_sdk_python/config.py b/src/aws_durable_execution_sdk_python/config.py index 4f4c5b5..548b6c1 100644 --- a/src/aws_durable_execution_sdk_python/config.py +++ b/src/aws_durable_execution_sdk_python/config.py @@ -7,6 +7,8 @@ from enum import Enum, StrEnum from typing import TYPE_CHECKING, Generic, TypeVar +from aws_durable_execution_sdk_python.exceptions import ValidationError + P = TypeVar("P") # Payload type R = TypeVar("R") # Result type T = TypeVar("T") @@ -25,6 +27,42 @@ Numeric = int | float # deliberately leaving off complex +@dataclass(frozen=True) +class Duration: + """Represents a duration stored as total seconds.""" + + seconds: int = 0 + + def __post_init__(self): + if self.seconds < 0: + msg = "Duration seconds must be positive" + raise ValidationError(msg) + + def to_seconds(self) -> int: + """Convert the duration to total seconds.""" + return self.seconds + + @classmethod + def from_seconds(cls, value: float) -> Duration: + """Create a Duration from total seconds.""" + return cls(seconds=int(value)) + + @classmethod + def from_minutes(cls, value: float) -> Duration: + """Create a Duration from minutes.""" + return cls(seconds=int(value * 60)) + + @classmethod + def from_hours(cls, value: float) -> Duration: + """Create a Duration from hours.""" + return cls(seconds=int(value * 3600)) + + @classmethod + def from_days(cls, value: float) -> Duration: + """Create a Duration from days.""" + return cls(seconds=int(value * 86400)) + + @dataclass(frozen=True) class BatchedInput(Generic[T, U]): batch_input: T @@ -340,22 +378,61 @@ class MapConfig: summary_generator: SummaryGenerator | None = None -@dataclass +@dataclass(frozen=True) class InvokeConfig(Generic[P, R]): + """ + Configuration for invoke operations. + + This class configures how function invocations are executed, including + timeout behavior, serialization, and tenant isolation. + + Args: + timeout: Maximum duration to wait for the invoked function to complete. + Default is no timeout. Use this to prevent long-running invocations + from blocking execution indefinitely. + + serdes_payload: Custom serialization/deserialization for the payload + sent to the invoked function. 
 @dataclass(frozen=True)
 class BatchedInput(Generic[T, U]):
     batch_input: T
@@ -340,22 +378,61 @@ class MapConfig:
     summary_generator: SummaryGenerator | None = None
 
 
-@dataclass
+@dataclass(frozen=True)
 class InvokeConfig(Generic[P, R]):
+    """
+    Configuration for invoke operations.
+
+    This class configures how function invocations are executed, including
+    timeout behavior, serialization, and tenant isolation.
+
+    Args:
+        timeout: Maximum duration to wait for the invoked function to complete.
+            Default is no timeout. Use this to prevent long-running invocations
+            from blocking execution indefinitely.
+
+        serdes_payload: Custom serialization/deserialization for the payload
+            sent to the invoked function. Defaults to DEFAULT_JSON_SERDES when
+            not set.
+
+        serdes_result: Custom serialization/deserialization for the result
+            returned from the invoked function. Defaults to DEFAULT_JSON_SERDES when
+            not set.
+
+        tenant_id: Optional tenant identifier for multi-tenant isolation.
+            If provided, the invocation will be scoped to this tenant.
+    """
+
     # retry_strategy: Callable[[Exception, int], RetryDecision] | None = None
-    timeout_seconds: int = 0
+    timeout: Duration = field(default_factory=Duration)
     serdes_payload: SerDes[P] | None = None
     serdes_result: SerDes[R] | None = None
+    tenant_id: str | None = None
+
+    @property
+    def timeout_seconds(self) -> int:
+        """Get timeout in seconds."""
+        return self.timeout.to_seconds()
 
 
 @dataclass(frozen=True)
 class CallbackConfig:
     """Configuration for callbacks."""
 
-    timeout_seconds: int = 0
-    heartbeat_timeout_seconds: int = 0
+    timeout: Duration = field(default_factory=Duration)
+    heartbeat_timeout: Duration = field(default_factory=Duration)
     serdes: SerDes | None = None
 
+    @property
+    def timeout_seconds(self) -> int:
+        """Get timeout in seconds."""
+        return self.timeout.to_seconds()
+
+    @property
+    def heartbeat_timeout_seconds(self) -> int:
+        """Get heartbeat timeout in seconds."""
+        return self.heartbeat_timeout.to_seconds()
+
 
 @dataclass(frozen=True)
 class WaitForCallbackConfig(CallbackConfig):
@@ -387,23 +464,35 @@ class JitterStrategy(StrEnum):
 
     Jitter is meant to be used to spread operations across time.
 
+    Based on AWS Architecture Blog: https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
+
     members:
     :NONE: No jitter; use the exact calculated delay
     :FULL: Full jitter; random delay between 0 and calculated delay
-    :HALF: Half jitter; random delay between 0.5x and 1.0x of the calculated delay
+    :HALF: Equal jitter; random delay between 0.5x and 1.0x of the calculated delay
     """
 
     NONE = "NONE"
     FULL = "FULL"
     HALF = "HALF"
 
-    def compute_jitter(self, delay) -> float:
+    def apply_jitter(self, delay: float) -> float:
+        """Apply jitter to a delay value and return the final delay.
+ + Args: + delay: The base delay value to apply jitter to + + Returns: + The final delay after applying jitter strategy + """ match self: case JitterStrategy.NONE: - return 0 + return delay case JitterStrategy.HALF: - return delay * (random.random() * 0.5 + 0.5) # noqa: S311 + # Equal jitter: delay/2 + random(0, delay/2) + return delay / 2 + random.random() * (delay / 2) # noqa: S311 case _: # default is FULL + # Full jitter: random(0, delay) return random.random() * delay # noqa: S311 diff --git a/src/aws_durable_execution_sdk_python/context.py b/src/aws_durable_execution_sdk_python/context.py index 2938ca9..8efaed0 100644 --- a/src/aws_durable_execution_sdk_python/context.py +++ b/src/aws_durable_execution_sdk_python/context.py @@ -8,6 +8,7 @@ BatchedInput, CallbackConfig, ChildConfig, + Duration, InvokeConfig, MapConfig, ParallelConfig, @@ -23,25 +24,30 @@ from aws_durable_execution_sdk_python.lambda_service import OperationSubType from aws_durable_execution_sdk_python.logger import Logger, LogInfo from aws_durable_execution_sdk_python.operation.callback import ( - create_callback_handler, + CallbackOperationExecutor, wait_for_callback_handler, ) from aws_durable_execution_sdk_python.operation.child import child_handler -from aws_durable_execution_sdk_python.operation.invoke import invoke_handler +from aws_durable_execution_sdk_python.operation.invoke import InvokeOperationExecutor from aws_durable_execution_sdk_python.operation.map import map_handler from aws_durable_execution_sdk_python.operation.parallel import parallel_handler -from aws_durable_execution_sdk_python.operation.step import step_handler -from aws_durable_execution_sdk_python.operation.wait import wait_handler +from aws_durable_execution_sdk_python.operation.step import StepOperationExecutor +from aws_durable_execution_sdk_python.operation.wait import WaitOperationExecutor from aws_durable_execution_sdk_python.operation.wait_for_condition import ( - wait_for_condition_handler, + WaitForConditionOperationExecutor, +) +from aws_durable_execution_sdk_python.serdes import ( + PassThroughSerDes, + SerDes, + deserialize, ) -from aws_durable_execution_sdk_python.serdes import SerDes, deserialize from aws_durable_execution_sdk_python.state import ExecutionState # noqa: TCH001 from aws_durable_execution_sdk_python.threading import OrderedCounter from aws_durable_execution_sdk_python.types import ( BatchResult, LoggerInterface, StepContext, + WaitForCallbackContext, WaitForConditionCheckContext, ) from aws_durable_execution_sdk_python.types import Callback as CallbackProtocol @@ -65,6 +71,8 @@ logger = logging.getLogger(__name__) +PASS_THROUGH_SERDES: SerDes[Any] = PassThroughSerDes() + def durable_step( func: Callable[Concatenate[StepContext, Params], T], @@ -96,6 +104,52 @@ def function_with_arguments(child_context: DurableContext): return wrapper +def durable_wait_for_callback( + func: Callable[Concatenate[str, WaitForCallbackContext, Params], T], +) -> Callable[Params, Callable[[str, WaitForCallbackContext], T]]: + """Wrap your callable into a wait_for_callback submitter function. + + This decorator allows you to define a submitter function with additional + parameters that will be bound when called. 
+ + Args: + func: A callable that takes callback_id, context, and additional parameters + + Returns: + A wrapper function that binds the additional parameters and returns + a submitter function compatible with wait_for_callback + + Example: + @durable_wait_for_callback + def submit_to_external_system( + callback_id: str, + context: WaitForCallbackContext, + task_name: str, + priority: int + ): + context.logger.info(f"Submitting {task_name} with callback {callback_id}") + external_api.submit_task( + task_name=task_name, + priority=priority, + callback_id=callback_id + ) + + # Usage in durable handler: + result = context.wait_for_callback( + submit_to_external_system("my_task", priority=5) + ) + """ + + def wrapper(*args, **kwargs): + def submitter_with_arguments(callback_id: str, context: WaitForCallbackContext): + return func(callback_id, context, *args, **kwargs) + + submitter_with_arguments._original_name = func.__name__ # noqa: SLF001 + return submitter_with_arguments + + return wrapper + + class Callback(Generic[T], CallbackProtocol[T]): # noqa: PYI059 """A future that will block on result() until callback_id returns.""" @@ -128,7 +182,7 @@ def result(self) -> T | None: if not checkpointed_result.is_existent(): msg = "Callback operation must exist" - raise CallbackError(msg) + raise CallbackError(message=msg, callback_id=self.callback_id) if ( checkpointed_result.is_failed() @@ -136,14 +190,19 @@ def result(self) -> T | None: or checkpointed_result.is_timed_out() or checkpointed_result.is_stopped() ): - checkpointed_result.raise_callable_error() + msg = ( + checkpointed_result.error.message + if checkpointed_result.error and checkpointed_result.error.message + else "Callback failed" + ) + raise CallbackError(message=msg, callback_id=self.callback_id) if checkpointed_result.is_succeeded(): if checkpointed_result.result is None: return None # type: ignore return deserialize( - serdes=self.serdes, + serdes=self.serdes if self.serdes is not None else PASS_THROUGH_SERDES, data=checkpointed_result.result, operation_id=self.operation_id, durable_execution_arn=self.state.durable_execution_arn, @@ -169,7 +228,8 @@ def __init__( self._step_counter: OrderedCounter = OrderedCounter() log_info = LogInfo( - execution_arn=state.durable_execution_arn, parent_id=parent_id + execution_state=state, + parent_id=parent_id, ) self._log_info = log_info self.logger: Logger = logger or Logger.from_log_info( @@ -198,7 +258,8 @@ def create_child_context(self, parent_id: str) -> DurableContext: parent_id=parent_id, logger=self.logger.with_log_info( LogInfo( - execution_arn=self.state.durable_execution_arn, parent_id=parent_id + execution_state=self.state, + parent_id=parent_id, ) ), ) @@ -262,20 +323,22 @@ def create_callback( if not config: config = CallbackConfig() operation_id: str = self._create_step_id() - callback_id: str = create_callback_handler( + executor: CallbackOperationExecutor = CallbackOperationExecutor( state=self.state, operation_identifier=OperationIdentifier( operation_id=operation_id, parent_id=self._parent_id, name=name ), config=config, ) - - return Callback( + callback_id: str = executor.process() + result: Callback = Callback( callback_id=callback_id, operation_id=operation_id, state=self.state, serdes=config.serdes, ) + self.state.track_replay(operation_id=operation_id) + return result def invoke( self, @@ -295,17 +358,23 @@ def invoke( Returns: The result of the invoked function """ - return invoke_handler( + if not config: + config = InvokeConfig[P, R]() + operation_id = 
self._create_step_id()
+        executor: InvokeOperationExecutor[R] = InvokeOperationExecutor(
             function_name=function_name,
             payload=payload,
             state=self.state,
             operation_identifier=OperationIdentifier(
-                operation_id=self._create_step_id(),
+                operation_id=operation_id,
                 parent_id=self._parent_id,
                 name=name,
             ),
             config=config,
         )
+        result: R = executor.process()
+        self.state.track_replay(operation_id=operation_id)
+        return result
 
     def map(
         self,
@@ -337,15 +406,21 @@ def map_in_child_context() -> BatchResult[R]:
                 operation_identifier=operation_identifier,
             )
 
-        return child_handler(
+        result: BatchResult[R] = child_handler(
             func=map_in_child_context,
             state=self.state,
             operation_identifier=operation_identifier,
             config=ChildConfig(
                 sub_type=OperationSubType.MAP,
-                serdes=config.serdes if config is not None else None,
+                serdes=getattr(config, "serdes", None),
+                # child_handler should only know the parent's serdes;
+                # the item serdes will be passed when we are actually executing
+                # the branch within its own child_handler.
+                item_serdes=None,
             ),
         )
+        self.state.track_replay(operation_id=operation_id)
+        return result
 
     def parallel(
         self,
@@ -374,15 +449,21 @@ def parallel_in_child_context() -> BatchResult[T]:
                 operation_identifier=operation_identifier,
             )
 
-        return child_handler(
+        result: BatchResult[T] = child_handler(
             func=parallel_in_child_context,
             state=self.state,
             operation_identifier=operation_identifier,
             config=ChildConfig(
                 sub_type=OperationSubType.PARALLEL,
-                serdes=config.serdes if config is not None else None,
+                serdes=getattr(config, "serdes", None),
+                # child_handler should only know the parent's serdes;
+                # the item serdes will be passed when we are actually executing
+                # the branch within its own child_handler.
+                item_serdes=None,
             ),
         )
+        self.state.track_replay(operation_id=operation_id)
+        return result
 
     def run_in_child_context(
         self,
@@ -409,7 +490,7 @@ def run_in_child_context(
         def callable_with_child_context():
             return func(self.create_child_context(parent_id=operation_id))
 
-        return child_handler(
+        result: T = child_handler(
             func=callable_with_child_context,
             state=self.state,
             operation_identifier=OperationIdentifier(
@@ -417,6 +498,8 @@ def callable_with_child_context():
             ),
             config=config,
         )
+        self.state.track_replay(operation_id=operation_id)
+        return result
 
     def step(
         self,
@@ -426,42 +509,52 @@ def step(
     ) -> T:
         step_name = self._resolve_step_name(name, func)
         logger.debug("Step name: %s", step_name)
-
-        return step_handler(
+        if not config:
+            config = StepConfig()
+        operation_id = self._create_step_id()
+        executor: StepOperationExecutor[T] = StepOperationExecutor(
             func=func,
             config=config,
             state=self.state,
             operation_identifier=OperationIdentifier(
-                operation_id=self._create_step_id(),
+                operation_id=operation_id,
                 parent_id=self._parent_id,
                 name=step_name,
             ),
             context_logger=self.logger,
         )
+        result: T = executor.process()
+        self.state.track_replay(operation_id=operation_id)
+        return result
 
-    def wait(self, seconds: int, name: str | None = None) -> None:
+    def wait(self, duration: Duration, name: str | None = None) -> None:
         """Wait for a specified amount of time.
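+
+        Illustrative usage: context.wait(Duration.from_minutes(5)) suspends
+        the durable execution for five minutes; durations below one second
+        raise a ValidationError.
+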
Args:
-            seconds: Time to wait in seconds
+            duration: Duration to wait
             name: Optional name for the wait step
         """
+        seconds = duration.to_seconds()
         if seconds < 1:
-            msg = "seconds must be an integer greater than 0"
+            msg = "duration must be at least 1 second"
             raise ValidationError(msg)
 
-        wait_handler(
-            seconds=seconds,
+        operation_id = self._create_step_id()
+        executor: WaitOperationExecutor = WaitOperationExecutor(
+            seconds=seconds,
             state=self.state,
             operation_identifier=OperationIdentifier(
-                operation_id=self._create_step_id(),
+                operation_id=operation_id,
                 parent_id=self._parent_id,
                 name=name,
             ),
         )
+        executor.process()
+        self.state.track_replay(operation_id=operation_id)
 
     def wait_for_callback(
         self,
-        submitter: Callable[[str], None],
+        submitter: Callable[[str, WaitForCallbackContext], None],
         name: str | None = None,
         config: WaitForCallbackConfig | None = None,
     ) -> Any:
@@ -499,17 +592,23 @@ def wait_for_condition(
             msg = "`config` is required for wait_for_condition"
             raise ValidationError(msg)
 
-        return wait_for_condition_handler(
-            check=check,
-            config=config,
-            state=self.state,
-            operation_identifier=OperationIdentifier(
-                operation_id=self._create_step_id(),
-                parent_id=self._parent_id,
-                name=name,
-            ),
-            context_logger=self.logger,
+        operation_id = self._create_step_id()
+        executor: WaitForConditionOperationExecutor[T] = (
+            WaitForConditionOperationExecutor(
+                check=check,
+                config=config,
+                state=self.state,
+                operation_identifier=OperationIdentifier(
+                    operation_id=operation_id,
+                    parent_id=self._parent_id,
+                    name=name,
+                ),
+                context_logger=self.logger,
+            )
         )
+        result: T = executor.process()
+        self.state.track_replay(operation_id=operation_id)
+        return result
 
 # endregion Operations
 
diff --git a/src/aws_durable_execution_sdk_python/exceptions.py b/src/aws_durable_execution_sdk_python/exceptions.py
index dcaa2c1..72f0aa0 100644
--- a/src/aws_durable_execution_sdk_python/exceptions.py
+++ b/src/aws_durable_execution_sdk_python/exceptions.py
@@ -8,12 +8,29 @@
 import time
 from dataclasses import dataclass
 from enum import Enum
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Self, TypedDict
+
+BAD_REQUEST_ERROR: int = 400
+TOO_MANY_REQUESTS_ERROR: int = 429
+SERVICE_ERROR: int = 500
 
 if TYPE_CHECKING:
     import datetime
 
 
+class AwsErrorObj(TypedDict):
+    Code: str | None
+    Message: str | None
+
+
+class AwsErrorMetadata(TypedDict):
+    RequestId: str | None
+    HostId: str | None
+    HTTPStatusCode: int | None
+    HTTPHeaders: dict[str, str] | None
+    RetryAttempts: int | None
+
+
 class TerminationReason(Enum):
     """Reasons why a durable execution terminated."""
 
@@ -69,12 +86,35 @@ def __init__(self, message: str, callback_id: str | None = None):
         self.callback_id = callback_id
 
 
-class CheckpointFailedError(InvocationError):
-    """Error when checkpoint operation fails."""
+class BotoClientError(InvocationError):
+    def __init__(
+        self,
+        message: str,
+        error: AwsErrorObj | None = None,
+        response_metadata: AwsErrorMetadata | None = None,
+        termination_reason: TerminationReason = TerminationReason.INVOCATION_ERROR,
+    ):
+        super().__init__(message=message, termination_reason=termination_reason)
+        self.error: AwsErrorObj | None = error
+        self.response_metadata: AwsErrorMetadata | None = response_metadata
 
-    def __init__(self, message: str, step_id: str | None = None):
-        super().__init__(message, TerminationReason.CHECKPOINT_FAILED)
-        self.step_id = step_id
+    @classmethod
+    def from_exception(cls, exception: Exception) -> Self:
+        response = getattr(exception, "response", {})
+ 
response_metadata = response.get("ResponseMetadata")
+        error = response.get("Error")
+        return cls(
+            message=str(exception), error=error, response_metadata=response_metadata
+        )
+
+    def build_logger_extras(self) -> dict:
+        extras: dict = {}
+        # preserve PascalCase to be consistent with other languages
+        if error := self.error:
+            extras["Error"] = error
+        if response_metadata := self.response_metadata:
+            extras["ResponseMetadata"] = response_metadata
+        return extras
 
 
 class NonDeterministicExecutionError(ExecutionError):
@@ -85,21 +125,85 @@ def __init__(self, message: str, step_id: str | None = None):
         self.step_id = step_id
 
 
-class CheckpointError(CheckpointFailedError):
+class CheckpointErrorCategory(Enum):
+    INVOCATION = "INVOCATION"
+    EXECUTION = "EXECUTION"
+
+
+class CheckpointError(BotoClientError):
     """Failure to checkpoint. Will terminate the lambda."""
 
-    def __init__(self, message: str):
-        super().__init__(message)
+    def __init__(
+        self,
+        message: str,
+        error_category: CheckpointErrorCategory,
+        error: AwsErrorObj | None = None,
+        response_metadata: AwsErrorMetadata | None = None,
+    ):
+        super().__init__(
+            message,
+            error,
+            response_metadata,
+            termination_reason=TerminationReason.CHECKPOINT_FAILED,
+        )
+        self.error_category: CheckpointErrorCategory = error_category
 
     @classmethod
     def from_exception(cls, exception: Exception) -> CheckpointError:
-        return cls(message=str(exception))
+        base = BotoClientError.from_exception(exception)
+        metadata: AwsErrorMetadata | None = base.response_metadata
+        error: AwsErrorObj | None = base.error
+        error_category: CheckpointErrorCategory = CheckpointErrorCategory.INVOCATION
+
+        # An InvalidParameterValueException whose message starts with "Invalid Checkpoint Token"
+        # is an invocation error. All other 4xx errors (except 429) are execution errors and
+        # should be retried; 429 and all 5xx errors are invocation errors.
+        status_code: int | None = (metadata and metadata.get("HTTPStatusCode")) or None
+        if (
+            status_code
+            # in the 4xx range (except 429), and not an InvalidParameterValueException
+            # carrying an "Invalid Checkpoint Token" message => execution error
+            and status_code < SERVICE_ERROR
+            and status_code >= BAD_REQUEST_ERROR
+            and status_code != TOO_MANY_REQUESTS_ERROR
+            and error
+            and (
+                # not InvalidParameterValueException => Execution
+                (error.get("Code", "") or "") != "InvalidParameterValueException"
+                # not an invalid-token message => Execution
+                or not (error.get("Message") or "").startswith(
+                    "Invalid Checkpoint Token"
+                )
+            )
+        ):
+            error_category = CheckpointErrorCategory.EXECUTION
+        return cls(str(exception), error_category, error, metadata)
+
+    def is_retriable(self) -> bool:
+        return self.error_category == CheckpointErrorCategory.EXECUTION
 
 
 class ValidationError(DurableExecutionsError):
     """Incorrect arguments to a Durable Function operation."""
 
 
+class GetExecutionStateError(BotoClientError):
+    """Raised when failing to retrieve execution state."""
+
+    def __init__(
+        self,
+        message: str,
+        error: AwsErrorObj | None = None,
+        response_metadata: AwsErrorMetadata | None = None,
+    ):
+        super().__init__(
+            message,
+            error,
+            response_metadata,
+            termination_reason=TerminationReason.INVOCATION_ERROR,
+        )
+
+
 class InvalidStateError(DurableExecutionsError):
     """Raised when an operation is attempted on an object in an invalid state."""
 
@@ -268,3 +372,32 @@ def __str__(self) -> str:
 
 class SerDesError(DurableExecutionsError):
     """Raised when serialization fails."""
+
+
+class OrphanedChildException(BaseException):
+    """Raised when a child operation attempts to checkpoint 
after its parent context has completed.
+
+    This exception inherits from BaseException (not Exception) so that user-space doesn't
+    accidentally catch it with broad exception handlers like 'except Exception'.
+
+    This exception will happen when a parallel branch or map item tries to create a checkpoint
+    after its parent context (i.e. the parallel/map operation) has already completed due to meeting
+    completion criteria (e.g., min_successful reached, failure tolerance exceeded).
+
+    Although you cannot cancel running futures in user-space, this will at least terminate the
+    child operation on the next checkpoint attempt, preventing subsequent operations in the
+    child scope from executing.
+
+    Attributes:
+        operation_id: Operation ID of the orphaned child
+    """
+
+    def __init__(self, message: str, operation_id: str):
+        """Initialize OrphanedChildException.
+
+        Args:
+            message: Human-readable error message
+            operation_id: Operation ID of the orphaned child (required)
+        """
+        super().__init__(message)
+        self.operation_id = operation_id
diff --git a/src/aws_durable_execution_sdk_python/execution.py b/src/aws_durable_execution_sdk_python/execution.py
index 1a47408..6f4e438 100644
--- a/src/aws_durable_execution_sdk_python/execution.py
+++ b/src/aws_durable_execution_sdk_python/execution.py
@@ -1,5 +1,7 @@
 from __future__ import annotations
 
+import contextlib
+import functools
 import json
 import logging
 from concurrent.futures import ThreadPoolExecutor
@@ -7,9 +9,10 @@
 from enum import Enum
 from typing import TYPE_CHECKING, Any
 
-from aws_durable_execution_sdk_python.context import DurableContext, ExecutionState
+from aws_durable_execution_sdk_python.context import DurableContext
 from aws_durable_execution_sdk_python.exceptions import (
     BackgroundThreadError,
+    BotoClientError,
     CheckpointError,
     DurableExecutionsError,
     ExecutionError,
@@ -24,10 +27,13 @@
     OperationType,
     OperationUpdate,
 )
+from aws_durable_execution_sdk_python.state import ExecutionState, ReplayStatus
 
 if TYPE_CHECKING:
     from collections.abc import Callable, MutableMapping
 
+    import boto3  # type: ignore
+
     from aws_durable_execution_sdk_python.types import LambdaContext
 
 
@@ -53,10 +59,15 @@ def from_dict(input_dict: MutableMapping[str, Any]) -> InitialExecutionState:
             next_marker=input_dict.get("NextMarker", ""),
         )
 
-    def get_execution_operation(self) -> Operation:
-        if len(self.operations) < 1:
+    def get_execution_operation(self) -> Operation | None:
+        if not self.operations:
+            # Due to payload size limitations we may have an empty operations list.
+            # This will only happen when loading the initial page of results and is
+            # expected behaviour. We don't fail, but instead return None,
+            # as the execution operation does not exist yet.
             msg: str = "No durable operations found in initial execution state."
-            raise DurableExecutionsError(msg)
+            logger.debug(msg)
+            return None
 
         candidate = self.operations[0]
         if candidate.operation_type is not OperationType.EXECUTION:
@@ -66,11 +77,13 @@ def get_execution_operation(self) -> Operation:
         return candidate
 
     def get_input_payload(self) -> str | None:
-        # TODO: are these None checks necessary? i.e will there always be execution_details with input_payload
-        if execution_details := self.get_execution_operation().execution_details:
-            return execution_details.input_payload
-
-        return None
+        # It is possible that the backend will not provide an execution operation
+        # for the initial page of results. 
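+        # In that case the input payload is also unavailable, so we treat it as
+        # absent and return None.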
+        if not (operation := self.get_execution_operation()):
+            return None
+        if not (execution_details := operation.execution_details):
+            return None
+        return execution_details.input_payload
 
     def to_dict(self) -> MutableMapping[str, Any]:
         return {
@@ -84,7 +97,6 @@ class DurableExecutionInvocationInput:
     durable_execution_arn: str
     checkpoint_token: str
     initial_execution_state: InitialExecutionState
-    is_local_runner: bool
 
     @staticmethod
     def from_dict(
@@ -96,7 +108,6 @@ def from_dict(
             initial_execution_state=InitialExecutionState.from_dict(
                 input_dict.get("InitialExecutionState", {})
             ),
-            is_local_runner=input_dict.get("LocalRunner", False),
         )
 
     def to_dict(self) -> MutableMapping[str, Any]:
@@ -104,7 +115,6 @@ def to_dict(self) -> MutableMapping[str, Any]:
             "DurableExecutionArn": self.durable_execution_arn,
             "CheckpointToken": self.checkpoint_token,
             "InitialExecutionState": self.initial_execution_state.to_dict(),
-            "LocalRunner": self.is_local_runner,
         }
 
 
@@ -126,7 +136,6 @@ def from_durable_execution_invocation_input(
             durable_execution_arn=invocation_input.durable_execution_arn,
             checkpoint_token=invocation_input.checkpoint_token,
             initial_execution_state=invocation_input.initial_execution_state,
-            is_local_runner=invocation_input.is_local_runner,
             service_client=service_client,
         )
 
@@ -191,8 +200,15 @@ def create_succeeded(cls, result: str) -> DurableExecutionInvocationOutput:
 
 
 def durable_execution(
-    func: Callable[[Any, DurableContext], Any],
+    func: Callable[[Any, DurableContext], Any] | None = None,
+    *,
+    boto3_client: boto3.client | None = None,
 ) -> Callable[[Any, LambdaContext], Any]:
+    # Decorator called with parameters
+    if func is None:
+        logger.debug("Decorator called with parameters")
+        return functools.partial(durable_execution, boto3_client=boto3_client)
+
     logger.debug("Starting durable execution handler...")
 
     def wrapper(event: Any, context: LambdaContext) -> MutableMapping[str, Any]:
@@ -205,13 +221,23 @@ def wrapper(event: Any, context: LambdaContext) -> MutableMapping[str, Any]:
             invocation_input = event
             service_client = invocation_input.service_client
         else:
-            logger.debug("durableExecutionArn: %s", event.get("DurableExecutionArn"))
-            invocation_input = DurableExecutionInvocationInput.from_dict(event)
+            try:
+                logger.debug(
+                    "durableExecutionArn: %s", event.get("DurableExecutionArn")
+                )
+                invocation_input = DurableExecutionInvocationInput.from_dict(event)
+            except (KeyError, TypeError, AttributeError) as e:
+                msg = (
+                    "Unexpected payload provided to start the durable execution. "
+                    "Check your resource configuration to confirm that durable execution is enabled." 
+                )
+                raise ExecutionError(msg) from e
+
+            # Use custom client if provided, otherwise initialize from environment
             service_client = (
-                LambdaClient.initialize_local_runner_client()
-                if invocation_input.is_local_runner
-                else LambdaClient.initialize_from_env()
+                LambdaClient(client=boto3_client)
+                if boto3_client is not None
+                else LambdaClient.initialize_client()
             )
 
         raw_input_payload: str | None = (
@@ -236,6 +262,10 @@ def wrapper(event: Any, context: LambdaContext) -> MutableMapping[str, Any]:
             initial_checkpoint_token=invocation_input.checkpoint_token,
             operations={},
             service_client=service_client,
+            # If there are operations other than the initial EXECUTION one, the current state is in replay mode
+            replay_status=ReplayStatus.REPLAY
+            if len(invocation_input.initial_execution_state.operations) > 1
+            else ReplayStatus.NEW,
        )
 
         execution_state.fetch_paginated_operations(
@@ -249,9 +279,12 @@ def wrapper(event: Any, context: LambdaContext) -> MutableMapping[str, Any]:
         )
 
         # Use ThreadPoolExecutor for concurrent execution of user code and background checkpoint processing
-        with ThreadPoolExecutor(
-            max_workers=2, thread_name_prefix="dex-handler"
-        ) as executor:
+        with (
+            ThreadPoolExecutor(
+                max_workers=2, thread_name_prefix="dex-handler"
+            ) as executor,
+            contextlib.closing(execution_state) as execution_state,
+        ):
             # Thread 1: Run background checkpoint processing
             executor.submit(execution_state.checkpoint_batches_forever)
 
@@ -276,7 +309,6 @@ def wrapper(event: Any, context: LambdaContext) -> MutableMapping[str, Any]:
                         invocation_input.durable_execution_arn,
                     )
                 serialized_result = json.dumps(result)
-
                 # large response handling here. Remember if checkpointing to complete, NOT to include
                 # payload in response
                 if (
@@ -295,18 +327,16 @@ def wrapper(event: Any, context: LambdaContext) -> MutableMapping[str, Any]:
                     # Must ensure the result is persisted before returning to Lambda.
                    # Large results exceed Lambda response limits and must be stored durably
                    # before the execution completes. 
-                    execution_state.create_checkpoint_sync(success_operation)
-
-                    # Stop background checkpointing thread
-                    execution_state.stop_checkpointing()
-
+                    try:
+                        execution_state.create_checkpoint(
+                            success_operation, is_sync=True
+                        )
+                    except CheckpointError as e:
+                        return handle_checkpoint_error(e).to_dict()
                     return DurableExecutionInvocationOutput.create_succeeded(
                         result=""
                     ).to_dict()
 
-                # Stop background checkpointing thread
-                execution_state.stop_checkpointing()
-
                 return DurableExecutionInvocationOutput.create_succeeded(
                     result=serialized_result
                 ).to_dict()
 
         except BackgroundThreadError as bg_error:
             # Background checkpoint system failed - propagated through CompletionEvent
             # Do not attempt to checkpoint anything, just terminate immediately
-            logger.exception("Checkpoint processing failed")
-            execution_state.stop_checkpointing()
-            # Raise the original exception
+            if isinstance(bg_error.source_exception, BotoClientError):
+                logger.exception(
+                    "Checkpoint processing failed",
+                    extra=bg_error.source_exception.build_logger_extras(),
+                )
+            else:
+                logger.exception("Checkpoint processing failed")
+            # handle the original exception
+            if isinstance(bg_error.source_exception, CheckpointError):
+                return handle_checkpoint_error(bg_error.source_exception).to_dict()
             raise bg_error.source_exception from bg_error
         except SuspendExecution:
-            # User code suspended - stop background checkpointing thread
+            # User code suspended - report a pending invocation
             logger.debug("Suspending execution...")
-            execution_state.stop_checkpointing()
             return DurableExecutionInvocationOutput(
                 status=InvocationStatus.PENDING
             ).to_dict()
-        except CheckpointError:
-            # Checkpoint system is broken - stop background thread and exit immediately
-            execution_state.stop_checkpointing()
-            logger.exception("Checkpoint system failed")
-            raise  # Terminate Lambda immediately
+        except CheckpointError as e:
+            # Checkpoint system is broken - report the failure and exit immediately
+            logger.exception(
+                "Checkpoint system failed",
+                extra=e.build_logger_extras(),
+            )
+            return handle_checkpoint_error(e).to_dict()
         except InvocationError:
-            execution_state.stop_checkpointing()
             logger.exception("Invocation error. Must terminate.")
             # Throw the error to trigger Lambda retry
             raise
         except ExecutionError as e:
-            execution_state.stop_checkpointing()
             logger.exception("Execution error. Must terminate without retry.")
             return DurableExecutionInvocationOutput(
                 status=InvocationStatus.FAILED,
@@ -347,15 +383,46 @@ def wrapper(event: Any, context: LambdaContext) -> MutableMapping[str, Any]:
         except Exception as e:
             # all user-space errors go here
             logger.exception("Execution failed")
-            failed_operation = OperationUpdate.create_execution_fail(
-                error=ErrorObject.from_exception(e)
-            )
-            # TODO: can optimize, if not too large can just return response rather than checkpoint
-            execution_state.create_checkpoint_sync(failed_operation)
-            execution_state.stop_checkpointing()
-            return DurableExecutionInvocationOutput(
-                status=InvocationStatus.FAILED
+            result = DurableExecutionInvocationOutput(
+                status=InvocationStatus.FAILED, error=ErrorObject.from_exception(e)
             ).to_dict()
+            serialized_result = json.dumps(result)
+
+            if (
+                serialized_result
+                and len(serialized_result) > LAMBDA_RESPONSE_SIZE_LIMIT
+            ):
+                logger.debug(
+                    "Response size (%s bytes) exceeds Lambda limit (%s bytes). 
Checkpointing result.",
+                    len(serialized_result),
+                    LAMBDA_RESPONSE_SIZE_LIMIT,
+                )
+                failed_operation = OperationUpdate.create_execution_fail(
+                    error=ErrorObject.from_exception(e)
+                )
+
+                # Checkpoint large result with blocking (is_sync=True, default).
+                # Must ensure the result is persisted before returning to Lambda.
+                # Large results exceed Lambda response limits and must be stored durably
+                # before the execution completes.
+                try:
+                    execution_state.create_checkpoint(failed_operation, is_sync=True)
+                except CheckpointError as checkpoint_error:
+                    return handle_checkpoint_error(checkpoint_error).to_dict()
+                return DurableExecutionInvocationOutput(
+                    status=InvocationStatus.FAILED
+                ).to_dict()
+
+            return result
 
     return wrapper
 
 
+def handle_checkpoint_error(error: CheckpointError) -> DurableExecutionInvocationOutput:
+    if error.is_retriable():
+        raise error from None  # Terminate Lambda immediately and have it be retried
+    return DurableExecutionInvocationOutput(
+        status=InvocationStatus.FAILED, error=ErrorObject.from_exception(error)
+    )
diff --git a/src/aws_durable_execution_sdk_python/lambda_service.py b/src/aws_durable_execution_sdk_python/lambda_service.py
index 97841c1..b907391 100644
--- a/src/aws_durable_execution_sdk_python/lambda_service.py
+++ b/src/aws_durable_execution_sdk_python/lambda_service.py
@@ -2,17 +2,17 @@
 
 import datetime
 import logging
-import os
 from dataclasses import dataclass, field
 from enum import Enum
-from pathlib import Path
 from typing import TYPE_CHECKING, Any, Protocol, TypeAlias
 
 import boto3  # type: ignore
+from botocore.config import Config  # type: ignore
 
 from aws_durable_execution_sdk_python.exceptions import (
     CallableRuntimeError,
     CheckpointError,
+    GetExecutionStateError,
 )
 
 if TYPE_CHECKING:
@@ -20,10 +20,10 @@
 
     from aws_durable_execution_sdk_python.identifier import OperationIdentifier
 
-ReplayChildren: TypeAlias = bool  # noqa UP040 ignore due to python3.11 minimum version
-OperationPayload: TypeAlias = str  # noqa UP040 ignore due to python3.11 minimum version
-TimeoutSeconds: TypeAlias = int  # noqa UP040 ignore due to python3.11 minimum version
-
+# Replace these with the `type` statement when dropping support for Python 3.11
+ReplayChildren: TypeAlias = bool
+OperationPayload: TypeAlias = str
+TimeoutSeconds: TypeAlias = int
 
 logger = logging.getLogger(__name__)
 
@@ -85,7 +85,7 @@ class OperationSubType(Enum):
     PARALLEL_BRANCH = "ParallelBranch"
     WAIT_FOR_CALLBACK = "WaitForCallback"
     WAIT_FOR_CONDITION = "WaitForCondition"
-    INVOKE = "Invoke"
+    CHAINED_INVOKE = "ChainedInvoke"
 
 
 @dataclass(frozen=True)
@@ -301,17 +301,22 @@ class ChainedInvokeOptions:
     """
 
     function_name: str
+    tenant_id: str | None = None
 
     @classmethod
     def from_dict(cls, data: MutableMapping[str, Any]) -> ChainedInvokeOptions:
         return cls(
             function_name=data["FunctionName"],
+            tenant_id=data.get("TenantId"),
         )
 
     def to_dict(self) -> MutableMapping[str, Any]:
         result: MutableMapping[str, Any] = {
             "FunctionName": self.function_name,
         }
+        if self.tenant_id is not None:
+            result["TenantId"] = self.tenant_id
+
         return result
 
 
@@ -494,7 +499,7 @@ def create_context_fail(
     def create_execution_succeed(cls, payload: str) -> OperationUpdate:
         """Create an instance of OperationUpdate for type: EXECUTION, action: SUCCEED."""
         return cls(
-            operation_id=f"execution-result-{datetime.datetime.now(tz=datetime.UTC)}",
+            operation_id=f"execution-result-{int(datetime.datetime.now(tz=datetime.UTC).timestamp() * 1000)}",
             operation_type=OperationType.EXECUTION,
             action=OperationAction.SUCCEED,
             payload=payload,
@@ -504,7 +509,7 @@ def create_execution_succeed(cls, 
payload: str) -> OperationUpdate: def create_execution_fail(cls, error: ErrorObject) -> OperationUpdate: """Create an instance of OperationUpdate for type: EXECUTION, action: FAIL.""" return cls( - operation_id=f"execution-result-{datetime.datetime.now(tz=datetime.UTC)}", + operation_id=f"execution-result-{int(datetime.datetime.now(tz=datetime.UTC).timestamp() * 1000)}", operation_type=OperationType.EXECUTION, action=OperationAction.FAIL, error=error, @@ -591,7 +596,7 @@ def create_invoke_start( operation_id=identifier.operation_id, parent_id=identifier.parent_id, operation_type=OperationType.CHAINED_INVOKE, - sub_type=OperationSubType.INVOKE, + sub_type=OperationSubType.CHAINED_INVOKE, action=OperationAction.START, name=identifier.name, payload=payload, @@ -744,7 +749,7 @@ def from_dict(cls, data: MutableMapping[str, Any]) -> Operation: callback_details = CallbackDetails.from_dict(callback_details_input) chained_invoke_details = None - if chained_invoke_details := data.get("chained_invoke_details"): + if chained_invoke_details := data.get("ChainedInvokeDetails"): chained_invoke_details = ChainedInvokeDetails.from_dict( chained_invoke_details ) @@ -936,55 +941,16 @@ def __init__(self, client: Any) -> None: self.client = client @staticmethod - def load_preview_botocore_models() -> None: - """ - Load boto3 models from the Python path for custom preview client. - """ - os.environ["AWS_DATA_PATH"] = str( - Path(__file__).parent.joinpath("botocore", "data") - ) - - @staticmethod - def initialize_local_runner_client() -> LambdaClient: - endpoint = os.getenv( - "DURABLE_LOCAL_RUNNER_ENDPOINT", "http://host.docker.internal:5000/" - ) - region = os.getenv("DURABLE_LOCAL_RUNNER_REGION", "us-west-2") - - # The local runner client needs execute-api as the signing service name, - # so we have a second `lambdainternal-local` boto model with this. - LambdaClient.load_preview_botocore_models() + def initialize_client() -> LambdaClient: client = boto3.client( - "lambdainternal-local", - endpoint_url=endpoint, - region_name=region, - ) - - logger.debug( - "Initialized lambda client with endpoint: '%s', region: '%s'", - endpoint, - region, + "lambda", + config=Config( + connect_timeout=5, + read_timeout=50, + ), ) return LambdaClient(client=client) - @staticmethod - def initialize_from_env() -> LambdaClient: - LambdaClient.load_preview_botocore_models() - - """ - TODO - we can remove this when were using the actual lambda client, - but we need this with the preview model because boto won't match against lambdainternal. 
- """ - endpoint_url = os.getenv("AWS_ENDPOINT_URL_LAMBDA", None) - if not endpoint_url: - client = boto3.client( - "lambdainternal", - ) - else: - client = boto3.client("lambdainternal", endpoint_url=endpoint_url) - - return LambdaClient(client=client) - def checkpoint( self, durable_execution_arn: str, @@ -1007,8 +973,11 @@ def checkpoint( return CheckpointOutput.from_dict(result) except Exception as e: - logger.exception("Failed to checkpoint.") - raise CheckpointError.from_exception(e) from e + checkpoint_error = CheckpointError.from_exception(e) + logger.exception( + "Failed to checkpoint.", extra=checkpoint_error.build_logger_extras() + ) + raise checkpoint_error from None def get_execution_state( self, @@ -1017,13 +986,20 @@ def get_execution_state( next_marker: str, max_items: int = 1000, ) -> StateOutput: - result: MutableMapping[str, Any] = self.client.get_durable_execution_state( - DurableExecutionArn=durable_execution_arn, - CheckpointToken=checkpoint_token, - Marker=next_marker, - MaxItems=max_items, - ) - return StateOutput.from_dict(result) + try: + result: MutableMapping[str, Any] = self.client.get_durable_execution_state( + DurableExecutionArn=durable_execution_arn, + CheckpointToken=checkpoint_token, + Marker=next_marker, + MaxItems=max_items, + ) + return StateOutput.from_dict(result) + except Exception as e: + error = GetExecutionStateError.from_exception(e) + logger.exception( + "Failed to get execution state.", extra=error.build_logger_extras() + ) + raise error from None # endregion client diff --git a/src/aws_durable_execution_sdk_python/logger.py b/src/aws_durable_execution_sdk_python/logger.py index f68b9b8..1ad68a9 100644 --- a/src/aws_durable_execution_sdk_python/logger.py +++ b/src/aws_durable_execution_sdk_python/logger.py @@ -8,26 +8,32 @@ from aws_durable_execution_sdk_python.types import LoggerInterface if TYPE_CHECKING: - from collections.abc import Mapping, MutableMapping + from collections.abc import Callable, Mapping, MutableMapping + from aws_durable_execution_sdk_python.context import ExecutionState from aws_durable_execution_sdk_python.identifier import OperationIdentifier @dataclass(frozen=True) class LogInfo: - execution_arn: str + execution_state: ExecutionState parent_id: str | None = None + operation_id: str | None = None name: str | None = None attempt: int | None = None @classmethod def from_operation_identifier( - cls, execution_arn: str, op_id: OperationIdentifier, attempt: int | None = None + cls, + execution_state: ExecutionState, + op_id: OperationIdentifier, + attempt: int | None = None, ) -> LogInfo: """Create new log info from an execution arn, OperationIdentifier and attempt.""" return cls( - execution_arn=execution_arn, + execution_state=execution_state, parent_id=op_id.parent_id, + operation_id=op_id.operation_id, name=op_id.name, attempt=attempt, ) @@ -35,8 +41,9 @@ def from_operation_identifier( def with_parent_id(self, parent_id: str) -> LogInfo: """Clone the log info with a new parent id.""" return LogInfo( - execution_arn=self.execution_arn, + execution_state=self.execution_state, parent_id=parent_id, + operation_id=self.operation_id, name=self.name, attempt=self.attempt, ) @@ -44,22 +51,33 @@ def with_parent_id(self, parent_id: str) -> LogInfo: class Logger(LoggerInterface): def __init__( - self, logger: LoggerInterface, default_extra: Mapping[str, object] + self, + logger: LoggerInterface, + default_extra: Mapping[str, object], + execution_state: ExecutionState, ) -> None: self._logger = logger self._default_extra = 
default_extra
+        self._execution_state = execution_state
 
     @classmethod
     def from_log_info(cls, logger: LoggerInterface, info: LogInfo) -> Logger:
         """Create a new logger with the given LogInfo."""
-        extra: MutableMapping[str, object] = {"execution_arn": info.execution_arn}
+        extra: MutableMapping[str, object] = {
+            "executionArn": info.execution_state.durable_execution_arn
+        }
         if info.parent_id:
-            extra["parent_id"] = info.parent_id
+            extra["parentId"] = info.parent_id
         if info.name:
-            extra["name"] = info.name
-        if info.attempt:
-            extra["attempt"] = info.attempt
-        return cls(logger, extra)
+            # Use 'operationName' instead of 'name' as the key because the stdlib LogRecord reserves the 'name' attribute
+            extra["operationName"] = info.name
+        if info.attempt is not None:
+            extra["attempt"] = info.attempt + 1
+        if info.operation_id:
+            extra["operationId"] = info.operation_id
+        return cls(
+            logger=logger, default_extra=extra, execution_state=info.execution_state
+        )
 
     def with_log_info(self, info: LogInfo) -> Logger:
         """Clone the existing logger with new LogInfo."""
@@ -75,29 +93,39 @@ def get_logger(self) -> LoggerInterface:
     def debug(
         self, msg: object, *args: object, extra: Mapping[str, object] | None = None
     ) -> None:
-        merged_extra = {**self._default_extra, **(extra or {})}
-        self._logger.debug(msg, *args, extra=merged_extra)
+        self._log(self._logger.debug, msg, *args, extra=extra)
 
     def info(
         self, msg: object, *args: object, extra: Mapping[str, object] | None = None
     ) -> None:
-        merged_extra = {**self._default_extra, **(extra or {})}
-        self._logger.info(msg, *args, extra=merged_extra)
+        self._log(self._logger.info, msg, *args, extra=extra)
 
     def warning(
         self, msg: object, *args: object, extra: Mapping[str, object] | None = None
     ) -> None:
-        merged_extra = {**self._default_extra, **(extra or {})}
-        self._logger.warning(msg, *args, extra=merged_extra)
+        self._log(self._logger.warning, msg, *args, extra=extra)
 
     def error(
         self, msg: object, *args: object, extra: Mapping[str, object] | None = None
     ) -> None:
-        merged_extra = {**self._default_extra, **(extra or {})}
-        self._logger.error(msg, *args, extra=merged_extra)
+        self._log(self._logger.error, msg, *args, extra=extra)
 
     def exception(
         self, msg: object, *args: object, extra: Mapping[str, object] | None = None
     ) -> None:
+        self._log(self._logger.exception, msg, *args, extra=extra)
+
+    def _log(
+        self,
+        log_func: Callable,
+        msg: object,
+        *args: object,
+        extra: Mapping[str, object] | None = None,
+    ) -> None:
+        if not self._should_log():
+            return
         merged_extra = {**self._default_extra, **(extra or {})}
-        self._logger.exception(msg, *args, extra=merged_extra)
+        log_func(msg, *args, extra=merged_extra)
+
+    def _should_log(self) -> bool:
+        return not self._execution_state.is_replaying()
diff --git a/src/aws_durable_execution_sdk_python/operation/base.py b/src/aws_durable_execution_sdk_python/operation/base.py
new file mode 100644
index 0000000..5836cda
--- /dev/null
+++ b/src/aws_durable_execution_sdk_python/operation/base.py
@@ -0,0 +1,187 @@
+"""Base classes for operation executors with checkpoint response handling."""
+
+from __future__ import annotations
+
+from abc import ABC, abstractmethod
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, Generic, TypeVar
+
+from aws_durable_execution_sdk_python.exceptions import InvalidStateError
+
+if TYPE_CHECKING:
+    from aws_durable_execution_sdk_python.state import CheckpointedResult
+
+T = TypeVar("T")
+
+
+@dataclass(frozen=True)
+class CheckResult(Generic[T]):
+    """Result of checking 
operation checkpoint status. + + Encapsulates the outcome of checking an operation's status and determines + the next action in the operation execution flow. + + IMPORTANT: Do not construct directly. Use factory methods: + - create_is_ready_to_execute(checkpoint) - operation ready to execute + - create_started() - checkpoint created, check status again + - create_completed(result) - terminal result available + + Attributes: + is_ready_to_execute: True if the operation is ready to execute its logic + has_checkpointed_result: True if a terminal result is already available + checkpointed_result: Checkpoint data for execute() + deserialized_result: Final result when operation completed + """ + + is_ready_to_execute: bool + has_checkpointed_result: bool + checkpointed_result: CheckpointedResult | None = None + deserialized_result: T | None = None + + @classmethod + def create_is_ready_to_execute( + cls, checkpoint: CheckpointedResult + ) -> CheckResult[T]: + """Create a CheckResult indicating the operation is ready to execute. + + Args: + checkpoint: The checkpoint data to pass to execute() + + Returns: + CheckResult with is_ready_to_execute=True + """ + return cls( + is_ready_to_execute=True, + has_checkpointed_result=False, + checkpointed_result=checkpoint, + ) + + @classmethod + def create_started(cls) -> CheckResult[T]: + """Create a CheckResult signaling that a checkpoint was created. + + Signals that process() should verify checkpoint status again to detect + if the operation completed already during checkpoint creation. + + Returns: + CheckResult indicating process() should check status again + """ + return cls(is_ready_to_execute=False, has_checkpointed_result=False) + + @classmethod + def create_completed(cls, result: T) -> CheckResult[T]: + """Create a CheckResult with a terminal result already deserialized. + + Args: + result: The final deserialized result + + Returns: + CheckResult with has_checkpointed_result=True and deserialized_result set + """ + return cls( + is_ready_to_execute=False, + has_checkpointed_result=True, + deserialized_result=result, + ) + + +class OperationExecutor(ABC, Generic[T]): + """Base class for durable operations with checkpoint response handling. + + Provides a framework for implementing operations that check status after + creating START checkpoints to handle synchronous completion, avoiding + unnecessary execution or suspension. + + The common pattern: + 1. Check operation status + 2. Create START checkpoint if needed + 3. Check status again (detects synchronous completion) + 4. Execute operation logic when ready + + Subclasses must implement: + - check_result_status(): Check status, create checkpoint if needed, return next action + - execute(): Execute the operation logic with checkpoint data + """ + + @abstractmethod + def check_result_status(self) -> CheckResult[T]: + """Check operation status and create START checkpoint if needed. + + Called twice by process() when creating synchronous checkpoints: once before + and once after, to detect if the operation completed immediately. + + This method should: + 1. Get the current checkpoint result + 2. Check for terminal statuses (SUCCEEDED, FAILED, etc.) and handle them + 3. Check for pending statuses and suspend if needed + 4. Create a START checkpoint if the operation hasn't started + 5. 
Return a CheckResult indicating the next action + + Returns: + CheckResult indicating whether to: + - Return a terminal result (has_checkpointed_result=True) + - Execute operation logic (is_ready_to_execute=True) + - Check status again (neither flag set - checkpoint was just created) + + Raises: + Operation-specific exceptions for terminal failure states + SuspendExecution for pending states + """ + ... # pragma: no cover + + @abstractmethod + def execute(self, checkpointed_result: CheckpointedResult) -> T: + """Execute operation logic with checkpoint data. + + This method is called when the operation is ready to execute its core logic. + It receives the checkpoint data that was returned by check_result_status(). + + Args: + checkpointed_result: The checkpoint data containing operation state + + Returns: + The result of executing the operation + + Raises: + May raise operation-specific errors during execution + """ + ... # pragma: no cover + + def process(self) -> T: + """Process operation with checkpoint response handling. + + Orchestrates the double-check pattern: + 1. Check status (handles replay and existing checkpoints) + 2. If checkpoint was just created, check status again (detects synchronous completion) + 3. Return terminal result if available + 4. Execute operation logic if ready + 5. Raise error for invalid states + + Returns: + The final result of the operation + + Raises: + InvalidStateError: If the check result is in an invalid state + May raise operation-specific errors from check_result_status() or execute() + """ + # Check 1: Entry (handles replay and existing checkpoints) + result = self.check_result_status() + + # If checkpoint was created, verify checkpoint response for immediate status change + if not result.is_ready_to_execute and not result.has_checkpointed_result: + result = self.check_result_status() + + # Return terminal result if available (can be None for operations that return None) + if result.has_checkpointed_result: + return result.deserialized_result # type: ignore[return-value] + + # Execute operation logic + if result.is_ready_to_execute: + if result.checkpointed_result is None: + msg = "CheckResult is marked ready to execute but checkpointed result is not set." 
+ raise InvalidStateError(msg) + return self.execute(result.checkpointed_result) + + # Invalid state - neither terminal nor ready to execute + msg = "Invalid CheckResult state: neither terminal nor ready to execute" + raise InvalidStateError(msg) diff --git a/src/aws_durable_execution_sdk_python/operation/callback.py b/src/aws_durable_execution_sdk_python/operation/callback.py index 4fe2a1e..67c51eb 100644 --- a/src/aws_durable_execution_sdk_python/operation/callback.py +++ b/src/aws_durable_execution_sdk_python/operation/callback.py @@ -10,6 +10,11 @@ CallbackOptions, OperationUpdate, ) +from aws_durable_execution_sdk_python.operation.base import ( + CheckResult, + OperationExecutor, +) +from aws_durable_execution_sdk_python.types import WaitForCallbackContext if TYPE_CHECKING: from collections.abc import Callable @@ -23,69 +28,128 @@ CheckpointedResult, ExecutionState, ) - from aws_durable_execution_sdk_python.types import Callback, DurableContext - - -def create_callback_handler( - state: ExecutionState, - operation_identifier: OperationIdentifier, - config: CallbackConfig | None = None, -) -> str: - """Create the callback checkpoint and return the callback id.""" - callback_options: CallbackOptions = ( - CallbackOptions( - timeout_seconds=config.timeout_seconds, - heartbeat_timeout_seconds=config.heartbeat_timeout_seconds, - ) - if config - else CallbackOptions() + from aws_durable_execution_sdk_python.types import ( + Callback, + DurableContext, + StepContext, ) - checkpointed_result: CheckpointedResult = state.get_checkpoint_result( - operation_identifier.operation_id - ) - if checkpointed_result.is_failed(): - # have to throw the exact same error on replay as the checkpointed failure - checkpointed_result.raise_callable_error() - - if ( - checkpointed_result.is_started() - or checkpointed_result.is_succeeded() - or checkpointed_result.is_timed_out() + +class CallbackOperationExecutor(OperationExecutor[str]): + """Executor for callback operations. + + Checks operation status after creating START checkpoints to handle operations + that complete synchronously, avoiding unnecessary execution or suspension. + + Unlike other operations, callbacks NEVER execute logic - they only create + checkpoints and return callback IDs. + + CRITICAL: Errors are deferred to Callback.result() for deterministic replay. + create_callback() always returns the callback_id, even for FAILED callbacks. + """ + + def __init__( + self, + state: ExecutionState, + operation_identifier: OperationIdentifier, + config: CallbackConfig | None, ): - # callback id should already exist + """Initialize the callback operation executor. + + Args: + state: The execution state + operation_identifier: The operation identifier + config: The callback configuration (optional) + """ + self.state = state + self.operation_identifier = operation_identifier + self.config = config + + def check_result_status(self) -> CheckResult[str]: + """Check operation status and create START checkpoint if needed. + + Called twice by process() when creating synchronous checkpoints: once before + and once after, to detect if the operation completed immediately. + + CRITICAL: This method does NOT raise on FAILED status. Errors are deferred + to Callback.result() to ensure deterministic replay. Code between + create_callback() and callback.result() must always execute. 
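+
+        Illustrative flow (the user-side function name is hypothetical):
+
+            callback = context.create_callback(name="approval")
+            notify_approver(callback.callback_id)  # always re-executed on replay
+            result = callback.result()             # failures surface here, not above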
+ + Returns: + CheckResult.create_is_ready_to_execute() for any existing status (including FAILED) + or CheckResult.create_started() after creating checkpoint + + Raises: + CallbackError: If callback_details are missing from checkpoint + """ + checkpointed_result: CheckpointedResult = self.state.get_checkpoint_result( + self.operation_identifier.operation_id + ) + + # CRITICAL: Do NOT raise on FAILED - defer error to Callback.result() + # If checkpoint exists (any status including FAILED), return ready to execute + # The execute() method will extract the callback_id + if checkpointed_result.is_existent(): + if ( + not checkpointed_result.operation + or not checkpointed_result.operation.callback_details + ): + msg = f"Missing callback details for operation: {self.operation_identifier.operation_id}" + raise CallbackError(msg) + + return CheckResult.create_is_ready_to_execute(checkpointed_result) + + # Create START checkpoint + callback_options: CallbackOptions = ( + CallbackOptions( + timeout_seconds=self.config.timeout_seconds, + heartbeat_timeout_seconds=self.config.heartbeat_timeout_seconds, + ) + if self.config + else CallbackOptions() + ) + + create_callback_operation: OperationUpdate = OperationUpdate.create_callback( + identifier=self.operation_identifier, + callback_options=callback_options, + ) + + # Checkpoint callback START with blocking (is_sync=True, default). + # Must wait for the API to generate and return the callback ID before proceeding. + # The callback ID is needed immediately by the caller to pass to external systems. + self.state.create_checkpoint(operation_update=create_callback_operation) + + # Signal to process() to check status again for immediate response + return CheckResult.create_started() + + def execute(self, checkpointed_result: CheckpointedResult) -> str: + """Execute callback operation by extracting the callback_id. + + Callbacks don't execute logic - they just extract and return the callback_id + from the checkpoint data. + + Args: + checkpointed_result: The checkpoint data containing callback_details + + Returns: + The callback_id from the checkpoint + + Raises: + CallbackError: If callback_details are missing (should never happen) + """ if ( not checkpointed_result.operation or not checkpointed_result.operation.callback_details ): - msg = f"Missing callback details for operation: {operation_identifier.operation_id}" + msg = f"Missing callback details for operation: {self.operation_identifier.operation_id}" raise CallbackError(msg) return checkpointed_result.operation.callback_details.callback_id - create_callback_operation = OperationUpdate.create_callback( - identifier=operation_identifier, - callback_options=callback_options, - ) - # Checkpoint callback START with blocking (is_sync=True, default). - # Must wait for the API to generate and return the callback ID before proceeding. - # The callback ID is needed immediately by the caller to pass to external systems. 
- state.create_checkpoint(operation_update=create_callback_operation) - - result: CheckpointedResult = state.get_checkpoint_result( - operation_identifier.operation_id - ) - - if not result.operation or not result.operation.callback_details: - msg = f"Missing callback details for operation: {operation_identifier.operation_id}" - raise CallbackError(msg) - - return result.operation.callback_details.callback_id - def wait_for_callback_handler( context: DurableContext, - submitter: Callable[[str], None], + submitter: Callable[[str, WaitForCallbackContext], None], name: str | None = None, config: WaitForCallbackConfig | None = None, ) -> Any: @@ -98,8 +162,10 @@ def wait_for_callback_handler( name=f"{name_with_space}create callback id", config=config ) - def submitter_step(step_context): # noqa: ARG001 - return submitter(callback.callback_id) + def submitter_step(step_context: StepContext): + return submitter( + callback.callback_id, WaitForCallbackContext(logger=step_context.logger) + ) step_config = ( StepConfig( diff --git a/src/aws_durable_execution_sdk_python/operation/child.py b/src/aws_durable_execution_sdk_python/operation/child.py index 07d0a08..04819d4 100644 --- a/src/aws_durable_execution_sdk_python/operation/child.py +++ b/src/aws_durable_execution_sdk_python/operation/child.py @@ -16,13 +16,20 @@ OperationSubType, OperationUpdate, ) +from aws_durable_execution_sdk_python.operation.base import ( + CheckResult, + OperationExecutor, +) from aws_durable_execution_sdk_python.serdes import deserialize, serialize if TYPE_CHECKING: from collections.abc import Callable from aws_durable_execution_sdk_python.identifier import OperationIdentifier - from aws_durable_execution_sdk_python.state import ExecutionState + from aws_durable_execution_sdk_python.state import ( + CheckpointedResult, + ExecutionState, + ) logger = logging.getLogger(__name__) @@ -32,131 +39,239 @@ CHECKPOINT_SIZE_LIMIT = 256 * 1024 -def child_handler( - func: Callable[[], T], - state: ExecutionState, - operation_identifier: OperationIdentifier, - config: ChildConfig | None, -) -> T: - logger.debug( - "▶️ Executing child context for id: %s, name: %s", - operation_identifier.operation_id, - operation_identifier.name, - ) +class ChildOperationExecutor(OperationExecutor[T]): + """Executor for child context operations. - if not config: - config = ChildConfig() + Checks operation status after creating START checkpoints to handle operations + that complete synchronously, avoiding unnecessary execution or suspension. + + Handles large payload scenarios with ReplayChildren mode. 
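+
+    Note: results larger than CHECKPOINT_SIZE_LIMIT (256 KB) are not checkpointed
+    verbatim; only a compact summary is stored and the child operations are
+    re-executed on replay to reconstruct the full result.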
+ """ - checkpointed_result = state.get_checkpoint_result(operation_identifier.operation_id) - if ( - checkpointed_result.is_succeeded() - and not checkpointed_result.is_replay_children() + def __init__( + self, + func: Callable[[], T], + state: ExecutionState, + operation_identifier: OperationIdentifier, + config: ChildConfig, ): - logger.debug( - "Child context already completed, skipping execution for id: %s, name: %s", - operation_identifier.operation_id, - operation_identifier.name, - ) - if checkpointed_result.result is None: - return None # type: ignore - return deserialize( - serdes=config.serdes, - data=checkpointed_result.result, - operation_id=operation_identifier.operation_id, - durable_execution_arn=state.durable_execution_arn, - ) - if checkpointed_result.is_failed(): - checkpointed_result.raise_callable_error() - sub_type = config.sub_type or OperationSubType.RUN_IN_CHILD_CONTEXT - - if not checkpointed_result.is_existent(): - start_operation = OperationUpdate.create_context_start( - identifier=operation_identifier, - sub_type=sub_type, + """Initialize the child operation executor. + + Args: + func: The child context function to execute + state: The execution state + operation_identifier: The operation identifier + config: The child configuration + """ + self.func = func + self.state = state + self.operation_identifier = operation_identifier + self.config = config + self.sub_type = config.sub_type or OperationSubType.RUN_IN_CHILD_CONTEXT + + def check_result_status(self) -> CheckResult[T]: + """Check operation status and create START checkpoint if needed. + + Called twice by process() when creating synchronous checkpoints: once before + and once after, to detect if the operation completed immediately. + + Returns: + CheckResult indicating the next action to take + + Raises: + CallableRuntimeError: For FAILED operations + """ + checkpointed_result: CheckpointedResult = self.state.get_checkpoint_result( + self.operation_identifier.operation_id ) - # Checkpoint child context START with non-blocking (is_sync=False). - # This is a fire-and-forget operation for performance - we don't need to wait for - # persistence before executing the child context. The START checkpoint is purely - # for observability and tracking the operation hierarchy. - state.create_checkpoint(operation_update=start_operation, is_sync=False) - - try: - raw_result: T = func() - if checkpointed_result.is_replay_children(): + + # Terminal success without replay_children - deserialize and return + if ( + checkpointed_result.is_succeeded() + and not checkpointed_result.is_replay_children() + ): logger.debug( - "ReplayChildren mode: Executed child context again on replay due to large payload. Exiting child context without creating another checkpoint. id: %s, name: %s", - operation_identifier.operation_id, - operation_identifier.name, + "Child context already completed, skipping execution for id: %s, name: %s", + self.operation_identifier.operation_id, + self.operation_identifier.name, ) - return raw_result - serialized_result: str = serialize( - serdes=config.serdes, - value=raw_result, - operation_id=operation_identifier.operation_id, - durable_execution_arn=state.durable_execution_arn, - ) - # Summary Generator Logic: - # When the serialized result exceeds 256KB, we use ReplayChildren mode to avoid - # checkpointing large payloads. Instead, we checkpoint a compact summary and mark - # the operation for replay. This matches the TypeScript implementation behavior. 
- # - # See TypeScript reference: - # - aws-durable-execution-sdk-js/src/handlers/run-in-child-context-handler/run-in-child-context-handler.ts (lines ~200-220) - # - # The summary generator creates a JSON summary with metadata (type, counts, status) - # instead of the full BatchResult. During replay, the child context is re-executed - # to reconstruct the full result rather than deserializing from the checkpoint. - replay_children: bool = False - if len(serialized_result) > CHECKPOINT_SIZE_LIMIT: - logger.debug( - "Large payload detected, using ReplayChildren mode: id: %s, name: %s, payload_size: %d, limit: %d", - operation_identifier.operation_id, - operation_identifier.name, - len(serialized_result), - CHECKPOINT_SIZE_LIMIT, + if checkpointed_result.result is None: + return CheckResult.create_completed(None) # type: ignore + + result: T = deserialize( + serdes=self.config.serdes, + data=checkpointed_result.result, + operation_id=self.operation_identifier.operation_id, + durable_execution_arn=self.state.durable_execution_arn, ) - replay_children = True - # Use summary generator if provided, otherwise use empty string (matches TypeScript) - serialized_result = ( - config.summary_generator(raw_result) if config.summary_generator else "" + return CheckResult.create_completed(result) + + # Terminal success with replay_children - re-execute + if ( + checkpointed_result.is_succeeded() + and checkpointed_result.is_replay_children() + ): + return CheckResult.create_is_ready_to_execute(checkpointed_result) + + # Terminal failure + if checkpointed_result.is_failed(): + checkpointed_result.raise_callable_error() + + # Create START checkpoint if not exists + if not checkpointed_result.is_existent(): + start_operation: OperationUpdate = OperationUpdate.create_context_start( + identifier=self.operation_identifier, + sub_type=self.sub_type, + ) + # Checkpoint child context START with non-blocking (is_sync=False). + # This is a fire-and-forget operation for performance - we don't need to wait for + # persistence before executing the child context. The START checkpoint is purely + # for observability and tracking the operation hierarchy. + self.state.create_checkpoint( + operation_update=start_operation, is_sync=False ) - success_operation = OperationUpdate.create_context_succeed( - identifier=operation_identifier, - payload=serialized_result, - sub_type=sub_type, - context_options=ContextOptions(replay_children=replay_children), - ) - # Checkpoint child context SUCCEED with blocking (is_sync=True, default). - # Must ensure the child context result is persisted before returning to the parent. - # This guarantees the result is durable and child operations won't be re-executed on replay - # (unless replay_children=True for large payloads). - state.create_checkpoint(operation_update=success_operation) + # Ready to execute (checkpoint exists or was just created) + return CheckResult.create_is_ready_to_execute(checkpointed_result) + + def execute(self, checkpointed_result: CheckpointedResult) -> T: + """Execute child context function with error handling and large payload support. 
+ Args: + checkpointed_result: The checkpoint data containing operation state + + Returns: + The result of executing the child context function + + Raises: + SuspendExecution: Re-raised without checkpointing + InvocationError: Re-raised after checkpointing FAIL + CallableRuntimeError: Raised for other exceptions after checkpointing FAIL + """ logger.debug( - "✅ Successfully completed child context for id: %s, name: %s", - operation_identifier.operation_id, - operation_identifier.name, + "▶️ Executing child context for id: %s, name: %s", + self.operation_identifier.operation_id, + self.operation_identifier.name, ) - return raw_result # noqa: TRY300 - except SuspendExecution: - # Don't checkpoint SuspendExecution - let it bubble up - raise - except Exception as e: - error_object = ErrorObject.from_exception(e) - fail_operation = OperationUpdate.create_context_fail( - identifier=operation_identifier, error=error_object, sub_type=sub_type - ) - # Checkpoint child context FAIL with blocking (is_sync=True, default). - # Must ensure the failure state is persisted before raising the exception. - # This guarantees the error is durable and child operations won't be re-executed on replay. - state.create_checkpoint(operation_update=fail_operation) - - # InvocationError and its derivatives can be retried - # When we encounter an invocation error (in all of its forms), we bubble that - # error upwards (with the checkpoint in place) such that we reach the - # execution handler at the very top, which will then induce a retry from the - # dataplane. - if isinstance(e, InvocationError): + + try: + raw_result: T = self.func() + + # If in replay_children mode, return without checkpointing + if checkpointed_result.is_replay_children(): + logger.debug( + "ReplayChildren mode: Executed child context again on replay due to large payload. Exiting child context without creating another checkpoint. id: %s, name: %s", + self.operation_identifier.operation_id, + self.operation_identifier.name, + ) + return raw_result + + # Serialize result + serialized_result: str = serialize( + serdes=self.config.serdes, + value=raw_result, + operation_id=self.operation_identifier.operation_id, + durable_execution_arn=self.state.durable_execution_arn, + ) + + # Check payload size and use ReplayChildren mode if needed + # Summary Generator Logic: + # When the serialized result exceeds 256KB, we use ReplayChildren mode to avoid + # checkpointing large payloads. Instead, we checkpoint a compact summary and mark + # the operation for replay. This matches the TypeScript implementation behavior. + # + # See TypeScript reference: + # - aws-durable-execution-sdk-js/src/handlers/run-in-child-context-handler/run-in-child-context-handler.ts (lines ~200-220) + # + # The summary generator creates a JSON summary with metadata (type, counts, status) + # instead of the full BatchResult. During replay, the child context is re-executed + # to reconstruct the full result rather than deserializing from the checkpoint. 
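+            # Illustrative (hypothetical) summary payload a summary_generator might
+            # return in place of the full result:
+            #   {"type": "BatchResult", "total": 100, "succeeded": 99, "failed": 1}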
+ replay_children: bool = False + if len(serialized_result) > CHECKPOINT_SIZE_LIMIT: + logger.debug( + "Large payload detected, using ReplayChildren mode: id: %s, name: %s, payload_size: %d, limit: %d", + self.operation_identifier.operation_id, + self.operation_identifier.name, + len(serialized_result), + CHECKPOINT_SIZE_LIMIT, + ) + replay_children = True + # Use summary generator if provided, otherwise use empty string (matches TypeScript) + serialized_result = ( + self.config.summary_generator(raw_result) + if self.config.summary_generator + else "" + ) + + # Checkpoint SUCCEED + success_operation: OperationUpdate = OperationUpdate.create_context_succeed( + identifier=self.operation_identifier, + payload=serialized_result, + sub_type=self.sub_type, + context_options=ContextOptions(replay_children=replay_children), + ) + # Checkpoint child context SUCCEED with blocking (is_sync=True, default). + # Must ensure the child context result is persisted before returning to the parent. + # This guarantees the result is durable and child operations won't be re-executed on replay + # (unless replay_children=True for large payloads). + self.state.create_checkpoint(operation_update=success_operation) + + logger.debug( + "✅ Successfully completed child context for id: %s, name: %s", + self.operation_identifier.operation_id, + self.operation_identifier.name, + ) + return raw_result # noqa: TRY300 + except SuspendExecution: + # Don't checkpoint SuspendExecution - let it bubble up raise - raise error_object.to_callable_runtime_error() from e + except Exception as e: + error_object = ErrorObject.from_exception(e) + fail_operation: OperationUpdate = OperationUpdate.create_context_fail( + identifier=self.operation_identifier, + error=error_object, + sub_type=self.sub_type, + ) + # Checkpoint child context FAIL with blocking (is_sync=True, default). + # Must ensure the failure state is persisted before raising the exception. + # This guarantees the error is durable and child operations won't be re-executed on replay. + self.state.create_checkpoint(operation_update=fail_operation) + + # InvocationError and its derivatives can be retried + # When we encounter an invocation error (in all of its forms), we bubble that + # error upwards (with the checkpoint in place) such that we reach the + # execution handler at the very top, which will then induce a retry from the + # dataplane. + if isinstance(e, InvocationError): + raise + raise error_object.to_callable_runtime_error() from e + + +def child_handler( + func: Callable[[], T], + state: ExecutionState, + operation_identifier: OperationIdentifier, + config: ChildConfig | None, +) -> T: + """Public API for child context operations - maintains existing signature. + + This function creates a ChildOperationExecutor and delegates to its process() method, + maintaining backward compatibility with existing code that calls child_handler. 
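+
+    New call sites can equivalently construct a ChildOperationExecutor and call
+    process() directly; this wrapper only fills in a default ChildConfig first.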
+ + Args: + func: The child context function to execute + state: The execution state + operation_identifier: The operation identifier + config: The child configuration (optional) + + Returns: + The result of executing the child context + + Raises: + May raise operation-specific errors during execution + """ + if not config: + config = ChildConfig() + + executor = ChildOperationExecutor(func, state, operation_identifier, config) + return executor.process() diff --git a/src/aws_durable_execution_sdk_python/operation/invoke.py b/src/aws_durable_execution_sdk_python/operation/invoke.py index 1c752de..9288c98 100644 --- a/src/aws_durable_execution_sdk_python/operation/invoke.py +++ b/src/aws_durable_execution_sdk_python/operation/invoke.py @@ -5,18 +5,31 @@ import logging from typing import TYPE_CHECKING, TypeVar -from aws_durable_execution_sdk_python.config import InvokeConfig from aws_durable_execution_sdk_python.exceptions import ExecutionError from aws_durable_execution_sdk_python.lambda_service import ( ChainedInvokeOptions, OperationUpdate, ) -from aws_durable_execution_sdk_python.serdes import deserialize, serialize + +# Import base classes for operation executor pattern +from aws_durable_execution_sdk_python.operation.base import ( + CheckResult, + OperationExecutor, +) +from aws_durable_execution_sdk_python.serdes import ( + DEFAULT_JSON_SERDES, + deserialize, + serialize, +) from aws_durable_execution_sdk_python.suspend import suspend_with_optional_resume_delay if TYPE_CHECKING: + from aws_durable_execution_sdk_python.config import InvokeConfig from aws_durable_execution_sdk_python.identifier import OperationIdentifier - from aws_durable_execution_sdk_python.state import ExecutionState + from aws_durable_execution_sdk_python.state import ( + CheckpointedResult, + ExecutionState, + ) P = TypeVar("P") # Payload type R = TypeVar("R") # Result type @@ -24,88 +37,136 @@ logger = logging.getLogger(__name__) -def invoke_handler( - function_name: str, - payload: P, - state: ExecutionState, - operation_identifier: OperationIdentifier, - config: InvokeConfig[P, R] | None, -) -> R: - """Invoke another Durable Function.""" - logger.debug( - "🔗 Invoke %s (%s)", - operation_identifier.name or function_name, - operation_identifier.operation_id, - ) +class InvokeOperationExecutor(OperationExecutor[R]): + """Executor for invoke operations. + + Checks operation status after creating START checkpoints to handle operations + that complete synchronously, avoiding unnecessary execution or suspension. - if not config: - config = InvokeConfig[P, R]() + The invoke operation never actually "executes" in the traditional sense - + it always suspends to wait for the async invocation to complete. + """ - # Check if we have existing step data - checkpointed_result = state.get_checkpoint_result(operation_identifier.operation_id) + def __init__( + self, + function_name: str, + payload: P, + state: ExecutionState, + operation_identifier: OperationIdentifier, + config: InvokeConfig[P, R], + ): + """Initialize the invoke operation executor. 
+
+        Args:
+            function_name: Name of the function to invoke
+            payload: The payload to pass to the invoked function
+            state: The execution state
+            operation_identifier: The operation identifier
+            config: Configuration for the invoke operation
+        """
+        self.function_name = function_name
+        self.payload = payload
+        self.state = state
+        self.operation_identifier = operation_identifier
+        self.config = config
+
+    def check_result_status(self) -> CheckResult[R]:
+        """Check operation status and create START checkpoint if needed.
+
+        Called twice by process() when creating synchronous checkpoints: once before
+        and once after, to detect if the operation completed immediately.
+
+        Returns:
+            CheckResult indicating the next action to take
+
+        Raises:
+            CallableRuntimeError: For FAILED, TIMED_OUT, or STOPPED operations
+            SuspendExecution: For STARTED operations waiting for completion
+        """
+        checkpointed_result: CheckpointedResult = self.state.get_checkpoint_result(
+            self.operation_identifier.operation_id
+        )
+
+        # Terminal success - deserialize and return
+        if checkpointed_result.is_succeeded():
+            if checkpointed_result.result is None:
+                return CheckResult.create_completed(None)  # type: ignore

-    if checkpointed_result.is_succeeded():
-        # Return persisted result - no need to check for errors in successful operations
+            result: R = deserialize(
+                serdes=self.config.serdes_result or DEFAULT_JSON_SERDES,
+                data=checkpointed_result.result,
+                operation_id=self.operation_identifier.operation_id,
+                durable_execution_arn=self.state.durable_execution_arn,
+            )
+            return CheckResult.create_completed(result)
+
+        # Terminal failures
         if (
-            checkpointed_result.operation
-            and checkpointed_result.operation.chained_invoke_details
-            and checkpointed_result.operation.chained_invoke_details.result
+            checkpointed_result.is_failed()
+            or checkpointed_result.is_timed_out()
+            or checkpointed_result.is_stopped()
         ):
-            return deserialize(
-                serdes=config.serdes_result,
-                data=checkpointed_result.operation.chained_invoke_details.result,
-                operation_id=operation_identifier.operation_id,
-                durable_execution_arn=state.durable_execution_arn,
+            checkpointed_result.raise_callable_error()
+
+        # Still running - ready to suspend
+        if checkpointed_result.is_started():
+            logger.debug(
+                "⏳ Invoke %s still in progress, will suspend",
+                self.operation_identifier.name or self.function_name,
             )
-        return None  # type: ignore
+            return CheckResult.create_is_ready_to_execute(checkpointed_result)
+
+        # Create START checkpoint if not exists
+        if not checkpointed_result.is_existent():
+            serialized_payload: str = serialize(
+                serdes=self.config.serdes_payload or DEFAULT_JSON_SERDES,
+                value=self.payload,
+                operation_id=self.operation_identifier.operation_id,
+                durable_execution_arn=self.state.durable_execution_arn,
+            )
+            start_operation: OperationUpdate = OperationUpdate.create_invoke_start(
+                identifier=self.operation_identifier,
+                payload=serialized_payload,
+                chained_invoke_options=ChainedInvokeOptions(
+                    function_name=self.function_name,
+                    tenant_id=self.config.tenant_id,
+                ),
+            )
+            # Checkpoint invoke START with blocking (is_sync=True).
+            # Must ensure the chained invocation is recorded before suspending execution.
+            self.state.create_checkpoint(operation_update=start_operation, is_sync=True)

-    if (
-        checkpointed_result.is_failed()
-        or checkpointed_result.is_timed_out()
-        or checkpointed_result.is_stopped()
-    ):
-        # Operation failed, throw the exact same error on replay as the checkpointed failure
-        checkpointed_result.raise_callable_error()
-
-    if checkpointed_result.is_started() or checkpointed_result.is_pending():
-        # Operation is still running, suspend until completion
-        logger.debug(
-            "⏳ Invoke %s still in progress, suspending",
-            operation_identifier.name or function_name,
-        )
-        msg = f"Invoke {operation_identifier.operation_id} still in progress"
-        suspend_with_optional_resume_delay(msg, config.timeout_seconds)
-
-    serialized_payload: str = serialize(
-        serdes=config.serdes_payload,
-        value=payload,
-        operation_id=operation_identifier.operation_id,
-        durable_execution_arn=state.durable_execution_arn,
-    )
+            logger.debug(
+                "🚀 Invoke %s started, will check for immediate response",
+                self.operation_identifier.name or self.function_name,
+            )

-    # the backend will do the invoke once it gets this checkpoint
-    start_operation: OperationUpdate = OperationUpdate.create_invoke_start(
-        identifier=operation_identifier,
-        payload=serialized_payload,
-        chained_invoke_options=ChainedInvokeOptions(function_name=function_name),
-    )
+            # Signal to process() that the checkpoint was created - recheck status
+            # (e.g. for permission errors) before proceeding.
+            return CheckResult.create_started()

-    # Checkpoint invoke START with blocking (is_sync=True, default).
-    # Must ensure the chained invocation is recorded before suspending execution.
-    # This guarantees the invoke operation is durable and will be tracked by the backend.
-    state.create_checkpoint(operation_update=start_operation)
+        # Ready to suspend (checkpoint exists but not in a terminal or started state)
+        return CheckResult.create_is_ready_to_execute(checkpointed_result)

-    logger.debug(
-        "🚀 Invoke %s started, suspending for async execution",
-        operation_identifier.name or function_name,
-    )
+    def execute(self, _checkpointed_result: CheckpointedResult) -> R:
+        """Execute invoke operation by suspending to wait for async completion.

-    # Suspend so invoke executes asynchronously without consuming cpu here
-    msg = (
-        f"Invoke {operation_identifier.operation_id} started, suspending for completion"
-    )
-    suspend_with_optional_resume_delay(msg, config.timeout_seconds)
-    # This line should never be reached since suspend_with_optional_resume_delay always raises
-    # if it is ever reached, we will crash in a non-retryable manner via ExecutionError
-    msg = "suspend_with_optional_resume_delay should have raised an exception, but did not."
-    raise ExecutionError(msg) from None
+        The invoke operation doesn't execute synchronously - it suspends and
+        the backend executes the invoked function asynchronously.
+
+        Args:
+            _checkpointed_result: The checkpoint data (unused, but required by the interface)
+
+        Returns:
+            Never returns - always suspends
+
+        Raises:
+            SuspendExecution: Always raised via suspend_with_optional_resume_delay
+            ExecutionError: If suspend doesn't raise (should never happen)
+        """
+        msg: str = f"Invoke {self.operation_identifier.operation_id} started, suspending for completion"
+        suspend_with_optional_resume_delay(msg, self.config.timeout_seconds)
+        # This line should never be reached since suspend_with_optional_resume_delay always raises
+        error_msg: str = "suspend_with_optional_resume_delay should have raised an exception, but did not."
+ raise ExecutionError(error_msg) from None diff --git a/src/aws_durable_execution_sdk_python/operation/map.py b/src/aws_durable_execution_sdk_python/operation/map.py index ed76bb4..2551b48 100644 --- a/src/aws_durable_execution_sdk_python/operation/map.py +++ b/src/aws_durable_execution_sdk_python/operation/map.py @@ -7,9 +7,9 @@ from collections.abc import Callable, Sequence from typing import TYPE_CHECKING, Generic, TypeVar -from aws_durable_execution_sdk_python.concurrency import ( +from aws_durable_execution_sdk_python.concurrency.executor import ConcurrentExecutor +from aws_durable_execution_sdk_python.concurrency.models import ( BatchResult, - ConcurrentExecutor, Executable, ) from aws_durable_execution_sdk_python.config import MapConfig @@ -82,6 +82,7 @@ def from_items( name_prefix="map-item-", serdes=config.serdes, summary_generator=config.summary_generator, + item_serdes=config.item_serdes, ) def execute_item(self, child_context, executable: Executable[Callable]) -> R: diff --git a/src/aws_durable_execution_sdk_python/operation/parallel.py b/src/aws_durable_execution_sdk_python/operation/parallel.py index e81499f..5046c75 100644 --- a/src/aws_durable_execution_sdk_python/operation/parallel.py +++ b/src/aws_durable_execution_sdk_python/operation/parallel.py @@ -7,12 +7,13 @@ from collections.abc import Callable, Sequence from typing import TYPE_CHECKING, TypeVar -from aws_durable_execution_sdk_python.concurrency import ConcurrentExecutor, Executable +from aws_durable_execution_sdk_python.concurrency.executor import ConcurrentExecutor +from aws_durable_execution_sdk_python.concurrency.models import Executable from aws_durable_execution_sdk_python.config import ParallelConfig from aws_durable_execution_sdk_python.lambda_service import OperationSubType if TYPE_CHECKING: - from aws_durable_execution_sdk_python.concurrency import BatchResult + from aws_durable_execution_sdk_python.concurrency.models import BatchResult from aws_durable_execution_sdk_python.context import DurableContext from aws_durable_execution_sdk_python.identifier import OperationIdentifier from aws_durable_execution_sdk_python.serdes import SerDes @@ -69,6 +70,7 @@ def from_callables( name_prefix="parallel-branch-", serdes=config.serdes, summary_generator=config.summary_generator, + item_serdes=config.item_serdes, ) def execute_item(self, child_context, executable: Executable[Callable]) -> R: # noqa: PLR6301 diff --git a/src/aws_durable_execution_sdk_python/operation/step.py b/src/aws_durable_execution_sdk_python/operation/step.py index c80b18b..eb49c9b 100644 --- a/src/aws_durable_execution_sdk_python/operation/step.py +++ b/src/aws_durable_execution_sdk_python/operation/step.py @@ -11,6 +11,7 @@ ) from aws_durable_execution_sdk_python.exceptions import ( ExecutionError, + InvalidStateError, StepInterruptedError, ) from aws_durable_execution_sdk_python.lambda_service import ( @@ -18,6 +19,10 @@ OperationUpdate, ) from aws_durable_execution_sdk_python.logger import Logger, LogInfo +from aws_durable_execution_sdk_python.operation.base import ( + CheckResult, + OperationExecutor, +) from aws_durable_execution_sdk_python.retries import RetryDecision, RetryPresets from aws_durable_execution_sdk_python.serdes import deserialize, serialize from aws_durable_execution_sdk_python.suspend import ( @@ -40,230 +45,314 @@ T = TypeVar("T") -def step_handler( - func: Callable[[StepContext], T], - state: ExecutionState, - operation_identifier: OperationIdentifier, - config: StepConfig | None, - context_logger: Logger, -) -> T: - 
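(Aside before the step.py rewrite below: every executor in this change subclasses OperationExecutor from operation/base.py, which this diff imports but does not show. Going by the CheckResult factory methods and the repeated "called twice by process()" docstrings, the base template plausibly has the following shape. This is a sketch under those assumptions; the CheckResult field names are illustrative, not the actual base.py source.)

from abc import ABC, abstractmethod
from typing import Any, Generic, TypeVar

T = TypeVar("T")


class OperationExecutor(ABC, Generic[T]):
    """Sketch of the assumed check-then-execute template."""

    @abstractmethod
    def check_result_status(self) -> Any: ...

    @abstractmethod
    def execute(self, checkpointed_result: Any) -> T: ...

    def process(self) -> T:
        check = self.check_result_status()
        if check.is_completed:  # assumed field name
            return check.result  # replay hit: reuse the checkpointed result
        if check.is_started:  # assumed field name
            # A synchronous START checkpoint was just written; check again in
            # case the operation completed (or failed) immediately.
            check = self.check_result_status()
            if check.is_completed:
                return check.result
        return self.execute(check.checkpointed_result)  # assumed field name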
logger.debug( - "▶️ Executing step for id: %s, name: %s", - operation_identifier.operation_id, - operation_identifier.name, - ) - - if not config: - config = StepConfig() - - checkpointed_result: CheckpointedResult = state.get_checkpoint_result( - operation_identifier.operation_id - ) - if checkpointed_result.is_succeeded(): - logger.debug( - "Step already completed, skipping execution for id: %s, name: %s", - operation_identifier.operation_id, - operation_identifier.name, - ) - if checkpointed_result.result is None: - return None # type: ignore - - return deserialize( - serdes=config.serdes, - data=checkpointed_result.result, - operation_id=operation_identifier.operation_id, - durable_execution_arn=state.durable_execution_arn, - ) +class StepOperationExecutor(OperationExecutor[T]): + """Executor for step operations. - if checkpointed_result.is_failed(): - # have to throw the exact same error on replay as the checkpointed failure - checkpointed_result.raise_callable_error() - - if checkpointed_result.is_pending(): - scheduled_timestamp = checkpointed_result.get_next_attempt_timestamp() - # normally, we'd ensure that a suspension here would be for > 0 seconds; - # however, this is coming from a checkpoint, and we can trust that it is a correct target timestamp. - suspend_with_optional_resume_timestamp( - msg=f"Retry scheduled for {operation_identifier.name or operation_identifier.operation_id} will retry at timestamp {scheduled_timestamp}", - datetime_timestamp=scheduled_timestamp, - ) + Checks operation status after creating START checkpoints to handle operations + that complete synchronously, avoiding unnecessary execution or suspension. + """ - if ( - checkpointed_result.is_started() - and config.step_semantics is StepSemantics.AT_MOST_ONCE_PER_RETRY + def __init__( + self, + func: Callable[[StepContext], T], + config: StepConfig, + state: ExecutionState, + operation_identifier: OperationIdentifier, + context_logger: Logger, ): - # step was previously interrupted - msg = f"Step operation_id={operation_identifier.operation_id} name={operation_identifier.name} was previously interrupted" - retry_handler( - StepInterruptedError(msg), - state, - operation_identifier, - config, - checkpointed_result, + """Initialize the step operation executor. + + Args: + func: The step function to execute + config: The step configuration + state: The execution state + operation_identifier: The operation identifier + context_logger: The logger for the step context + """ + self.func = func + self.config = config + self.state = state + self.operation_identifier = operation_identifier + self.context_logger = context_logger + self._checkpoint_created = False # Track if we created the checkpoint + + def check_result_status(self) -> CheckResult[T]: + """Check operation status and create START checkpoint if needed. + + Called twice by process() when creating synchronous checkpoints: once before + and once after, to detect if the operation completed immediately. 
+ + Returns: + CheckResult indicating the next action to take + + Raises: + CallableRuntimeError: For FAILED operations + StepInterruptedError: For interrupted AT_MOST_ONCE operations + SuspendExecution: For PENDING operations waiting for retry + """ + checkpointed_result: CheckpointedResult = self.state.get_checkpoint_result( + self.operation_identifier.operation_id ) - checkpointed_result.raise_callable_error() + # Terminal success - deserialize and return + if checkpointed_result.is_succeeded(): + logger.debug( + "Step already completed, skipping execution for id: %s, name: %s", + self.operation_identifier.operation_id, + self.operation_identifier.name, + ) + if checkpointed_result.result is None: + return CheckResult.create_completed(None) # type: ignore + + result: T = deserialize( + serdes=self.config.serdes, + data=checkpointed_result.result, + operation_id=self.operation_identifier.operation_id, + durable_execution_arn=self.state.durable_execution_arn, + ) + return CheckResult.create_completed(result) + + # Terminal failure + if checkpointed_result.is_failed(): + # Have to throw the exact same error on replay as the checkpointed failure + checkpointed_result.raise_callable_error() + + # Pending retry + if checkpointed_result.is_pending(): + scheduled_timestamp = checkpointed_result.get_next_attempt_timestamp() + # Normally, we'd ensure that a suspension here would be for > 0 seconds; + # however, this is coming from a checkpoint, and we can trust that it is a correct target timestamp. + suspend_with_optional_resume_timestamp( + msg=f"Retry scheduled for {self.operation_identifier.name or self.operation_identifier.operation_id} will retry at timestamp {scheduled_timestamp}", + datetime_timestamp=scheduled_timestamp, + ) - if not ( - checkpointed_result.is_started() - and config.step_semantics is StepSemantics.AT_LEAST_ONCE_PER_RETRY - ): - # Do not checkpoint start for started & AT_LEAST_ONCE execution - # Checkpoint start for the other - start_operation: OperationUpdate = OperationUpdate.create_step_start( - identifier=operation_identifier, - ) - # Checkpoint START operation with appropriate synchronization: - # - AtMostOncePerRetry: Use blocking checkpoint (is_sync=True) to prevent duplicate execution. - # The step must not execute until the START checkpoint is persisted, ensuring exactly-once semantics. - # - AtLeastOncePerRetry: Use non-blocking checkpoint (is_sync=False) for performance optimization. - # The step can execute immediately without waiting for checkpoint persistence, allowing at-least-once semantics. - is_sync: bool = config.step_semantics is StepSemantics.AT_MOST_ONCE_PER_RETRY - state.create_checkpoint(operation_update=start_operation, is_sync=is_sync) - - attempt: int = 0 - if checkpointed_result.operation and checkpointed_result.operation.step_details: - attempt = checkpointed_result.operation.step_details.attempt - - step_context = StepContext( - logger=context_logger.with_log_info( - LogInfo.from_operation_identifier( - execution_arn=state.durable_execution_arn, - op_id=operation_identifier, - attempt=attempt, + # Handle interrupted AT_MOST_ONCE (replay scenario only) + # This check only applies on REPLAY when a new Lambda invocation starts after interruption. + # A STARTED checkpoint with AT_MOST_ONCE on entry means the previous invocation + # was interrupted and it should NOT re-execute. 
+ # + # This check is skipped on fresh executions because: + # - First call (fresh): checkpoint doesn't exist → is_started() returns False → skip this check + # - After creating sync checkpoint and refreshing: if status is STARTED, we return + # ready_to_execute directly, so process() never calls check_result_status() again + if ( + checkpointed_result.is_started() + and self.config.step_semantics is StepSemantics.AT_MOST_ONCE_PER_RETRY + ): + # Step was previously interrupted in a prior invocation - handle retry + msg: str = f"Step operation_id={self.operation_identifier.operation_id} name={self.operation_identifier.name} was previously interrupted" + self.retry_handler(StepInterruptedError(msg), checkpointed_result) + checkpointed_result.raise_callable_error() + + # Ready to execute if STARTED + AT_LEAST_ONCE + if ( + checkpointed_result.is_started() + and self.config.step_semantics is StepSemantics.AT_LEAST_ONCE_PER_RETRY + ): + return CheckResult.create_is_ready_to_execute(checkpointed_result) + + # Create START checkpoint if not exists + if not checkpointed_result.is_existent(): + start_operation: OperationUpdate = OperationUpdate.create_step_start( + identifier=self.operation_identifier, + ) + # Checkpoint START operation with appropriate synchronization: + # - AtMostOncePerRetry: Use blocking checkpoint (is_sync=True) to prevent duplicate execution. + # The step must not execute until the START checkpoint is persisted, ensuring exactly-once semantics. + # - AtLeastOncePerRetry: Use non-blocking checkpoint (is_sync=False) for performance optimization. + # The step can execute immediately without waiting for checkpoint persistence, allowing at-least-once semantics. + is_sync: bool = ( + self.config.step_semantics is StepSemantics.AT_MOST_ONCE_PER_RETRY + ) + self.state.create_checkpoint( + operation_update=start_operation, is_sync=is_sync ) - ) - ) - try: - # this is the actual code provided by the caller to execute durably inside the step - raw_result: T = func(step_context) - serialized_result: str = serialize( - serdes=config.serdes, - value=raw_result, - operation_id=operation_identifier.operation_id, - durable_execution_arn=state.durable_execution_arn, - ) - success_operation: OperationUpdate = OperationUpdate.create_step_succeed( - identifier=operation_identifier, - payload=serialized_result, + # After creating sync checkpoint, check the status + if is_sync: + # Refresh checkpoint result to check for immediate response + refreshed_result: CheckpointedResult = self.state.get_checkpoint_result( + self.operation_identifier.operation_id + ) + + # START checkpoint only returns STARTED status + # Any errors would be thrown as runtime exceptions during checkpoint creation + if not refreshed_result.is_started(): + # This should never happen - defensive check + error_msg: str = f"Unexpected status after START checkpoint: {refreshed_result.status}" + raise InvalidStateError(error_msg) + + # If we reach here, status must be STARTED - ready to execute + return CheckResult.create_is_ready_to_execute(refreshed_result) + + # Ready to execute + return CheckResult.create_is_ready_to_execute(checkpointed_result) + + def execute(self, checkpointed_result: CheckpointedResult) -> T: + """Execute step function with error handling and retry logic. 
+ + Args: + checkpointed_result: The checkpoint data containing operation state + + Returns: + The result of executing the step function + + Raises: + ExecutionError: For fatal errors that should not be retried + May raise other exceptions that will be handled by retry_handler + """ + attempt: int = 0 + if checkpointed_result.operation and checkpointed_result.operation.step_details: + attempt = checkpointed_result.operation.step_details.attempt + + step_context: StepContext = StepContext( + logger=self.context_logger.with_log_info( + LogInfo.from_operation_identifier( + execution_state=self.state, + op_id=self.operation_identifier, + attempt=attempt, + ) + ) ) - # Checkpoint SUCCEED operation with blocking (is_sync=True, default). - # Must ensure the success state is persisted before returning the result to the caller. - # This guarantees the step result is durable and won't be lost if Lambda terminates. - state.create_checkpoint(operation_update=success_operation) - - logger.debug( - "✅ Successfully completed step for id: %s, name: %s", - operation_identifier.operation_id, - operation_identifier.name, - ) - return raw_result # noqa: TRY300 - except Exception as e: - if isinstance(e, ExecutionError): - # no retry on fatal - e.g checkpoint exception - logger.debug( - "💥 Fatal error for id: %s, name: %s", - operation_identifier.operation_id, - operation_identifier.name, + try: + # This is the actual code provided by the caller to execute durably inside the step + raw_result: T = self.func(step_context) + serialized_result: str = serialize( + serdes=self.config.serdes, + value=raw_result, + operation_id=self.operation_identifier.operation_id, + durable_execution_arn=self.state.durable_execution_arn, ) - # this bubbles up to execution.durable_execution, where it will exit with FAILED - raise - - logger.exception( - "❌ failed step for id: %s, name: %s", - operation_identifier.operation_id, - operation_identifier.name, - ) - retry_handler(e, state, operation_identifier, config, checkpointed_result) - # if we've failed to raise an exception from the retry_handler, then we are in a - # weird state, and should crash terminate the execution - msg = "retry handler should have raised an exception, but did not." - raise ExecutionError(msg) from None + success_operation: OperationUpdate = OperationUpdate.create_step_succeed( + identifier=self.operation_identifier, + payload=serialized_result, + ) + # Checkpoint SUCCEED operation with blocking (is_sync=True, default). + # Must ensure the success state is persisted before returning the result to the caller. + # This guarantees the step result is durable and won't be lost if Lambda terminates. + self.state.create_checkpoint(operation_update=success_operation) -# TODO: I don't much like this func, needs refactor. Messy grab-bag of args, refine. 
-def retry_handler(
-    error: Exception,
-    state: ExecutionState,
-    operation_identifier: OperationIdentifier,
-    config: StepConfig,
-    checkpointed_result: CheckpointedResult,
-):
-    """Checkpoint and suspend for replay if retry required, otherwise raise error."""
-    error_object = ErrorObject.from_exception(error)
+            logger.debug(
+                "✅ Successfully completed step for id: %s, name: %s",
+                self.operation_identifier.operation_id,
+                self.operation_identifier.name,
+            )
+            return raw_result  # noqa: TRY300
+        except Exception as e:
+            if isinstance(e, ExecutionError):
+                # No retry on fatal - e.g. checkpoint exception
+                logger.debug(
+                    "💥 Fatal error for id: %s, name: %s",
+                    self.operation_identifier.operation_id,
+                    self.operation_identifier.name,
+                )
+                # This bubbles up to execution.durable_execution, where it will exit with FAILED
+                raise
+
+            logger.exception(
+                "❌ Failed step for id: %s, name: %s",
+                self.operation_identifier.operation_id,
+                self.operation_identifier.name,
+            )

-    retry_strategy = config.retry_strategy or RetryPresets.default()
+            self.retry_handler(e, checkpointed_result)
+            # If we've failed to raise an exception from the retry_handler, then we are in a
+            # weird state, and should crash and terminate the execution
+            msg = "retry handler should have raised an exception, but did not."
+            raise ExecutionError(msg) from None

-    retry_attempt: int = (
-        checkpointed_result.operation.step_details.attempt
-        if (
-            checkpointed_result.operation and checkpointed_result.operation.step_details
-        )
-        else 0
-    )
-    retry_decision: RetryDecision = retry_strategy(error, retry_attempt + 1)
-
-    if retry_decision.should_retry:
-        logger.debug(
-            "Retrying step for id: %s, name: %s, attempt: %s",
-            operation_identifier.operation_id,
-            operation_identifier.name,
-            retry_attempt + 1,
+    def retry_handler(
+        self,
+        error: Exception,
+        checkpointed_result: CheckpointedResult,
+    ):
+        """Checkpoint and suspend for replay if retry required, otherwise raise error.
+
+        Args:
+            error: The exception that occurred during step execution
+            checkpointed_result: The checkpoint data containing operation state
+
+        Raises:
+            SuspendExecution: If retry is scheduled
+            StepInterruptedError: If the error is a StepInterruptedError
+            CallableRuntimeError: If retry is exhausted or error is not retryable
+        """
+        error_object = ErrorObject.from_exception(error)
+
+        retry_strategy = self.config.retry_strategy or RetryPresets.default()
+
+        retry_attempt: int = (
+            checkpointed_result.operation.step_details.attempt
+            if (
+                checkpointed_result.operation
+                and checkpointed_result.operation.step_details
+            )
+            else 0
        )
+        retry_decision: RetryDecision = retry_strategy(error, retry_attempt + 1)

-        # because we are issuing a retry and create an OperationUpdate
-        # we enforce a minimum delay second of 1, to match model behaviour.
-        # we localize enforcement and keep it outside suspension methods as:
-        # a) those are used throughout the codebase, e.g. in wait(..) <- enforcement is done in context
-        # b) they shouldn't know model specific details <- enforcement is done above
-        # and c) this "issue" arises from retry-decision and we shouldn't push it down
-        delay_seconds = retry_decision.delay_seconds
-        if delay_seconds < 1:
-            logger.warning(
-                (
-                    "Retry delay_seconds step for id: %s, name: %s,"
-                    "attempt: %s is %d < 1. Setting to minimum of 1 seconds."
-                ),
-                operation_identifier.operation_id,
-                operation_identifier.name,
+        if retry_decision.should_retry:
+            logger.debug(
+                "Retrying step for id: %s, name: %s, attempt: %s",
+                self.operation_identifier.operation_id,
+                self.operation_identifier.name,
                 retry_attempt + 1,
             )
-            delay_seconds = 1

-        retry_operation: OperationUpdate = OperationUpdate.create_step_retry(
-            identifier=operation_identifier,
-            error=error_object,
-            next_attempt_delay_seconds=delay_seconds,
-        )
+            # because we are issuing a retry and create an OperationUpdate
+            # we enforce a minimum delay of 1 second, to match model behaviour.
+            # we localize enforcement and keep it outside suspension methods as:
+            # a) those are used throughout the codebase, e.g. in wait(..) <- enforcement is done in context
+            # b) they shouldn't know model specific details <- enforcement is done above
+            # and c) this "issue" arises from retry-decision and we shouldn't push it down
+            delay_seconds = retry_decision.delay_seconds
+            if delay_seconds < 1:
+                logger.warning(
+                    (
+                        "Retry delay_seconds for step id: %s, name: %s, "
+                        "attempt: %s is %d < 1. Setting to minimum of 1 second."
+                    ),
+                    self.operation_identifier.operation_id,
+                    self.operation_identifier.name,
+                    retry_attempt + 1,
+                    delay_seconds,
+                )
+                delay_seconds = 1
+
+            retry_operation: OperationUpdate = OperationUpdate.create_step_retry(
+                identifier=self.operation_identifier,
+                error=error_object,
+                next_attempt_delay_seconds=delay_seconds,
+            )

-        # Checkpoint RETRY operation with blocking (is_sync=True, default).
-        # Must ensure retry state is persisted before suspending execution.
-        # This guarantees the retry attempt count and next attempt timestamp are durable.
-        state.create_checkpoint(operation_update=retry_operation)
-
-        suspend_with_optional_resume_delay(
-            msg=(
-                f"Retry scheduled for {operation_identifier.operation_id}"
-                f"in {retry_decision.delay_seconds} seconds"
-            ),
-            delay_seconds=delay_seconds,
-        )
+            # Checkpoint RETRY operation with blocking (is_sync=True, default).
+            # Must ensure retry state is persisted before suspending execution.
+            # This guarantees the retry attempt count and next attempt timestamp are durable.
+            self.state.create_checkpoint(operation_update=retry_operation)

-    # no retry
-    fail_operation: OperationUpdate = OperationUpdate.create_step_fail(
-        identifier=operation_identifier, error=error_object
-    )
+            suspend_with_optional_resume_delay(
+                msg=(
+                    f"Retry scheduled for {self.operation_identifier.operation_id} "
+                    f"in {delay_seconds} seconds"
+                ),
+                delay_seconds=delay_seconds,
+            )
+
+        # no retry
+        fail_operation: OperationUpdate = OperationUpdate.create_step_fail(
+            identifier=self.operation_identifier, error=error_object
+        )

-    # Checkpoint FAIL operation with blocking (is_sync=True, default).
-    # Must ensure the failure state is persisted before raising the exception.
-    # This guarantees the error is durable and the step won't be retried on replay.
-    state.create_checkpoint(operation_update=fail_operation)
+        # Checkpoint FAIL operation with blocking (is_sync=True, default).
+        # Must ensure the failure state is persisted before raising the exception.
+        # This guarantees the error is durable and the step won't be retried on replay.
+ self.state.create_checkpoint(operation_update=fail_operation) - if isinstance(error, StepInterruptedError): - raise error + if isinstance(error, StepInterruptedError): + raise error - raise error_object.to_callable_runtime_error() + raise error_object.to_callable_runtime_error() diff --git a/src/aws_durable_execution_sdk_python/operation/wait.py b/src/aws_durable_execution_sdk_python/operation/wait.py index 90d0880..fc16e66 100644 --- a/src/aws_durable_execution_sdk_python/operation/wait.py +++ b/src/aws_durable_execution_sdk_python/operation/wait.py @@ -6,6 +6,10 @@ from typing import TYPE_CHECKING from aws_durable_execution_sdk_python.lambda_service import OperationUpdate, WaitOptions +from aws_durable_execution_sdk_python.operation.base import ( + CheckResult, + OperationExecutor, +) from aws_durable_execution_sdk_python.suspend import suspend_with_optional_resume_delay if TYPE_CHECKING: @@ -18,36 +22,90 @@ logger = logging.getLogger(__name__) -def wait_handler( - seconds: int, state: ExecutionState, operation_identifier: OperationIdentifier -) -> None: - logger.debug( - "Wait requested for id: %s, name: %s", - operation_identifier.operation_id, - operation_identifier.name, - ) +class WaitOperationExecutor(OperationExecutor[None]): + """Executor for wait operations. - checkpointed_result: CheckpointedResult = state.get_checkpoint_result( - operation_identifier.operation_id - ) + Checks operation status after creating START checkpoints to handle operations + that complete synchronously, avoiding unnecessary execution or suspension. + """ - if checkpointed_result.is_succeeded(): - logger.debug( - "Wait already completed, skipping wait for id: %s, name: %s", - operation_identifier.operation_id, - operation_identifier.name, - ) - return + def __init__( + self, + seconds: int, + state: ExecutionState, + operation_identifier: OperationIdentifier, + ): + """Initialize the wait operation executor. + + Args: + seconds: Number of seconds to wait + state: The execution state + operation_identifier: The operation identifier + """ + self.seconds = seconds + self.state = state + self.operation_identifier = operation_identifier - if not checkpointed_result.is_existent(): - operation = OperationUpdate.create_wait_start( - identifier=operation_identifier, - wait_options=WaitOptions(wait_seconds=seconds), + def check_result_status(self) -> CheckResult[None]: + """Check operation status and create START checkpoint if needed. + + Called twice by process() when creating synchronous checkpoints: once before + and once after, to detect if the operation completed immediately. + + Returns: + CheckResult indicating the next action to take + + Raises: + SuspendExecution: When wait timer has not completed + """ + checkpointed_result: CheckpointedResult = self.state.get_checkpoint_result( + self.operation_identifier.operation_id ) - # Checkpoint wait START with blocking (is_sync=True, default). - # Must ensure the wait operation and scheduled timestamp are persisted before suspending. - # This guarantees the wait will resume at the correct time on the next invocation. 
- state.create_checkpoint(operation_update=operation) - msg = f"Wait for {seconds} seconds" - suspend_with_optional_resume_delay(msg, seconds) # throws suspend + # Terminal success - wait completed + if checkpointed_result.is_succeeded(): + logger.debug( + "Wait already completed, skipping wait for id: %s, name: %s", + self.operation_identifier.operation_id, + self.operation_identifier.name, + ) + return CheckResult.create_completed(None) + + # Create START checkpoint if not exists + if not checkpointed_result.is_existent(): + operation: OperationUpdate = OperationUpdate.create_wait_start( + identifier=self.operation_identifier, + wait_options=WaitOptions(wait_seconds=self.seconds), + ) + # Checkpoint wait START with blocking (is_sync=True, default). + # Must ensure the wait operation and scheduled timestamp are persisted before suspending. + # This guarantees the wait will resume at the correct time on the next invocation. + self.state.create_checkpoint(operation_update=operation, is_sync=True) + + logger.debug( + "Wait checkpoint created for id: %s, name: %s, will check for immediate response", + self.operation_identifier.operation_id, + self.operation_identifier.name, + ) + + # Signal to process() that checkpoint was created - which will re-run this check_result_status + # check from the top + return CheckResult.create_started() + + # Ready to suspend (checkpoint exists) + return CheckResult.create_is_ready_to_execute(checkpointed_result) + + def execute(self, _checkpointed_result: CheckpointedResult) -> None: + """Execute wait by suspending. + + Wait operations 'execute' by suspending execution until the timer completes. + This method never returns normally - it always suspends. + + Args: + _checkpointed_result: The checkpoint data (unused for wait) + + Raises: + SuspendExecution: Always suspends to wait for timer completion + """ + msg: str = f"Wait for {self.seconds} seconds" + suspend_with_optional_resume_delay(msg, self.seconds) # throws suspend diff --git a/src/aws_durable_execution_sdk_python/operation/wait_for_condition.py b/src/aws_durable_execution_sdk_python/operation/wait_for_condition.py index bb2d6e7..d1c2b4f 100644 --- a/src/aws_durable_execution_sdk_python/operation/wait_for_condition.py +++ b/src/aws_durable_execution_sdk_python/operation/wait_for_condition.py @@ -13,6 +13,10 @@ OperationUpdate, ) from aws_durable_execution_sdk_python.logger import LogInfo +from aws_durable_execution_sdk_python.operation.base import ( + CheckResult, + OperationExecutor, +) from aws_durable_execution_sdk_python.serdes import deserialize, serialize from aws_durable_execution_sdk_python.suspend import ( suspend_with_optional_resume_delay, @@ -40,196 +44,239 @@ logger = logging.getLogger(__name__) -def wait_for_condition_handler( - check: Callable[[T, WaitForConditionCheckContext], T], - config: WaitForConditionConfig[T], - state: ExecutionState, - operation_identifier: OperationIdentifier, - context_logger: Logger, -) -> T: - """Handle wait_for_condition operation. +class WaitForConditionOperationExecutor(OperationExecutor[T]): + """Executor for wait_for_condition operations. - wait_for_condition creates a STEP checkpoint. + Checks operation status after creating START checkpoints to handle operations + that complete synchronously, avoiding unnecessary execution or suspension. 
""" - logger.debug( - "▶️ Executing wait_for_condition for id: %s, name: %s", - operation_identifier.operation_id, - operation_identifier.name, - ) - checkpointed_result: CheckpointedResult = state.get_checkpoint_result( - operation_identifier.operation_id - ) - - # Check if already completed - if checkpointed_result.is_succeeded(): - logger.debug( - "wait_for_condition already completed for id: %s, name: %s", - operation_identifier.operation_id, - operation_identifier.name, - ) - if checkpointed_result.result is None: - return None # type: ignore - return deserialize( - serdes=config.serdes, - data=checkpointed_result.result, - operation_id=operation_identifier.operation_id, - durable_execution_arn=state.durable_execution_arn, + def __init__( + self, + check: Callable[[T, WaitForConditionCheckContext], T], + config: WaitForConditionConfig[T], + state: ExecutionState, + operation_identifier: OperationIdentifier, + context_logger: Logger, + ): + """Initialize the wait_for_condition executor. + + Args: + check: The check function to evaluate the condition + config: Configuration for the wait_for_condition operation + state: The execution state + operation_identifier: The operation identifier + context_logger: Logger for the operation context + """ + self.check = check + self.config = config + self.state = state + self.operation_identifier = operation_identifier + self.context_logger = context_logger + + def check_result_status(self) -> CheckResult[T]: + """Check operation status and create START checkpoint if needed. + + Called twice by process() when creating synchronous checkpoints: once before + and once after, to detect if the operation completed immediately. + + Returns: + CheckResult indicating the next action to take + + Raises: + CallableRuntimeError: For FAILED operations + SuspendExecution: For PENDING operations waiting for retry + """ + checkpointed_result = self.state.get_checkpoint_result( + self.operation_identifier.operation_id ) - if checkpointed_result.is_failed(): - checkpointed_result.raise_callable_error() + # Check if already completed + if checkpointed_result.is_succeeded(): + logger.debug( + "wait_for_condition already completed for id: %s, name: %s", + self.operation_identifier.operation_id, + self.operation_identifier.name, + ) + if checkpointed_result.result is None: + return CheckResult.create_completed(None) # type: ignore + result = deserialize( + serdes=self.config.serdes, + data=checkpointed_result.result, + operation_id=self.operation_identifier.operation_id, + durable_execution_arn=self.state.durable_execution_arn, + ) + return CheckResult.create_completed(result) + + # Terminal failure + if checkpointed_result.is_failed(): + checkpointed_result.raise_callable_error() + + # Pending retry + if checkpointed_result.is_pending(): + scheduled_timestamp = checkpointed_result.get_next_attempt_timestamp() + suspend_with_optional_resume_timestamp( + msg=f"wait_for_condition {self.operation_identifier.name or self.operation_identifier.operation_id} will retry at timestamp {scheduled_timestamp}", + datetime_timestamp=scheduled_timestamp, + ) - if checkpointed_result.is_pending(): - scheduled_timestamp = checkpointed_result.get_next_attempt_timestamp() - suspend_with_optional_resume_timestamp( - msg=f"wait_for_condition {operation_identifier.name or operation_identifier.operation_id} will retry at timestamp {scheduled_timestamp}", - datetime_timestamp=scheduled_timestamp, - ) + # Create START checkpoint if not started + if not checkpointed_result.is_started(): + 
start_operation = OperationUpdate.create_wait_for_condition_start( + identifier=self.operation_identifier, + ) + # Checkpoint wait_for_condition START with non-blocking (is_sync=False). + # This is purely for observability - we don't need to wait for persistence before + # executing the check function. The START checkpoint just records that polling began. + self.state.create_checkpoint( + operation_update=start_operation, is_sync=False + ) + # For async checkpoint, no immediate response possible + # Proceed directly to execute with current checkpoint data + + # Ready to execute check function + return CheckResult.create_is_ready_to_execute(checkpointed_result) + + def execute(self, checkpointed_result: CheckpointedResult) -> T: + """Execute check function and handle decision. + + Args: + checkpointed_result: The checkpoint data - attempt: int = 1 - if checkpointed_result.is_started_or_ready(): - # This is a retry - get state from previous checkpoint - if checkpointed_result.result: + Returns: + The final state when condition is met + + Raises: + Suspends if condition not met + Raises error if check function fails + """ + # Determine current state from checkpoint + if checkpointed_result.is_started_or_ready() and checkpointed_result.result: try: current_state = deserialize( - serdes=config.serdes, + serdes=self.config.serdes, data=checkpointed_result.result, - operation_id=operation_identifier.operation_id, - durable_execution_arn=state.durable_execution_arn, + operation_id=self.operation_identifier.operation_id, + durable_execution_arn=self.state.durable_execution_arn, ) except Exception: - # default to initial state if there's an error getting checkpointed state + # Default to initial state if there's an error getting checkpointed state logger.exception( "⚠️ wait_for_condition failed to deserialize state for id: %s, name: %s. Using initial state.", - operation_identifier.operation_id, - operation_identifier.name, + self.operation_identifier.operation_id, + self.operation_identifier.name, ) - current_state = config.initial_state + current_state = self.config.initial_state else: - current_state = config.initial_state + current_state = self.config.initial_state - # at this point operation has to exist. Nonetheless, just in case somehow it's not there. + # Get attempt number + attempt: int = 1 if checkpointed_result.operation and checkpointed_result.operation.step_details: attempt = checkpointed_result.operation.step_details.attempt - else: - # First execution - current_state = config.initial_state - - # Checkpoint START for observability. - if not checkpointed_result.is_started(): - start_operation: OperationUpdate = ( - OperationUpdate.create_wait_for_condition_start( - identifier=operation_identifier, - ) - ) - # Checkpoint wait_for_condition START with non-blocking (is_sync=False). - # This is purely for observability - we don't need to wait for persistence before - # executing the check function. The START checkpoint just records that polling began. 
- state.create_checkpoint(operation_update=start_operation, is_sync=False) - - try: - # Execute the check function with the injected logger - check_context = WaitForConditionCheckContext( - logger=context_logger.with_log_info( - LogInfo.from_operation_identifier( - execution_arn=state.durable_execution_arn, - op_id=operation_identifier, - attempt=attempt, + + try: + # Execute the check function with the injected logger + check_context = WaitForConditionCheckContext( + logger=self.context_logger.with_log_info( + LogInfo.from_operation_identifier( + execution_state=self.state, + op_id=self.operation_identifier, + attempt=attempt, + ) ) ) - ) - new_state = check(current_state, check_context) + new_state = self.check(current_state, check_context) - # Check if condition is met with the wait strategy - decision: WaitForConditionDecision = config.wait_strategy(new_state, attempt) - - serialized_state = serialize( - serdes=config.serdes, - value=new_state, - operation_id=operation_identifier.operation_id, - durable_execution_arn=state.durable_execution_arn, - ) - - logger.debug( - "wait_for_condition check completed: %s, name: %s, attempt: %s", - operation_identifier.operation_id, - operation_identifier.name, - attempt, - ) + # Check if condition is met with the wait strategy + decision: WaitForConditionDecision = self.config.wait_strategy( + new_state, attempt + ) - if not decision.should_continue: - # Condition is met - complete successfully - success_operation = OperationUpdate.create_wait_for_condition_succeed( - identifier=operation_identifier, - payload=serialized_state, + serialized_state = serialize( + serdes=self.config.serdes, + value=new_state, + operation_id=self.operation_identifier.operation_id, + durable_execution_arn=self.state.durable_execution_arn, ) - # Checkpoint SUCCEED operation with blocking (is_sync=True, default). - # Must ensure the final state is persisted before returning to the caller. - # This guarantees the condition result is durable and won't be re-evaluated on replay. - state.create_checkpoint(operation_update=success_operation) logger.debug( - "✅ wait_for_condition completed for id: %s, name: %s", - operation_identifier.operation_id, - operation_identifier.name, - ) - return new_state - - # Condition not met - schedule retry - # we enforce a minimum delay second of 1, to match model behaviour. - # we localize enforcement and keep it outside suspension methods as: - # a) those are used throughout the codebase, e.g. in wait(..) <- enforcement is done in context - # b) they shouldn't know model specific details <- enforcement is done above - # and c) this "issue" arises from retry-decision and shouldn't be chased deeper. - delay_seconds = decision.delay_seconds - if delay_seconds is not None and delay_seconds < 1: - logger.warning( - ( - "WaitDecision delay_seconds step for id: %s, name: %s," - "is %d < 1. Setting to minimum of 1 seconds." 
-                ),
-                operation_identifier.operation_id,
-                operation_identifier.name,
-                delay_seconds,
+                "wait_for_condition check completed: %s, name: %s, attempt: %s",
+                self.operation_identifier.operation_id,
+                self.operation_identifier.name,
+                attempt,
             )
-            delay_seconds = 1

-        retry_operation = OperationUpdate.create_wait_for_condition_retry(
-            identifier=operation_identifier,
-            payload=serialized_state,
-            next_attempt_delay_seconds=delay_seconds,
-        )
+            if not decision.should_continue:
+                # Condition is met - complete successfully
+                success_operation = OperationUpdate.create_wait_for_condition_succeed(
+                    identifier=self.operation_identifier,
+                    payload=serialized_state,
+                )
+                # Checkpoint SUCCEED operation with blocking (is_sync=True, default).
+                # Must ensure the final state is persisted before returning to the caller.
+                # This guarantees the condition result is durable and won't be re-evaluated on replay.
+                self.state.create_checkpoint(operation_update=success_operation)
+
+                logger.debug(
+                    "✅ wait_for_condition completed for id: %s, name: %s",
+                    self.operation_identifier.operation_id,
+                    self.operation_identifier.name,
+                )
+                return new_state
+
+            # Condition not met - schedule retry
+            # We enforce a minimum delay of 1 second, to match model behaviour.
+            delay_seconds = decision.delay_seconds
+            if delay_seconds is not None and delay_seconds < 1:
+                logger.warning(
+                    (
+                        "WaitDecision delay_seconds for id: %s, name: %s, "
+                        "is %d < 1. Setting to minimum of 1 second."
+                    ),
+                    self.operation_identifier.operation_id,
+                    self.operation_identifier.name,
+                    delay_seconds,
+                )
+                delay_seconds = 1

-        # Checkpoint RETRY operation with blocking (is_sync=True, default).
-        # Must ensure the current state and next attempt timestamp are persisted before suspending.
-        # This guarantees the polling state is durable and will resume correctly on the next invocation.
-        state.create_checkpoint(operation_update=retry_operation)
+            retry_operation = OperationUpdate.create_wait_for_condition_retry(
+                identifier=self.operation_identifier,
+                payload=serialized_state,
+                next_attempt_delay_seconds=delay_seconds,
+            )

-        suspend_with_optional_resume_delay(
-            msg=f"wait_for_condition {operation_identifier.name or operation_identifier.operation_id} will retry in {decision.delay_seconds} seconds",
-            delay_seconds=decision.delay_seconds,
-        )
+            # Checkpoint RETRY operation with blocking (is_sync=True, default).
+            # Must ensure the current state and next attempt timestamp are persisted before suspending.
+            # This guarantees the polling state is durable and will resume correctly on the next invocation.
+            self.state.create_checkpoint(operation_update=retry_operation)

-    except Exception as e:
-        # Mark as failed - waitForCondition doesn't have its own retry logic for errors
-        # If the check function throws, it's considered a failure
-        logger.exception(
-            "❌ wait_for_condition failed for id: %s, name: %s",
-            operation_identifier.operation_id,
-            operation_identifier.name,
-        )
+            suspend_with_optional_resume_delay(
+                msg=f"wait_for_condition {self.operation_identifier.name or self.operation_identifier.operation_id} will retry in {delay_seconds} seconds",
+                delay_seconds=delay_seconds,
+            )
+
+        except Exception as e:
+            # Mark as failed - waitForCondition doesn't have its own retry logic for errors
+            # If the check function throws, it's considered a failure
+            logger.exception(
+                "❌ wait_for_condition failed for id: %s, name: %s",
+                self.operation_identifier.operation_id,
+                self.operation_identifier.name,
+            )

-        fail_operation = OperationUpdate.create_wait_for_condition_fail(
-            identifier=operation_identifier,
-            error=ErrorObject.from_exception(e),
+            fail_operation = OperationUpdate.create_wait_for_condition_fail(
+                identifier=self.operation_identifier,
+                error=ErrorObject.from_exception(e),
+            )
+            # Checkpoint FAIL operation with blocking (is_sync=True, default).
+            # Must ensure the failure state is persisted before raising the exception.
+            # This guarantees the error is durable and the condition won't be re-evaluated on replay.
+            self.state.create_checkpoint(operation_update=fail_operation)
+            raise
+
+        msg: str = "wait_for_condition should never reach this point"  # pragma: no cover
- state.create_checkpoint(operation_update=fail_operation) - raise - - msg: str = "wait_for_condition should never reach this point" # pragma: no cover - raise ExecutionError(msg) # pragma: no cover + raise ExecutionError(msg) # pragma: no cover diff --git a/src/aws_durable_execution_sdk_python/retries.py b/src/aws_durable_execution_sdk_python/retries.py index 4b8e885..5a09db2 100644 --- a/src/aws_durable_execution_sdk_python/retries.py +++ b/src/aws_durable_execution_sdk_python/retries.py @@ -7,82 +7,113 @@ from dataclasses import dataclass, field from typing import TYPE_CHECKING -from aws_durable_execution_sdk_python.config import JitterStrategy +from aws_durable_execution_sdk_python.config import Duration, JitterStrategy if TYPE_CHECKING: from collections.abc import Callable Numeric = int | float +# Default pattern that matches all error messages +_DEFAULT_RETRYABLE_ERROR_PATTERN = re.compile(r".*") + @dataclass class RetryDecision: """Decision about whether to retry a step and with what delay.""" should_retry: bool - delay_seconds: int + delay: Duration + + @property + def delay_seconds(self) -> int: + """Get delay in seconds.""" + return self.delay.to_seconds() @classmethod - def retry(cls, delay_seconds: int) -> RetryDecision: + def retry(cls, delay: Duration) -> RetryDecision: """Create a retry decision.""" - return cls(should_retry=True, delay_seconds=delay_seconds) + return cls(should_retry=True, delay=delay) @classmethod def no_retry(cls) -> RetryDecision: """Create a no-retry decision.""" - return cls(should_retry=False, delay_seconds=0) + return cls(should_retry=False, delay=Duration()) @dataclass class RetryStrategyConfig: max_attempts: int = 3 - initial_delay_seconds: int = 5 - max_delay_seconds: int = 300 # 5 minutes + initial_delay: Duration = field(default_factory=lambda: Duration.from_seconds(5)) + max_delay: Duration = field( + default_factory=lambda: Duration.from_minutes(5) + ) # 5 minutes backoff_rate: Numeric = 2.0 jitter_strategy: JitterStrategy = field(default=JitterStrategy.FULL) - retryable_errors: list[str | re.Pattern] = field( - default_factory=lambda: [re.compile(r".*")] - ) - retryable_error_types: list[type[Exception]] = field(default_factory=list) + retryable_errors: list[str | re.Pattern] | None = None + retryable_error_types: list[type[Exception]] | None = None + + @property + def initial_delay_seconds(self) -> int: + """Get initial delay in seconds.""" + return self.initial_delay.to_seconds() + + @property + def max_delay_seconds(self) -> int: + """Get max delay in seconds.""" + return self.max_delay.to_seconds() def create_retry_strategy( - config: RetryStrategyConfig, + config: RetryStrategyConfig | None = None, ) -> Callable[[Exception, int], RetryDecision]: if config is None: config = RetryStrategyConfig() + # Apply default retryableErrors only if user didn't specify either filter + should_use_default_errors: bool = ( + config.retryable_errors is None and config.retryable_error_types is None + ) + + retryable_errors: list[str | re.Pattern] = ( + config.retryable_errors + if config.retryable_errors is not None + else ([_DEFAULT_RETRYABLE_ERROR_PATTERN] if should_use_default_errors else []) + ) + retryable_error_types: list[type[Exception]] = config.retryable_error_types or [] + def retry_strategy(error: Exception, attempts_made: int) -> RetryDecision: # Check if we've exceeded max attempts if attempts_made >= config.max_attempts: return RetryDecision.no_retry() # Check if error is retryable based on error message - is_retryable_error_message = any( + 
is_retryable_error_message: bool = any( pattern.search(str(error)) if isinstance(pattern, re.Pattern) else pattern in str(error) - for pattern in config.retryable_errors + for pattern in retryable_errors ) # Check if error is retryable based on error type - is_retryable_error_type = any( - isinstance(error, error_type) for error_type in config.retryable_error_types + is_retryable_error_type: bool = any( + isinstance(error, error_type) for error_type in retryable_error_types ) if not is_retryable_error_message and not is_retryable_error_type: return RetryDecision.no_retry() # Calculate delay with exponential backoff - delay = min( + base_delay: float = min( config.initial_delay_seconds * (config.backoff_rate ** (attempts_made - 1)), config.max_delay_seconds, ) - delay_with_jitter = delay + config.jitter_strategy.compute_jitter(delay) - delay_with_jitter = math.ceil(delay_with_jitter) - final_delay = max(1, delay_with_jitter) + # Apply jitter to get final delay + delay_with_jitter: float = config.jitter_strategy.apply_jitter(base_delay) + # Round up and ensure minimum of 1 second + final_delay: int = max(1, math.ceil(delay_with_jitter)) - return RetryDecision.retry(round(final_delay)) + return RetryDecision.retry(Duration(seconds=final_delay)) return retry_strategy @@ -101,8 +132,8 @@ def default(cls) -> Callable[[Exception, int], RetryDecision]: return create_retry_strategy( RetryStrategyConfig( max_attempts=6, - initial_delay_seconds=5, - max_delay_seconds=60, + initial_delay=Duration.from_seconds(5), + max_delay=Duration.from_minutes(1), backoff_rate=2, jitter_strategy=JitterStrategy.FULL, ) @@ -123,8 +154,8 @@ def resource_availability(cls) -> Callable[[Exception, int], RetryDecision]: return create_retry_strategy( RetryStrategyConfig( max_attempts=5, - initial_delay_seconds=5, - max_delay_seconds=300, + initial_delay=Duration.from_seconds(5), + max_delay=Duration.from_minutes(5), backoff_rate=2, ) ) @@ -135,8 +166,8 @@ def critical(cls) -> Callable[[Exception, int], RetryDecision]: return create_retry_strategy( RetryStrategyConfig( max_attempts=10, - initial_delay_seconds=1, - max_delay_seconds=60, + initial_delay=Duration.from_seconds(1), + max_delay=Duration.from_minutes(1), backoff_rate=1.5, jitter_strategy=JitterStrategy.NONE, ) diff --git a/src/aws_durable_execution_sdk_python/serdes.py b/src/aws_durable_execution_sdk_python/serdes.py index e979a72..b3b704a 100644 --- a/src/aws_durable_execution_sdk_python/serdes.py +++ b/src/aws_durable_execution_sdk_python/serdes.py @@ -32,6 +32,7 @@ from enum import StrEnum from typing import Any, Generic, Protocol, TypeVar +from aws_durable_execution_sdk_python.concurrency.models import BatchResult from aws_durable_execution_sdk_python.exceptions import ( DurableExecutionsError, ExecutionError, @@ -62,6 +63,7 @@ class TypeTag(StrEnum): TUPLE = "t" LIST = "l" DICT = "m" + BATCH_RESULT = "br" @dataclass(frozen=True) @@ -206,7 +208,14 @@ def dispatcher(self): def encode(self, obj: Any) -> EncodedValue: """Encode container using dispatcher for recursive elements.""" + match obj: + case BatchResult(): + # Encode BatchResult as dict with special tag + return EncodedValue( + TypeTag.BATCH_RESULT, + self._wrap(obj.to_dict(), self.dispatcher).value, + ) case list(): return EncodedValue( TypeTag.LIST, [self._wrap(v, self.dispatcher) for v in obj] @@ -230,7 +239,13 @@ def encode(self, obj: Any) -> EncodedValue: def decode(self, tag: TypeTag, value: Any) -> Any: """Decode container using dispatcher for recursive elements.""" + match tag: + case 
TypeTag.BATCH_RESULT: + # Decode BatchResult from dict - value is already the dict structure + # First decode it as a dict to unwrap all nested EncodedValues + decoded_dict = self.decode(TypeTag.DICT, value) + return BatchResult.from_dict(decoded_dict) case TypeTag.LIST: if not isinstance(value, list): msg = f"Expected list, got {type(value)}" @@ -292,7 +307,7 @@ def encode(self, obj: Any) -> EncodedValue: return self.decimal_codec.encode(obj) case datetime() | date(): return self.datetime_codec.encode(obj) - case list() | tuple() | dict(): + case list() | tuple() | dict() | BatchResult(): return self.container_codec.encode(obj) case _: msg = f"Unsupported type: {type(obj)}" @@ -316,7 +331,7 @@ def decode(self, tag: TypeTag, value: Any) -> Any: return self.decimal_codec.decode(tag, value) case TypeTag.DATETIME | TypeTag.DATE: return self.datetime_codec.decode(tag, value) - case TypeTag.LIST | TypeTag.TUPLE | TypeTag.DICT: + case TypeTag.LIST | TypeTag.TUPLE | TypeTag.DICT | TypeTag.BATCH_RESULT: return self.container_codec.decode(tag, value) case _: msg = f"Unknown type tag: {tag}" @@ -357,6 +372,14 @@ def is_primitive(obj: Any) -> bool: return False +class PassThroughSerDes(SerDes[T]): + def serialize(self, value: T, _: SerDesContext) -> str: # noqa: PLR6301 + return value # type: ignore + + def deserialize(self, data: str, _: SerDesContext) -> T: # noqa: PLR6301 + return data # type: ignore + + class JsonSerDes(SerDes[T]): def serialize(self, value: T, _: SerDesContext) -> str: # noqa: PLR6301 return json.dumps(value) @@ -392,10 +415,14 @@ def deserialize(self, data: str, context: SerDesContext | None = None) -> Any: if not (isinstance(obj, dict) and TYPE_TOKEN in obj and VALUE_TOKEN in obj): msg = 'Malformed envelope: missing "t" or "v" at root.' 
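# Illustrative sketch (not part of this patch): the Python 3.11 pitfall behind
# the try/except tag parsing that follows. On 3.11, `"x" in SomeEnum` raises
# TypeError for a plain string, while calling the enum constructor raises
# ValueError for unknown values on every supported version, so the constructor
# is the portable membership check. Tag here is a stand-in for TypeTag.
from enum import Enum

class Tag(Enum):
    LIST = "l"
    DICT = "m"

def parse_tag(raw: str) -> Tag:
    try:
        return Tag(raw)  # ValueError for unknown tags, on 3.11 and 3.12+ alike
    except ValueError:
        msg = f'Unknown type tag: "{raw}"'
        raise ValueError(msg) from None

assert parse_tag("l") is Tag.LIST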
raise SerDesError(msg) - if obj[TYPE_TOKEN] not in TypeTag: + # Python 3.11 compatibility: Using try-except instead of 'in' operator + # because checking 'str in EnumType' raises TypeError in Python 3.11 + try: + tag = TypeTag(obj[TYPE_TOKEN]) + except ValueError: msg = f'Unknown type tag: "{obj[TYPE_TOKEN]}"' - raise SerDesError(msg) - tag = TypeTag(obj[TYPE_TOKEN]) + raise SerDesError(msg) from None + return self._codec.decode(tag, obj[VALUE_TOKEN]) def _to_json_serializable(self, obj: Any) -> Any: @@ -414,8 +441,8 @@ def _to_json_serializable(self, obj: Any) -> Any: return obj -_DEFAULT_JSON_SERDES: SerDes[Any] = JsonSerDes() -_EXTENDED_TYPES_SERDES: SerDes[Any] = ExtendedTypeSerDes() +DEFAULT_JSON_SERDES: SerDes[Any] = JsonSerDes() +EXTENDED_TYPES_SERDES: SerDes[Any] = ExtendedTypeSerDes() def serialize( @@ -436,7 +463,7 @@ def serialize( FatalError: If serialization fails """ serdes_context: SerDesContext = SerDesContext(operation_id, durable_execution_arn) - active_serdes: SerDes[T] = serdes or _EXTENDED_TYPES_SERDES + active_serdes: SerDes[T] = serdes or EXTENDED_TYPES_SERDES try: return active_serdes.serialize(value, serdes_context) except Exception as e: @@ -466,7 +493,7 @@ def deserialize( FatalError: If deserialization fails """ serdes_context: SerDesContext = SerDesContext(operation_id, durable_execution_arn) - active_serdes: SerDes[T] = serdes or _EXTENDED_TYPES_SERDES + active_serdes: SerDes[T] = serdes or EXTENDED_TYPES_SERDES try: return active_serdes.deserialize(data, serdes_context) except Exception as e: diff --git a/src/aws_durable_execution_sdk_python/state.py b/src/aws_durable_execution_sdk_python/state.py index d97d19d..a6fc0c7 100644 --- a/src/aws_durable_execution_sdk_python/state.py +++ b/src/aws_durable_execution_sdk_python/state.py @@ -8,6 +8,7 @@ import threading import time from dataclasses import dataclass +from enum import Enum from threading import Lock from typing import TYPE_CHECKING @@ -15,6 +16,7 @@ BackgroundThreadError, CallableRuntimeError, DurableExecutionsError, + OrphanedChildException, ) from aws_durable_execution_sdk_python.lambda_service import ( CheckpointOutput, @@ -210,6 +212,13 @@ def get_next_attempt_timestamp(self) -> datetime.datetime | None: CHECKPOINT_NOT_FOUND = CheckpointedResult.create_not_found() +class ReplayStatus(Enum): + """Status indicating whether execution is replaying or executing new operations.""" + + REPLAY = "replay" + NEW = "new" + + class ExecutionState: """Get, set and maintain execution state. This is mutable. Create and check checkpoints.""" @@ -220,6 +229,7 @@ def __init__( operations: MutableMapping[str, Operation], service_client: DurableServiceClient, batcher_config: CheckpointBatcherConfig | None = None, + replay_status: ReplayStatus = ReplayStatus.NEW, ): self.durable_execution_arn: str = durable_execution_arn self._current_checkpoint_token: str = initial_checkpoint_token @@ -247,6 +257,9 @@ def __init__( # Protects parent_to_children and parent_done self._parent_done_lock: Lock = Lock() + self._replay_status: ReplayStatus = replay_status + self._replay_status_lock: Lock = Lock() + self._visited_operations: set[str] = set() def fetch_paginated_operations( self, @@ -277,6 +290,48 @@ def fetch_paginated_operations( with self._operations_lock: self.operations.update({op.operation_id: op for op in all_operations}) + def track_replay(self, operation_id: str) -> None: + """Check if operation exists with completed status; if not, transition to NEW status. 
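# Illustrative sketch (not part of this patch): the replay-boundary rule this
# docstring describes, reduced to set arithmetic. Execution stays in REPLAY
# until every terminal (non-execution) operation recovered from checkpoints
# has been visited again; once the visited set covers them all, the status
# flips to NEW. This is a plain-Python model, not the lock-guarded
# ExecutionState implementation below.
completed_ops = {"op1", "op2"}  # terminal operations loaded from checkpoints
visited: set[str] = set()
replaying = True

for op_id in ("op1", "op2", "op3"):  # operations in re-execution order
    if replaying:
        visited.add(op_id)
        if completed_ops.issubset(visited):
            replaying = False  # replay boundary reached at this operation
    print(op_id, "REPLAY" if replaying else "NEW")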
+ + This method is called before each operation (step, wait, invoke, etc.) to determine + if we've reached the replay boundary. Once we encounter an operation that doesn't + exist or isn't completed, we transition from REPLAY to NEW status, which enables + logging for all subsequent code. + + Args: + operation_id: The operation ID to check + """ + with self._replay_status_lock: + if self._replay_status == ReplayStatus.REPLAY: + self._visited_operations.add(operation_id) + completed_ops = { + op_id + for op_id, op in self.operations.items() + if op.operation_type != OperationType.EXECUTION + and op.status + in { + OperationStatus.SUCCEEDED, + OperationStatus.FAILED, + OperationStatus.CANCELLED, + OperationStatus.STOPPED, + } + } + if completed_ops.issubset(self._visited_operations): + logger.debug( + "Transitioning from REPLAY to NEW status at operation %s", + operation_id, + ) + self._replay_status = ReplayStatus.NEW + + def is_replaying(self) -> bool: + """Check if execution is currently in replay mode. + + Returns: + True if in REPLAY status, False if in NEW status + """ + with self._replay_status_lock: + return self._replay_status is ReplayStatus.REPLAY + def get_checkpoint_result(self, checkpoint_id: str) -> CheckpointedResult: """Get checkpoint result. @@ -395,7 +450,13 @@ def create_checkpoint( "Rejecting checkpoint for operation %s - parent is done", operation_update.operation_id, ) - return + error_msg = ( + "Parent context completed, child operation cannot checkpoint" + ) + raise OrphanedChildException( + error_msg, + operation_id=operation_update.operation_id, + ) # Check if background checkpointing has failed if self._checkpointing_failed.is_set(): @@ -731,3 +792,6 @@ def _calculate_operation_size(queued_op: QueuedOperation) -> int: # Use JSON serialization to estimate size serialized = json.dumps(queued_op.operation_update.to_dict()).encode("utf-8") return len(serialized) + + def close(self): + self.stop_checkpointing() diff --git a/src/aws_durable_execution_sdk_python/types.py b/src/aws_durable_execution_sdk_python/types.py index 65c4be5..9181be9 100644 --- a/src/aws_durable_execution_sdk_python/types.py +++ b/src/aws_durable_execution_sdk_python/types.py @@ -13,6 +13,7 @@ BatchedInput, CallbackConfig, ChildConfig, + Duration, MapConfig, ParallelConfig, StepConfig, @@ -56,6 +57,11 @@ class StepContext(OperationContext): pass +@dataclass(frozen=True) +class WaitForCallbackContext(OperationContext): + """Context provided to waitForCallback submitter functions.""" + + @dataclass(frozen=True) class WaitForConditionCheckContext(OperationContext): pass @@ -126,7 +132,7 @@ def parallel( ... # pragma: no cover @abstractmethod - def wait(self, seconds: int, name: str | None = None) -> None: + def wait(self, duration: Duration, name: str | None = None) -> None: """Wait for a specified amount of time.""" ... 
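# Illustrative sketch (not part of this patch): what the wait() signature
# change above means for callers, assuming the SDK from this diff is
# installed. Waits now take a Duration instead of raw integer seconds, and the
# tests below show that sub-second durations are rejected with a
# ValidationError. The handler body itself is a hypothetical example.
from aws_durable_execution_sdk_python.config import Duration

def handler_body(context) -> None:
    # Before this change: context.wait(30)
    context.wait(Duration.from_seconds(30), name="cool_down")
    context.wait(Duration.from_minutes(5))
    # context.wait(Duration.from_seconds(0)) would raise ValidationError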
# pragma: no cover diff --git a/src/aws_durable_execution_sdk_python/waits.py b/src/aws_durable_execution_sdk_python/waits.py index 351fb69..b4d740a 100644 --- a/src/aws_durable_execution_sdk_python/waits.py +++ b/src/aws_durable_execution_sdk_python/waits.py @@ -2,10 +2,11 @@ from __future__ import annotations +import math from dataclasses import dataclass, field from typing import TYPE_CHECKING, Generic -from aws_durable_execution_sdk_python.config import JitterStrategy, T +from aws_durable_execution_sdk_python.config import Duration, JitterStrategy, T if TYPE_CHECKING: from collections.abc import Callable @@ -20,28 +21,52 @@ class WaitDecision: """Decision about whether to wait a step and with what delay.""" should_wait: bool - delay_seconds: int + delay: Duration + + @property + def delay_seconds(self) -> int: + """Get delay in seconds.""" + return self.delay.to_seconds() @classmethod - def wait(cls, delay_seconds: int) -> WaitDecision: + def wait(cls, delay: Duration) -> WaitDecision: """Create a wait decision.""" - return cls(should_wait=True, delay_seconds=delay_seconds) + return cls(should_wait=True, delay=delay) @classmethod def no_wait(cls) -> WaitDecision: """Create a no-wait decision.""" - return cls(should_wait=False, delay_seconds=0) + return cls(should_wait=False, delay=Duration()) @dataclass class WaitStrategyConfig(Generic[T]): should_continue_polling: Callable[[T], bool] max_attempts: int = 60 - initial_delay_seconds: int = 5 - max_delay_seconds: int = 300 # 5 minutes + initial_delay: Duration = field(default_factory=lambda: Duration.from_seconds(5)) + max_delay: Duration = field( + default_factory=lambda: Duration.from_minutes(5) + ) # 5 minutes backoff_rate: Numeric = 1.5 jitter_strategy: JitterStrategy = field(default=JitterStrategy.FULL) - timeout_seconds: int | None = None # Not implemented yet + timeout: Duration | None = None # Not implemented yet + + @property + def initial_delay_seconds(self) -> int: + """Get initial delay in seconds.""" + return self.initial_delay.to_seconds() + + @property + def max_delay_seconds(self) -> int: + """Get max delay in seconds.""" + return self.max_delay.to_seconds() + + @property + def timeout_seconds(self) -> int | None: + """Get timeout in seconds.""" + if self.timeout is None: + return None + return self.timeout.to_seconds() def create_wait_strategy( @@ -57,19 +82,18 @@ def wait_strategy(result: T, attempts_made: int) -> WaitDecision: return WaitDecision.no_wait() # Calculate delay with exponential backoff - base_delay = min( + base_delay: float = min( config.initial_delay_seconds * (config.backoff_rate ** (attempts_made - 1)), config.max_delay_seconds, ) - # Apply jitter (add jitter to base delay) - jitter = config.jitter_strategy.compute_jitter(base_delay) - delay_with_jitter = base_delay + jitter + # Apply jitter to get final delay + delay_with_jitter: float = config.jitter_strategy.apply_jitter(base_delay) - # Ensure delay is an integer >= 1 - final_delay = max(1, round(delay_with_jitter)) + # Round up and ensure minimum of 1 second + final_delay: int = max(1, math.ceil(delay_with_jitter)) - return WaitDecision.wait(final_delay) + return WaitDecision.wait(Duration(seconds=final_delay)) return wait_strategy @@ -79,17 +103,22 @@ class WaitForConditionDecision: """Decision about whether to continue waiting.""" should_continue: bool - delay_seconds: int + delay: Duration + + @property + def delay_seconds(self) -> int: + """Get delay in seconds.""" + return self.delay.to_seconds() @classmethod - def continue_waiting(cls, 
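# Illustrative sketch (not part of this patch): the pre-jitter delay schedule
# create_wait_strategy produces with the WaitStrategyConfig defaults above
# (initial delay 5s, backoff rate 1.5, max delay 300s). Jitter and the
# Duration wrapper are omitted; only the min()/ceil() arithmetic is kept.
import math

def polling_delay_seconds(attempts_made: int) -> int:
    base = min(5 * (1.5 ** (attempts_made - 1)), 300)
    return max(1, math.ceil(base))  # round up, never below one second

# Attempts 1..6 yield 5, 8, 12, 17, 26, 38 seconds, converging on the 300s cap.
print([polling_delay_seconds(n) for n in range(1, 7)])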
delay_seconds: int) -> WaitForConditionDecision: + def continue_waiting(cls, delay: Duration) -> WaitForConditionDecision: """Create a decision to continue waiting for delay_seconds.""" - return cls(should_continue=True, delay_seconds=delay_seconds) + return cls(should_continue=True, delay=delay) @classmethod def stop_polling(cls) -> WaitForConditionDecision: """Create a decision to stop polling.""" - return cls(should_continue=False, delay_seconds=-1) + return cls(should_continue=False, delay=Duration()) @dataclass(frozen=True) diff --git a/tests/concurrency_test.py b/tests/concurrency_test.py index ea9c26f..cb2f0ba 100644 --- a/tests/concurrency_test.py +++ b/tests/concurrency_test.py @@ -1,5 +1,6 @@ """Tests for the concurrency module.""" +import json import random import threading import time @@ -10,17 +11,19 @@ import pytest -from aws_durable_execution_sdk_python.concurrency import ( +from aws_durable_execution_sdk_python.concurrency.executor import ( + ConcurrentExecutor, + TimerScheduler, +) +from aws_durable_execution_sdk_python.concurrency.models import ( BatchItem, BatchItemStatus, BatchResult, BranchStatus, CompletionReason, - ConcurrentExecutor, Executable, ExecutableWithState, ExecutionCounters, - TimerScheduler, ) from aws_durable_execution_sdk_python.config import CompletionConfig, MapConfig from aws_durable_execution_sdk_python.exceptions import ( @@ -29,7 +32,9 @@ SuspendExecution, TimedSuspendExecution, ) -from aws_durable_execution_sdk_python.lambda_service import ErrorObject +from aws_durable_execution_sdk_python.lambda_service import ( + ErrorObject, +) from aws_durable_execution_sdk_python.operation.map import MapExecutor @@ -102,28 +107,6 @@ def test_batch_item_from_dict(): assert item.error is None -def test_batch_item_from_dict_with_error(): - """Test BatchItem from_dict with error object.""" - error_data = { - "message": "Test error", - "type": "TestError", - "data": None, - "stackTrace": None, - } - data = { - "index": 1, - "status": "FAILED", - "result": None, - "error": error_data, - } - - item = BatchItem.from_dict(data) - assert item.index == 1 - assert item.status == BatchItemStatus.FAILED - assert item.result is None - assert item.error is not None - - def test_batch_result_creation(): """Test BatchResult creation.""" items = [ @@ -323,7 +306,9 @@ def test_batch_result_from_dict_default_completion_reason(): # No completionReason provided } - with patch("aws_durable_execution_sdk_python.concurrency.logger") as mock_logger: + with patch( + "aws_durable_execution_sdk_python.concurrency.models.logger" + ) as mock_logger: result = BatchResult.from_dict(data) assert result.completion_reason == CompletionReason.ALL_COMPLETED # Verify warning was logged @@ -341,7 +326,9 @@ def test_batch_result_from_dict_infer_all_completed_all_succeeded(): # No completionReason provided } - with patch("aws_durable_execution_sdk_python.concurrency.logger") as mock_logger: + with patch( + "aws_durable_execution_sdk_python.concurrency.models.logger" + ) as mock_logger: result = BatchResult.from_dict(data) assert result.completion_reason == CompletionReason.ALL_COMPLETED mock_logger.warning.assert_called_once() @@ -365,7 +352,9 @@ def test_batch_result_from_dict_infer_failure_tolerance_exceeded_all_failed(): # even if everything has failed, if we've completed all items, then we've finished as ALL_COMPLETED # 
https://github.com/aws/aws-durable-execution-sdk-js/blob/f20396f24afa9d6539d8e5056ee851ac7ef62301/packages/aws-durable-execution-sdk-js/src/handlers/concurrent-execution-handler/concurrent-execution-handler.ts#L324-L335 - with patch("aws_durable_execution_sdk_python.concurrency.logger") as mock_logger: + with patch( + "aws_durable_execution_sdk_python.concurrency.models.logger" + ) as mock_logger: result = BatchResult.from_dict(data) assert result.completion_reason == CompletionReason.ALL_COMPLETED mock_logger.warning.assert_called_once() @@ -389,7 +378,9 @@ def test_batch_result_from_dict_infer_all_completed_mixed_success_failure(): } # the logic is that when \every item i: hasCompleted(i) then terminate due to all_completed - with patch("aws_durable_execution_sdk_python.concurrency.logger") as mock_logger: + with patch( + "aws_durable_execution_sdk_python.concurrency.models.logger" + ) as mock_logger: result = BatchResult.from_dict(data) assert result.completion_reason == CompletionReason.ALL_COMPLETED mock_logger.warning.assert_called_once() @@ -406,7 +397,9 @@ def test_batch_result_from_dict_infer_min_successful_reached_has_started(): # No completionReason provided } - with patch("aws_durable_execution_sdk_python.concurrency.logger") as mock_logger: + with patch( + "aws_durable_execution_sdk_python.concurrency.models.logger" + ) as mock_logger: result = BatchResult.from_dict(data, CompletionConfig(1)) assert result.completion_reason == CompletionReason.MIN_SUCCESSFUL_REACHED mock_logger.warning.assert_called_once() @@ -419,7 +412,9 @@ def test_batch_result_from_dict_infer_empty_items(): # No completionReason provided } - with patch("aws_durable_execution_sdk_python.concurrency.logger") as mock_logger: + with patch( + "aws_durable_execution_sdk_python.concurrency.models.logger" + ) as mock_logger: result = BatchResult.from_dict(data) assert result.completion_reason == CompletionReason.ALL_COMPLETED mock_logger.warning.assert_called_once() @@ -434,7 +429,9 @@ def test_batch_result_from_dict_with_explicit_completion_reason(): "completionReason": "MIN_SUCCESSFUL_REACHED", } - with patch("aws_durable_execution_sdk_python.concurrency.logger") as mock_logger: + with patch( + "aws_durable_execution_sdk_python.concurrency.models.logger" + ) as mock_logger: result = BatchResult.from_dict(data) assert result.completion_reason == CompletionReason.MIN_SUCCESSFUL_REACHED # No warning should be logged when completionReason is provided @@ -973,6 +970,7 @@ def execute_item(self, child_context, executable): exe_state = ExecutableWithState(executables[0]) future = Mock() future.result.side_effect = TimedSuspendExecution("test message", time.time() + 1) + future.cancelled.return_value = False scheduler = Mock() scheduler.schedule_resume = Mock() @@ -1045,6 +1043,7 @@ def execute_item(self, child_context, executable): exe_state = ExecutableWithState(executables[0]) future = Mock() future.result.side_effect = ValueError("Test error") + future.cancelled.return_value = False scheduler = Mock() @@ -1054,7 +1053,7 @@ def execute_item(self, child_context, executable): assert isinstance(exe_state.error, ValueError) -def test_concurrent_executor_create_result_with_failed_branches(): +def test_concurrent_executor_create_result_with_early_exit(): """Test ConcurrentExecutor with failed branches using public execute method.""" class TestExecutor(ConcurrentExecutor): @@ -1062,6 +1061,8 @@ def execute_item(self, child_context, executable): if executable.index == 0: return f"result_{executable.index}" msg = "Test error" 
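# Illustrative sketch (not part of this patch): the inference rules these
# tests exercise when a serialized BatchResult is missing "completionReason".
# Simplified stand-in for BatchResult.from_dict: every item in a terminal
# state means ALL_COMPLETED (even if they all failed); items still STARTED
# next to a min_successful target mean the run stopped early, hence
# MIN_SUCCESSFUL_REACHED. Status strings mirror BatchItemStatus values.
TERMINAL = {"SUCCEEDED", "FAILED"}

def infer_completion_reason(statuses: list[str], min_successful: int | None) -> str:
    if all(status in TERMINAL for status in statuses):  # vacuously true for []
        return "ALL_COMPLETED"
    if min_successful is not None and "STARTED" in statuses:
        return "MIN_SUCCESSFUL_REACHED"
    return "ALL_COMPLETED"

assert infer_completion_reason([], None) == "ALL_COMPLETED"
assert infer_completion_reason(["FAILED", "FAILED"], None) == "ALL_COMPLETED"
assert infer_completion_reason(["SUCCEEDED", "STARTED"], 1) == "MIN_SUCCESSFUL_REACHED"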
+ # giving space to terminate early with + time.sleep(0.5) raise ValueError(msg) def success_callable(): @@ -1072,7 +1073,8 @@ def failure_callable(): executables = [Executable(0, success_callable), Executable(1, failure_callable)] completion_config = CompletionConfig( - min_successful=1, + # setting min successful to None to execute all children and avoid early stopping + min_successful=None, tolerated_failure_count=None, tolerated_failure_percentage=None, ) @@ -2373,7 +2375,9 @@ def test_batch_result_from_dict_with_completion_config(): # With started items, should infer MIN_SUCCESSFUL_REACHED completion_config = CompletionConfig(min_successful=1) - with patch("aws_durable_execution_sdk_python.concurrency.logger") as mock_logger: + with patch( + "aws_durable_execution_sdk_python.concurrency.models.logger" + ) as mock_logger: result = BatchResult.from_dict(data, completion_config) assert result.completion_reason == CompletionReason.MIN_SUCCESSFUL_REACHED mock_logger.warning.assert_called_once() @@ -2399,7 +2403,9 @@ def test_batch_result_from_dict_all_completed(): # No completionReason provided } - with patch("aws_durable_execution_sdk_python.concurrency.logger") as mock_logger: + with patch( + "aws_durable_execution_sdk_python.concurrency.models.logger" + ) as mock_logger: result = BatchResult.from_dict(data) assert result.completion_reason == CompletionReason.ALL_COMPLETED mock_logger.warning.assert_called_once() @@ -2520,7 +2526,7 @@ def create_child_context(operation_id): executor_context.create_child_context = create_child_context with patch( - "aws_durable_execution_sdk_python.concurrency.child_handler", + "aws_durable_execution_sdk_python.concurrency.executor.child_handler", patched_child_handler, ): executor.execute(execution_state, executor_context) @@ -2676,3 +2682,352 @@ def mock_get_checkpoint_result(operation_id): assert len(result.all) == 1 assert result.all[0].status == BatchItemStatus.SUCCEEDED assert result.all[0].result == "re_executed_result" + + +def test_batch_item_from_dict_with_error(): + """Test BatchItem.from_dict() with error.""" + data = { + "index": 3, + "status": "FAILED", + "result": None, + "error": { + "ErrorType": "ValueError", + "ErrorMessage": "bad value", + "StackTrace": [], + }, + } + + item = BatchItem.from_dict(data) + + assert item.index == 3 + assert item.status == BatchItemStatus.FAILED + assert item.error.type == "ValueError" + assert item.error.message == "bad value" + + +def test_batch_result_with_mixed_statuses(): + """Test BatchResult serialization with mixed item statuses.""" + result = BatchResult( + all=[ + BatchItem(0, BatchItemStatus.SUCCEEDED, result="success"), + BatchItem( + 1, + BatchItemStatus.FAILED, + error=ErrorObject(message="msg", type="E", data=None, stack_trace=[]), + ), + BatchItem(2, BatchItemStatus.STARTED), + ], + completion_reason=CompletionReason.FAILURE_TOLERANCE_EXCEEDED, + ) + + serialized = json.dumps(result.to_dict()) + deserialized = BatchResult.from_dict(json.loads(serialized)) + + assert len(deserialized.all) == 3 + assert deserialized.all[0].status == BatchItemStatus.SUCCEEDED + assert deserialized.all[1].status == BatchItemStatus.FAILED + assert deserialized.all[2].status == BatchItemStatus.STARTED + assert deserialized.completion_reason == CompletionReason.FAILURE_TOLERANCE_EXCEEDED + + +def test_batch_result_empty_list(): + """Test BatchResult serialization with empty items list.""" + result = BatchResult(all=[], completion_reason=CompletionReason.ALL_COMPLETED) + + serialized = json.dumps(result.to_dict()) + 
deserialized = BatchResult.from_dict(json.loads(serialized)) + + assert len(deserialized.all) == 0 + assert deserialized.completion_reason == CompletionReason.ALL_COMPLETED + + +def test_batch_result_complex_nested_data(): + """Test BatchResult with complex nested data structures.""" + complex_result = { + "users": [{"id": 1, "name": "Alice"}, {"id": 2, "name": "Bob"}], + "metadata": {"count": 2, "timestamp": "2025-10-31"}, + } + + result = BatchResult( + all=[BatchItem(0, BatchItemStatus.SUCCEEDED, result=complex_result)], + completion_reason=CompletionReason.ALL_COMPLETED, + ) + + serialized = json.dumps(result.to_dict()) + deserialized = BatchResult.from_dict(json.loads(serialized)) + + assert deserialized.all[0].result == complex_result + assert deserialized.all[0].result["users"][0]["name"] == "Alice" + + +def test_executor_does_not_deadlock_when_all_tasks_terminal_but_completion_config_allows_failures(): + """Ensure executor returns when all tasks are terminal even if completion rules are confusing.""" + + class TestExecutor(ConcurrentExecutor): + def execute_item(self, child_context, executable): + if executable.index == 0: + # fail one task + raise Exception("boom") # noqa EM101 TRY002 + return f"ok_{executable.index}" + + # Two tasks, min_successful=2 but tolerated failure_count set to 1. + # After one fail + one success, counters.is_complete() should return true, + # should_continue() should return false. counters.is_complete was failing to + # stop early, which caused map to hang. + executables = [Executable(0, lambda: "a"), Executable(1, lambda: "b")] + completion_config = CompletionConfig( + min_successful=2, + tolerated_failure_count=1, + tolerated_failure_percentage=None, + ) + + executor = TestExecutor( + executables=executables, + max_concurrency=2, + completion_config=completion_config, + sub_type_top="TOP", + sub_type_iteration="ITER", + name_prefix="test_", + serdes=None, + ) + + execution_state = Mock() + execution_state.create_checkpoint = Mock() + executor_context = Mock() + executor_context._create_step_id_for_logical_step = lambda *args: "1" # noqa SLF001 + executor_context.create_child_context = lambda *args: Mock() + + # Should return (not hang) and batch should reflect one FAILED and one SUCCEEDED + result = executor.execute(execution_state, executor_context) + statuses = {item.index: item.status for item in result.all} + assert statuses[0] == BatchItemStatus.FAILED + assert statuses[1] == BatchItemStatus.SUCCEEDED + + +def test_executor_terminates_quickly_when_impossible_to_succeed(): + """Test that executor terminates when min_successful becomes impossible.""" + executed_count = {"value": 0} + + def task_func(ctx, item, idx, items): + executed_count["value"] += 1 + if idx < 2: + raise Exception(f"fail_{idx}") # noqa EM102 TRY002 + time.sleep(0.05) + return f"ok_{idx}" + + items = list(range(100)) + config = MapConfig( + max_concurrency=10, completion_config=CompletionConfig(min_successful=99) + ) + + executor = MapExecutor.from_items(items=items, func=task_func, config=config) + + execution_state = Mock() + execution_state.create_checkpoint = Mock() + executor_context = Mock() + executor_context._create_step_id_for_logical_step = lambda *args: "1" # noqa SLF001 + executor_context.create_child_context = lambda *args: Mock() + + result = executor.execute(execution_state, executor_context) + + # With concurrency=1, only 2 tasks should execute before terminating + # min_successful(99) + failure_count(2) = 101 > total_tasks(100) + assert executed_count["value"] < 
100 + assert ( + result.completion_reason == CompletionReason.FAILURE_TOLERANCE_EXCEEDED + ), executed_count + assert sum(1 for item in result.all if item.status == BatchItemStatus.FAILED) == 2 + assert ( + sum(1 for item in result.all if item.status == BatchItemStatus.SUCCEEDED) < 98 + ) + + +def test_executor_exits_early_with_min_successful(): + """Test that parallel exits immediately when min_successful is reached without waiting for other branches.""" + + class TestExecutor(ConcurrentExecutor): + def execute_item(self, child_context, executable): + return executable.func() + + execution_times = [] + + def fast_branch(): + execution_times.append(("fast", time.time())) + return "fast_result" + + def slow_branch(): + execution_times.append(("slow_start", time.time())) + time.sleep(2) # Long sleep + execution_times.append(("slow_end", time.time())) + return "slow_result" + + executables = [ + Executable(0, fast_branch), + Executable(1, slow_branch), + ] + + completion_config = CompletionConfig(min_successful=1) + + executor = TestExecutor( + executables=executables, + max_concurrency=2, + completion_config=completion_config, + sub_type_top="TOP", + sub_type_iteration="ITER", + name_prefix="test_", + serdes=None, + ) + + execution_state = Mock() + execution_state.create_checkpoint = Mock() + executor_context = Mock() + executor_context._create_step_id_for_logical_step = lambda idx: f"step_{idx}" # noqa: SLF001 + executor_context._parent_id = "parent" # noqa: SLF001 + + def create_child_context(op_id): + child = Mock() + child.state = execution_state + return child + + executor_context.create_child_context = create_child_context + + start_time = time.time() + result = executor.execute(execution_state, executor_context) + elapsed_time = time.time() - start_time + + # Should complete in less than 1.5 second (not wait for 2-second sleep) + assert elapsed_time < 1.5, f"Took {elapsed_time}s, expected < 1.5s" + + # Result should show MIN_SUCCESSFUL_REACHED + assert result.completion_reason == CompletionReason.MIN_SUCCESSFUL_REACHED + + # Fast branch should succeed + assert result.all[0].status == BatchItemStatus.SUCCEEDED + assert result.all[0].result == "fast_result" + + # Slow branch should be marked as STARTED (incomplete) + assert result.all[1].status == BatchItemStatus.STARTED + + # Verify counts + assert result.success_count == 1 + assert result.failure_count == 0 + assert result.started_count == 1 + assert result.total_count == 2 + + +def test_executor_returns_with_incomplete_branches(): + """Test that executor returns when min_successful is reached, leaving other branches incomplete.""" + + class TestExecutor(ConcurrentExecutor): + def execute_item(self, child_context, executable): + return executable.func() + + operation_tracker = Mock() + + def fast_branch(): + operation_tracker.fast_executed() + return "fast_result" + + def slow_branch(): + operation_tracker.slow_started() + time.sleep(2) # Long sleep + operation_tracker.slow_completed() + return "slow_result" + + executables = [ + Executable(0, fast_branch), + Executable(1, slow_branch), + ] + + completion_config = CompletionConfig(min_successful=1) + + executor = TestExecutor( + executables=executables, + max_concurrency=2, + completion_config=completion_config, + sub_type_top="TOP", + sub_type_iteration="ITER", + name_prefix="test_", + serdes=None, + ) + + execution_state = Mock() + execution_state.create_checkpoint = Mock() + executor_context = Mock() + executor_context._create_step_id_for_logical_step = lambda idx: f"step_{idx}" # 
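# Illustrative sketch (not part of this patch): the arithmetic behind the
# early-termination assertions above. Once enough items have failed that the
# min_successful target can no longer be met, running the rest is pointless:
# with 100 items and min_successful=99, a second failure caps possible
# successes at 98, so the executor should stop there.
def success_still_possible(total: int, failures: int, min_successful: int) -> bool:
    # Everything that has not failed could still succeed.
    return total - failures >= min_successful

assert success_still_possible(total=100, failures=1, min_successful=99)
assert not success_still_possible(total=100, failures=2, min_successful=99)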
noqa: SLF001 + executor_context._parent_id = "parent" # noqa: SLF001 + executor_context.create_child_context = lambda op_id: Mock(state=execution_state) + + result = executor.execute(execution_state, executor_context) + + # Verify fast branch executed + assert operation_tracker.fast_executed.call_count == 1 + + # Slow branch may or may not have started (depends on thread scheduling) + # but it definitely should not have completed + assert ( + operation_tracker.slow_completed.call_count == 0 + ), "Executor should return before slow branch completes" + + # Result should show MIN_SUCCESSFUL_REACHED + assert result.completion_reason == CompletionReason.MIN_SUCCESSFUL_REACHED + + # Verify counts - one succeeded, one incomplete + assert result.success_count == 1 + assert result.failure_count == 0 + assert result.started_count == 1 + assert result.total_count == 2 + + +def test_executor_returns_before_slow_branch_completes(): + """Test that executor returns immediately when min_successful is reached, not waiting for slow branches.""" + + class TestExecutor(ConcurrentExecutor): + def execute_item(self, child_context, executable): + return executable.func() + + slow_branch_mock = Mock() + + def fast_func(): + return "fast" + + def slow_func(): + time.sleep(3) # Sleep + slow_branch_mock.completed() # Should not be called before executor returns + return "slow" + + executables = [Executable(0, fast_func), Executable(1, slow_func)] + completion_config = CompletionConfig(min_successful=1) + + executor = TestExecutor( + executables=executables, + max_concurrency=2, + completion_config=completion_config, + sub_type_top="TOP", + sub_type_iteration="ITER", + name_prefix="test_", + serdes=None, + ) + + execution_state = Mock() + execution_state.create_checkpoint = Mock() + executor_context = Mock() + executor_context._create_step_id_for_logical_step = lambda idx: f"step_{idx}" # noqa: SLF001 + executor_context._parent_id = "parent" # noqa: SLF001 + executor_context.create_child_context = lambda op_id: Mock(state=execution_state) + + result = executor.execute(execution_state, executor_context) + + # Executor should have returned before slow branch completed + assert ( + not slow_branch_mock.completed.called + ), "Executor should return before slow branch completes" + + # Result should show MIN_SUCCESSFUL_REACHED + assert result.completion_reason == CompletionReason.MIN_SUCCESSFUL_REACHED + + # Verify counts + assert result.success_count == 1 + assert result.failure_count == 0 + assert result.started_count == 1 + assert result.total_count == 2 diff --git a/tests/config_test.py b/tests/config_test.py index b2b7af9..24edf6d 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -9,6 +9,8 @@ CheckpointMode, ChildConfig, CompletionConfig, + Duration, + InvokeConfig, ItemBatcher, ItemsPerBatchUnit, MapConfig, @@ -85,7 +87,7 @@ def test_parallel_config_defaults(): def test_wait_for_condition_decision_continue(): """Test WaitForConditionDecision.continue_waiting factory method.""" - decision = WaitForConditionDecision.continue_waiting(30) + decision = WaitForConditionDecision.continue_waiting(Duration.from_seconds(30)) assert decision.should_continue is True assert decision.delay_seconds == 30 @@ -94,14 +96,14 @@ def test_wait_for_condition_decision_stop(): """Test WaitForConditionDecision.stop_polling factory method.""" decision = WaitForConditionDecision.stop_polling() assert decision.should_continue is False - assert decision.delay_seconds == -1 + assert decision.delay_seconds == 0 def 
test_wait_for_condition_config(): """Test WaitForConditionConfig with custom values.""" def wait_strategy(state, attempt): - return WaitForConditionDecision.continue_waiting(10) + return WaitForConditionDecision.continue_waiting(Duration.from_seconds(10)) serdes = Mock() config = WaitForConditionConfig( @@ -237,7 +239,9 @@ def test_callback_config_with_values(): """Test CallbackConfig with custom values.""" serdes = Mock() config = CallbackConfig( - timeout_seconds=30, heartbeat_timeout_seconds=10, serdes=serdes + timeout=Duration.from_seconds(30), + heartbeat_timeout=Duration.from_seconds(10), + serdes=serdes, ) assert config.timeout_seconds == 30 assert config.heartbeat_timeout_seconds == 10 @@ -272,3 +276,16 @@ def test_step_future_without_name(): result = step_future.result() assert result == 42 + + +def test_invoke_config_defaults(): + """Test InvokeConfig defaults.""" + config = InvokeConfig() + assert config.tenant_id is None + assert config.timeout_seconds == 0 + + +def test_invoke_config_with_tenant_id(): + """Test InvokeConfig with explicit tenant_id.""" + config = InvokeConfig(tenant_id="test-tenant") + assert config.tenant_id == "test-tenant" diff --git a/tests/context_test.py b/tests/context_test.py index 3804ee4..4e43347 100644 --- a/tests/context_test.py +++ b/tests/context_test.py @@ -3,13 +3,14 @@ import json import random from itertools import islice -from unittest.mock import ANY, Mock, patch +from unittest.mock import ANY, MagicMock, Mock, patch import pytest from aws_durable_execution_sdk_python.config import ( CallbackConfig, ChildConfig, + Duration, InvokeConfig, MapConfig, ParallelConfig, @@ -17,7 +18,6 @@ ) from aws_durable_execution_sdk_python.context import Callback, DurableContext from aws_durable_execution_sdk_python.exceptions import ( - CallableRuntimeError, CallbackError, SuspendExecution, ValidationError, @@ -74,6 +74,28 @@ def test_callback_result_succeeded(): callback = Callback("callback1", "op1", mock_state) result = callback.result() + assert result == '"success_result"' + mock_state.get_checkpoint_result.assert_called_once_with("op1") + + +def test_callback_result_succeeded_with_plain_str(): + """Test Callback.result() when operation succeeded.""" + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + operation = Operation( + operation_id="op1", + operation_type=OperationType.CALLBACK, + status=OperationStatus.SUCCEEDED, + callback_details=CallbackDetails( + callback_id="callback1", result="success_result" + ), + ) + mock_result = CheckpointedResult.create_from_operation(operation) + mock_state.get_checkpoint_result.return_value = mock_result + + callback = Callback("callback1", "op1", mock_state) + result = callback.result() + assert result == "success_result" mock_state.get_checkpoint_result.assert_called_once_with("op1") @@ -149,7 +171,7 @@ def test_callback_result_failed(): callback = Callback("callback5", "op5", mock_state) - with pytest.raises(CallableRuntimeError): + with pytest.raises(CallbackError): callback.result() @@ -208,7 +230,7 @@ def test_callback_result_timed_out(): callback = Callback("callback_timeout", "op_timeout", mock_state) - with pytest.raises(CallableRuntimeError): + with pytest.raises(CallbackError): callback.result() @@ -216,10 +238,13 @@ def test_callback_result_timed_out(): # region create_callback -@patch("aws_durable_execution_sdk_python.context.create_callback_handler") -def test_create_callback_basic(mock_handler): 
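# Illustrative sketch (not part of this patch): the Duration conversions these
# config tests lean on, assuming the SDK from this diff is installed. The
# equalities restate expectations already encoded in the tests above.
from aws_durable_execution_sdk_python.config import Duration

assert Duration.from_seconds(30).to_seconds() == 30
assert Duration.from_minutes(1).to_seconds() == 60
# A default Duration() is zero, which is why stop_polling() and no_retry()
# report delay_seconds == 0.
assert Duration().to_seconds() == 0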
+@patch("aws_durable_execution_sdk_python.context.CallbackOperationExecutor") +def test_create_callback_basic(mock_executor_class): """Test create_callback with basic parameters.""" - mock_handler.return_value = "callback123" + mock_executor = MagicMock() + mock_executor.process.return_value = "callback123" + mock_executor_class.return_value = mock_executor + mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -236,17 +261,21 @@ def test_create_callback_basic(mock_handler): assert callback.operation_id == expected_operation_id assert callback.state is mock_state - mock_handler.assert_called_once_with( + mock_executor_class.assert_called_once_with( state=mock_state, operation_identifier=OperationIdentifier(expected_operation_id, None, None), config=CallbackConfig(), ) + mock_executor.process.assert_called_once() -@patch("aws_durable_execution_sdk_python.context.create_callback_handler") -def test_create_callback_with_name_and_config(mock_handler): +@patch("aws_durable_execution_sdk_python.context.CallbackOperationExecutor") +def test_create_callback_with_name_and_config(mock_executor_class): """Test create_callback with name and config.""" - mock_handler.return_value = "callback456" + mock_executor = MagicMock() + mock_executor.process.return_value = "callback456" + mock_executor_class.return_value = mock_executor + mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -264,18 +293,23 @@ def test_create_callback_with_name_and_config(mock_handler): assert callback.callback_id == "callback456" assert callback.operation_id == expected_operation_id - mock_handler.assert_called_once_with( + mock_executor_class.assert_called_once_with( state=mock_state, operation_identifier=OperationIdentifier(expected_operation_id, None, None), config=config, ) + mock_executor.process.assert_called_once() -@patch("aws_durable_execution_sdk_python.context.create_callback_handler") -def test_create_callback_with_parent_id(mock_handler): +@patch("aws_durable_execution_sdk_python.context.CallbackOperationExecutor") +def test_create_callback_with_parent_id(mock_executor_class): """Test create_callback with parent_id.""" - mock_handler.return_value = "callback789" + mock_executor = MagicMock() + + mock_executor.process.return_value = "callback789" + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -291,17 +325,21 @@ def test_create_callback_with_parent_id(mock_handler): assert callback.operation_id == expected_operation_id - mock_handler.assert_called_once_with( + mock_executor_class.assert_called_once_with( state=mock_state, operation_identifier=OperationIdentifier(expected_operation_id, "parent123"), config=CallbackConfig(), ) -@patch("aws_durable_execution_sdk_python.context.create_callback_handler") -def test_create_callback_increments_counter(mock_handler): +@patch("aws_durable_execution_sdk_python.context.CallbackOperationExecutor") +def test_create_callback_increments_counter(mock_executor_class): """Test create_callback increments step counter.""" - mock_handler.return_value = "callback_test" + mock_executor = MagicMock() + + mock_executor.process.return_value = "callback_test" + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( 
"arn:aws:durable:us-east-1:123456789012:execution/test" @@ -328,10 +366,14 @@ def test_create_callback_increments_counter(mock_handler): # region step -@patch("aws_durable_execution_sdk_python.context.step_handler") -def test_step_basic(mock_handler): +@patch("aws_durable_execution_sdk_python.context.StepOperationExecutor") +def test_step_basic(mock_executor_class): """Test step with basic parameters.""" - mock_handler.return_value = "step_result" + mock_executor = MagicMock() + + mock_executor.process.return_value = "step_result" + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -348,19 +390,24 @@ def test_step_basic(mock_handler): result = context.step(mock_callable) assert result == "step_result" - mock_handler.assert_called_once_with( - func=mock_callable, - config=None, + mock_executor_class.assert_called_once_with( state=mock_state, operation_identifier=OperationIdentifier(expected_operation_id, None, None), + config=ANY, # StepConfig() is created in context.step() + func=mock_callable, context_logger=ANY, ) + mock_executor.process.assert_called_once() -@patch("aws_durable_execution_sdk_python.context.step_handler") -def test_step_with_name_and_config(mock_handler): +@patch("aws_durable_execution_sdk_python.context.StepOperationExecutor") +def test_step_with_name_and_config(mock_executor_class): """Test step with name and config.""" - mock_handler.return_value = "configured_result" + mock_executor = MagicMock() + + mock_executor.process.return_value = "configured_result" + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -382,19 +429,24 @@ def test_step_with_name_and_config(mock_handler): expected_id = next(seq) # 6th assert result == "configured_result" - mock_handler.assert_called_once_with( - func=mock_callable, - config=config, + mock_executor_class.assert_called_once_with( state=mock_state, operation_identifier=OperationIdentifier(expected_id, None, None), + config=config, + func=mock_callable, context_logger=ANY, ) + mock_executor.process.assert_called_once() -@patch("aws_durable_execution_sdk_python.context.step_handler") -def test_step_with_parent_id(mock_handler): +@patch("aws_durable_execution_sdk_python.context.StepOperationExecutor") +def test_step_with_parent_id(mock_executor_class): """Test step with parent_id.""" - mock_handler.return_value = "parent_result" + mock_executor = MagicMock() + + mock_executor.process.return_value = "parent_result" + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -414,19 +466,24 @@ def test_step_with_parent_id(mock_handler): [next(seq) for _ in range(2)] # Skip first 2 expected_id = next(seq) # 3rd - mock_handler.assert_called_once_with( - func=mock_callable, - config=None, + mock_executor_class.assert_called_once_with( state=mock_state, operation_identifier=OperationIdentifier(expected_id, "parent123"), + config=ANY, + func=mock_callable, context_logger=ANY, ) + mock_executor.process.assert_called_once() -@patch("aws_durable_execution_sdk_python.context.step_handler") -def test_step_increments_counter(mock_handler): +@patch("aws_durable_execution_sdk_python.context.StepOperationExecutor") +def 
test_step_increments_counter(mock_executor_class): """Test step increments step counter.""" - mock_handler.return_value = "result" + mock_executor = MagicMock() + + mock_executor.process.return_value = "result" + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -449,18 +506,22 @@ def test_step_increments_counter(mock_handler): expected_id2 = next(seq) # 12th assert context._step_counter.get_current() == 12 # noqa: SLF001 - assert mock_handler.call_args_list[0][1][ + assert mock_executor_class.call_args_list[0][1][ "operation_identifier" ] == OperationIdentifier(expected_id1, None, None) - assert mock_handler.call_args_list[1][1][ + assert mock_executor_class.call_args_list[1][1][ "operation_identifier" ] == OperationIdentifier(expected_id2, None, None) -@patch("aws_durable_execution_sdk_python.context.step_handler") -def test_step_with_original_name(mock_handler): +@patch("aws_durable_execution_sdk_python.context.StepOperationExecutor") +def test_step_with_original_name(mock_executor_class): """Test step with callable that has _original_name attribute.""" - mock_handler.return_value = "named_result" + mock_executor = MagicMock() + + mock_executor.process.return_value = "named_result" + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -476,23 +537,28 @@ def test_step_with_original_name(mock_handler): seq = operation_id_sequence() expected_id = next(seq) # 1st - mock_handler.assert_called_once_with( - func=mock_callable, - config=None, + mock_executor_class.assert_called_once_with( state=mock_state, operation_identifier=OperationIdentifier(expected_id, None, "override_name"), + config=ANY, + func=mock_callable, context_logger=ANY, ) + mock_executor.process.assert_called_once() # endregion step # region invoke -@patch("aws_durable_execution_sdk_python.context.invoke_handler") -def test_invoke_basic(mock_handler): +@patch("aws_durable_execution_sdk_python.context.InvokeOperationExecutor") +def test_invoke_basic(mock_executor_class): """Test invoke with basic parameters.""" - mock_handler.return_value = "invoke_result" + mock_executor = MagicMock() + + mock_executor.process.return_value = "invoke_result" + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -506,24 +572,29 @@ def test_invoke_basic(mock_handler): assert result == "invoke_result" - mock_handler.assert_called_once_with( - function_name="test_function", - payload="test_payload", + mock_executor_class.assert_called_once_with( state=mock_state, operation_identifier=OperationIdentifier(expected_operation_id, None, None), - config=None, + function_name="test_function", + payload="test_payload", + config=ANY, # InvokeConfig() is created in context.invoke() ) + mock_executor.process.assert_called_once() -@patch("aws_durable_execution_sdk_python.context.invoke_handler") -def test_invoke_with_name_and_config(mock_handler): +@patch("aws_durable_execution_sdk_python.context.InvokeOperationExecutor") +def test_invoke_with_name_and_config(mock_executor_class): """Test invoke with name and config.""" - mock_handler.return_value = "configured_result" + mock_executor = MagicMock() + + mock_executor.process.return_value = "configured_result" + + 
mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" ) - config = InvokeConfig[str, str](timeout_seconds=30) + config = InvokeConfig[str, str](timeout=Duration.from_seconds(30)) context = DurableContext(state=mock_state) [context._create_step_id() for _ in range(5)] # Set counter to 5 # noqa: SLF001 @@ -538,19 +609,24 @@ def test_invoke_with_name_and_config(mock_handler): expected_id = next(seq) # 6th assert result == "configured_result" - mock_handler.assert_called_once_with( - function_name="test_function", - payload={"key": "value"}, + mock_executor_class.assert_called_once_with( state=mock_state, operation_identifier=OperationIdentifier(expected_id, None, "named_invoke"), + function_name="test_function", + payload={"key": "value"}, config=config, ) + mock_executor.process.assert_called_once() -@patch("aws_durable_execution_sdk_python.context.invoke_handler") -def test_invoke_with_parent_id(mock_handler): +@patch("aws_durable_execution_sdk_python.context.InvokeOperationExecutor") +def test_invoke_with_parent_id(mock_executor_class): """Test invoke with parent_id.""" - mock_handler.return_value = "parent_result" + mock_executor = MagicMock() + + mock_executor.process.return_value = "parent_result" + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -565,19 +641,24 @@ def test_invoke_with_parent_id(mock_handler): [next(seq) for _ in range(2)] expected_id = next(seq) - mock_handler.assert_called_once_with( - function_name="test_function", - payload=None, + mock_executor_class.assert_called_once_with( state=mock_state, operation_identifier=OperationIdentifier(expected_id, "parent123", None), - config=None, + function_name="test_function", + payload=None, + config=ANY, ) + mock_executor.process.assert_called_once() -@patch("aws_durable_execution_sdk_python.context.invoke_handler") -def test_invoke_increments_counter(mock_handler): +@patch("aws_durable_execution_sdk_python.context.InvokeOperationExecutor") +def test_invoke_increments_counter(mock_executor_class): """Test invoke increments step counter.""" - mock_handler.return_value = "result" + mock_executor = MagicMock() + + mock_executor.process.return_value = "result" + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -595,18 +676,22 @@ def test_invoke_increments_counter(mock_handler): expected_id2 = next(seq) assert context._step_counter.get_current() == 12 # noqa: SLF001 - assert mock_handler.call_args_list[0][1][ + assert mock_executor_class.call_args_list[0][1][ "operation_identifier" ] == OperationIdentifier(expected_id1, None, None) - assert mock_handler.call_args_list[1][1][ + assert mock_executor_class.call_args_list[1][1][ "operation_identifier" ] == OperationIdentifier(expected_id2, None, None) -@patch("aws_durable_execution_sdk_python.context.invoke_handler") -def test_invoke_with_none_payload(mock_handler): +@patch("aws_durable_execution_sdk_python.context.InvokeOperationExecutor") +def test_invoke_with_none_payload(mock_executor_class): """Test invoke with None payload.""" - mock_handler.return_value = None + mock_executor = MagicMock() + + mock_executor.process.return_value = None + + mock_executor_class.return_value = 
mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -621,28 +706,35 @@ def test_invoke_with_none_payload(mock_handler): assert result is None - mock_handler.assert_called_once_with( - function_name="test_function", - payload=None, + mock_executor_class.assert_called_once_with( state=mock_state, operation_identifier=OperationIdentifier(expected_id, None, None), - config=None, + function_name="test_function", + payload=None, + config=ANY, ) + mock_executor.process.assert_called_once() -@patch("aws_durable_execution_sdk_python.context.invoke_handler") -def test_invoke_with_custom_serdes(mock_handler): +@patch("aws_durable_execution_sdk_python.context.InvokeOperationExecutor") +def test_invoke_with_custom_serdes(mock_executor_class): """Test invoke with custom serialization config.""" - mock_handler.return_value = {"transformed": "data"} + mock_executor = MagicMock() + + mock_executor.process.return_value = {"transformed": "data"} + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" ) + payload_serdes = CustomDictSerDes() + result_serdes = CustomDictSerDes() config = InvokeConfig[dict, dict]( - serdes_payload=CustomDictSerDes(), - serdes_result=CustomDictSerDes(), - timeout_seconds=60, + serdes_payload=payload_serdes, + serdes_result=result_serdes, + timeout=Duration.from_minutes(1), ) context = DurableContext(state=mock_state) @@ -658,24 +750,29 @@ def test_invoke_with_custom_serdes(mock_handler): expected_id = next(seq) assert result == {"transformed": "data"} - mock_handler.assert_called_once_with( - function_name="test_function", - payload={"original": "data"}, + mock_executor_class.assert_called_once_with( state=mock_state, operation_identifier=OperationIdentifier( expected_id, None, "custom_serdes_invoke" ), + function_name="test_function", + payload={"original": "data"}, config=config, ) + mock_executor.process.assert_called_once() # endregion invoke # region wait -@patch("aws_durable_execution_sdk_python.context.wait_handler") -def test_wait_basic(mock_handler): +@patch("aws_durable_execution_sdk_python.context.WaitOperationExecutor") +def test_wait_basic(mock_executor_class): """Test wait with basic parameters.""" + mock_executor = MagicMock() + mock_executor.process.return_value = None + mock_executor_class.return_value = mock_executor + mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -685,18 +782,23 @@ def test_wait_basic(mock_handler): operation_ids = operation_id_sequence() expected_operation_id = next(operation_ids) - context.wait(30) + context.wait(Duration.from_seconds(30)) - mock_handler.assert_called_once_with( - seconds=30, + mock_executor_class.assert_called_once_with( state=mock_state, operation_identifier=OperationIdentifier(expected_operation_id, None, None), + seconds=30, ) + mock_executor.process.assert_called_once() -@patch("aws_durable_execution_sdk_python.context.wait_handler") -def test_wait_with_name(mock_handler): +@patch("aws_durable_execution_sdk_python.context.WaitOperationExecutor") +def test_wait_with_name(mock_executor_class): """Test wait with name parameter.""" + mock_executor = MagicMock() + mock_executor.process.return_value = None + mock_executor_class.return_value = mock_executor + mock_state = Mock(spec=ExecutionState) 
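# Illustrative sketch (not part of this patch): the mocking pattern these
# rewritten tests share, assuming the SDK from this diff is installed. The old
# module-level handlers (wait_handler, step_handler, ...) were patched as
# functions; the operation-executor classes are now patched so that
# instantiating one returns a stub whose .process() supplies the result. The
# helper and the elided test body are condensed, hypothetical stand-ins.
from unittest.mock import MagicMock, patch

def make_executor_stub(return_value):
    mock_executor = MagicMock()
    mock_executor.process.return_value = return_value
    return mock_executor

with patch(
    "aws_durable_execution_sdk_python.context.WaitOperationExecutor"
) as mock_executor_class:
    mock_executor_class.return_value = make_executor_stub(None)
    # ... build DurableContext and call context.wait(Duration.from_seconds(30)) ...
    # then assert on mock_executor_class.call_args and
    # mock_executor_class.return_value.process.assert_called_once()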
mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -705,22 +807,27 @@ def test_wait_with_name(mock_handler): context = DurableContext(state=mock_state) [context._create_step_id() for _ in range(5)] # Set counter to 5 # noqa: SLF001 - context.wait(60, name="test_wait") + context.wait(Duration.from_minutes(1), name="test_wait") seq = operation_id_sequence() [next(seq) for _ in range(5)] expected_id = next(seq) - mock_handler.assert_called_once_with( - seconds=60, + mock_executor_class.assert_called_once_with( state=mock_state, operation_identifier=OperationIdentifier(expected_id, None, "test_wait"), + seconds=60, ) + mock_executor.process.assert_called_once() -@patch("aws_durable_execution_sdk_python.context.wait_handler") -def test_wait_with_parent_id(mock_handler): +@patch("aws_durable_execution_sdk_python.context.WaitOperationExecutor") +def test_wait_with_parent_id(mock_executor_class): """Test wait with parent_id.""" + mock_executor = MagicMock() + mock_executor.process.return_value = None + mock_executor_class.return_value = mock_executor + mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -729,22 +836,27 @@ def test_wait_with_parent_id(mock_handler): context = DurableContext(state=mock_state, parent_id="parent123") [context._create_step_id() for _ in range(2)] # Set counter to 2 # noqa: SLF001 - context.wait(45) + context.wait(Duration.from_seconds(45)) seq = operation_id_sequence("parent123") [next(seq) for _ in range(2)] expected_id = next(seq) - mock_handler.assert_called_once_with( - seconds=45, + mock_executor_class.assert_called_once_with( state=mock_state, operation_identifier=OperationIdentifier(expected_id, "parent123"), + seconds=45, ) + mock_executor.process.assert_called_once() -@patch("aws_durable_execution_sdk_python.context.wait_handler") -def test_wait_increments_counter(mock_handler): +@patch("aws_durable_execution_sdk_python.context.WaitOperationExecutor") +def test_wait_increments_counter(mock_executor_class): """Test wait increments step counter.""" + mock_executor = MagicMock() + mock_executor.process.return_value = None + mock_executor_class.return_value = mock_executor + mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -753,8 +865,8 @@ def test_wait_increments_counter(mock_handler): context = DurableContext(state=mock_state) [context._create_step_id() for _ in range(10)] # Set counter to 10 # noqa: SLF001 - context.wait(15) - context.wait(25) + context.wait(Duration.from_seconds(15)) + context.wait(Duration.from_seconds(25)) seq = operation_id_sequence() [next(seq) for _ in range(10)] @@ -762,17 +874,21 @@ def test_wait_increments_counter(mock_handler): expected_id2 = next(seq) assert context._step_counter.get_current() == 12 # noqa: SLF001 - assert mock_handler.call_args_list[0][1][ + assert mock_executor_class.call_args_list[0][1][ "operation_identifier" ] == OperationIdentifier(expected_id1, None, None) - assert mock_handler.call_args_list[1][1][ + assert mock_executor_class.call_args_list[1][1][ "operation_identifier" ] == OperationIdentifier(expected_id2, None, None) -@patch("aws_durable_execution_sdk_python.context.wait_handler") -def test_wait_returns_none(mock_handler): +@patch("aws_durable_execution_sdk_python.context.WaitOperationExecutor") +def test_wait_returns_none(mock_executor_class): """Test wait returns None.""" + mock_executor = 
MagicMock() + mock_executor.process.return_value = None + mock_executor_class.return_value = mock_executor + mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -780,14 +896,18 @@ def test_wait_returns_none(mock_handler): context = DurableContext(state=mock_state) - result = context.wait(10) + result = context.wait(Duration.from_seconds(10)) assert result is None -@patch("aws_durable_execution_sdk_python.context.wait_handler") -def test_wait_with_time_less_than_one(mock_handler): +@patch("aws_durable_execution_sdk_python.context.WaitOperationExecutor") +def test_wait_with_time_less_than_one(mock_executor_class): """Test wait with time less than one.""" + mock_executor = MagicMock() + mock_executor.process.return_value = None + mock_executor_class.return_value = mock_executor + mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -796,7 +916,7 @@ def test_wait_with_time_less_than_one(mock_handler): context = DurableContext(state=mock_state) with pytest.raises(ValidationError): - context.wait(0) + context.wait(Duration.from_seconds(0)) # endregion wait @@ -865,9 +985,13 @@ def test_run_in_child_context_with_name_and_config(mock_handler): @patch("aws_durable_execution_sdk_python.context.child_handler") -def test_run_in_child_context_with_parent_id(mock_handler): +def test_run_in_child_context_with_parent_id(mock_executor_class): """Test run_in_child_context with parent_id.""" - mock_handler.return_value = "parent_child_result" + mock_executor = MagicMock() + + mock_executor.process.return_value = "parent_child_result" + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -886,14 +1010,14 @@ def test_run_in_child_context_with_parent_id(mock_handler): [next(seq) for _ in range(1)] expected_id = next(seq) - call_args = mock_handler.call_args + call_args = mock_executor_class.call_args assert call_args[1]["operation_identifier"] == OperationIdentifier( expected_id, "parent456", None ) @patch("aws_durable_execution_sdk_python.context.child_handler") -def test_run_in_child_context_creates_child_context(mock_handler): +def test_run_in_child_context_creates_child_context(mock_executor_class): """Test run_in_child_context creates proper child context.""" mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( @@ -911,7 +1035,7 @@ def capture_child_context(child_context): return "child_executed" mock_callable = Mock(side_effect=capture_child_context) - mock_handler.side_effect = lambda func, **kwargs: func() + mock_executor_class.side_effect = lambda func, **kwargs: func() context = DurableContext(state=mock_state) @@ -922,9 +1046,13 @@ def capture_child_context(child_context): @patch("aws_durable_execution_sdk_python.context.child_handler") -def test_run_in_child_context_increments_counter(mock_handler): +def test_run_in_child_context_increments_counter(mock_executor_class): """Test run_in_child_context increments step counter.""" - mock_handler.return_value = "result" + mock_executor = MagicMock() + + mock_executor.process.return_value = "result" + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -946,18 +1074,22 @@ def 
test_run_in_child_context_increments_counter(mock_handler): expected_id2 = next(seq) assert context._step_counter.get_current() == 7 # noqa: SLF001 - assert mock_handler.call_args_list[0][1][ + assert mock_executor_class.call_args_list[0][1][ "operation_identifier" ] == OperationIdentifier(expected_id1, None, None) - assert mock_handler.call_args_list[1][1][ + assert mock_executor_class.call_args_list[1][1][ "operation_identifier" ] == OperationIdentifier(expected_id2, None, None) @patch("aws_durable_execution_sdk_python.context.child_handler") -def test_run_in_child_context_resolves_name_from_callable(mock_handler): +def test_run_in_child_context_resolves_name_from_callable(mock_executor_class): """Test run_in_child_context resolves name from callable._original_name.""" - mock_handler.return_value = "named_result" + mock_executor = MagicMock() + + mock_executor.process.return_value = "named_result" + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -969,7 +1101,7 @@ def test_run_in_child_context_resolves_name_from_callable(mock_handler): context.run_in_child_context(mock_callable) - call_args = mock_handler.call_args + call_args = mock_executor_class.call_args assert call_args[1]["operation_identifier"].name == "original_function_name" @@ -978,9 +1110,13 @@ def test_run_in_child_context_resolves_name_from_callable(mock_handler): # region wait_for_callback @patch("aws_durable_execution_sdk_python.context.wait_for_callback_handler") -def test_wait_for_callback_basic(mock_handler): +def test_wait_for_callback_basic(mock_executor_class): """Test wait_for_callback with basic parameters.""" - mock_handler.return_value = "callback_result" + mock_executor = MagicMock() + + mock_executor.process.return_value = "callback_result" + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -1005,9 +1141,13 @@ def test_wait_for_callback_basic(mock_handler): @patch("aws_durable_execution_sdk_python.context.wait_for_callback_handler") -def test_wait_for_callback_with_name_and_config(mock_handler): +def test_wait_for_callback_with_name_and_config(mock_executor_class): """Test wait_for_callback with name and config.""" - mock_handler.return_value = "configured_callback_result" + mock_executor = MagicMock() + + mock_executor.process.return_value = "configured_callback_result" + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -1030,9 +1170,13 @@ def test_wait_for_callback_with_name_and_config(mock_handler): @patch("aws_durable_execution_sdk_python.context.wait_for_callback_handler") -def test_wait_for_callback_resolves_name_from_submitter(mock_handler): +def test_wait_for_callback_resolves_name_from_submitter(mock_executor_class): """Test wait_for_callback resolves name from submitter._original_name.""" - mock_handler.return_value = "named_callback_result" + mock_executor = MagicMock() + + mock_executor.process.return_value = "named_callback_result" + + mock_executor_class.return_value = mock_executor mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( "arn:aws:durable:us-east-1:123456789012:execution/test" @@ -1051,7 +1195,7 @@ def 
test_wait_for_callback_resolves_name_from_submitter(mock_handler): @patch("aws_durable_execution_sdk_python.context.wait_for_callback_handler") -def test_wait_for_callback_passes_child_context(mock_handler): +def test_wait_for_callback_passes_child_context(mock_executor_class): """Test wait_for_callback passes child context to handler.""" mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = ( @@ -1064,7 +1208,7 @@ def capture_handler_call(context, submitter, name, config): assert submitter is mock_submitter return "handler_result" - mock_handler.side_effect = capture_handler_call + mock_executor_class.side_effect = capture_handler_call with patch.object(DurableContext, "run_in_child_context") as mock_run_in_child: @@ -1079,7 +1223,7 @@ def run_child_context(callable_func, name): result = context.wait_for_callback(mock_submitter) assert result == "handler_result" - mock_handler.assert_called_once() + mock_executor_class.assert_called_once() # endregion wait_for_callback @@ -1582,17 +1726,20 @@ def test_wait_strategy(state, attempt): wait_strategy=test_wait_strategy, initial_state="test" ) - # Mock the handler to track calls + # Mock the executor to track calls with patch( - "aws_durable_execution_sdk_python.context.wait_for_condition_handler" - ) as mock_handler: - mock_handler.return_value = "final_state" + "aws_durable_execution_sdk_python.context.WaitForConditionOperationExecutor" + ) as mock_executor_class: + mock_executor = MagicMock() + mock_executor.process.return_value = "final_state" + mock_executor_class.return_value = mock_executor # Call wait_for_condition method result = context.wait_for_condition(test_check, config) - # Verify wait_for_condition_handler was called (line 425) - mock_handler.assert_called_once() + # Verify executor was called + mock_executor_class.assert_called_once() + mock_executor.process.assert_called_once() assert result == "final_state" @@ -1657,3 +1804,50 @@ def test_operation_id_generation_unique(): for i in range(len(ids) - 1): assert ids[i] != ids[i + 1] + + +@patch("aws_durable_execution_sdk_python.context.InvokeOperationExecutor") +def test_invoke_with_explicit_tenant_id(mock_executor_class): + """Test invoke with explicit tenant_id in config.""" + mock_executor = MagicMock() + + mock_executor.process.return_value = "result" + + mock_executor_class.return_value = mock_executor + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = ( + "arn:aws:durable:us-east-1:123456789012:execution/test" + ) + + config = InvokeConfig(tenant_id="explicit-tenant") + context = DurableContext(state=mock_state) + + result = context.invoke("test_function", "payload", config=config) + + assert result == "result" + call_args = mock_executor_class.call_args[1] + assert call_args["config"].tenant_id == "explicit-tenant" + + +@patch("aws_durable_execution_sdk_python.context.InvokeOperationExecutor") +def test_invoke_without_tenant_id_defaults_to_none(mock_executor_class): + """Test invoke without tenant_id defaults to None.""" + mock_executor = MagicMock() + + mock_executor.process.return_value = "result" + + mock_executor_class.return_value = mock_executor + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = ( + "arn:aws:durable:us-east-1:123456789012:execution/test" + ) + + context = DurableContext(state=mock_state) + + result = context.invoke("test_function", "payload") + + assert result == "result" + # Config is created as InvokeConfig() when not provided + call_args = mock_executor_class.call_args[1] + 
assert isinstance(call_args["config"], InvokeConfig) + assert call_args["config"].tenant_id is None diff --git a/tests/e2e/checkpoint_response_int_test.py b/tests/e2e/checkpoint_response_int_test.py new file mode 100644 index 0000000..c0fd0f5 --- /dev/null +++ b/tests/e2e/checkpoint_response_int_test.py @@ -0,0 +1,768 @@ +"""Integration tests for immediate checkpoint response handling. + +Tests end-to-end operation execution with the immediate response handling +that's implemented via the OperationExecutor base class pattern. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING +from unittest.mock import Mock, patch + +import pytest + +from aws_durable_execution_sdk_python.config import ChildConfig, Duration +from aws_durable_execution_sdk_python.context import DurableContext, durable_step +from aws_durable_execution_sdk_python.exceptions import InvocationError +from aws_durable_execution_sdk_python.execution import ( + InvocationStatus, + durable_execution, +) +from aws_durable_execution_sdk_python.lambda_service import ( + CallbackDetails, + CheckpointOutput, + CheckpointUpdatedExecutionState, + Operation, + OperationStatus, + OperationType, +) + +if TYPE_CHECKING: + from aws_durable_execution_sdk_python.types import StepContext + + +def create_mock_checkpoint_with_operations(): + """Create a mock checkpoint function that properly tracks operations. + + Returns a tuple of (mock_checkpoint_function, checkpoint_calls_list). + The mock properly maintains an operations list that gets updated with each checkpoint. + """ + checkpoint_calls = [] + operations = [ + Operation( + operation_id="execution-1", + operation_type=OperationType.EXECUTION, + status=OperationStatus.STARTED, + ) + ] + + def mock_checkpoint( + durable_execution_arn, + checkpoint_token, + updates, + client_token="token", # noqa: S107 + ): + checkpoint_calls.append(updates) + + # Convert updates to Operation objects and add to operations list + for update in updates: + op = Operation( + operation_id=update.operation_id, + operation_type=update.operation_type, + status=OperationStatus.STARTED, + parent_id=update.parent_id, + ) + operations.append(op) + + return CheckpointOutput( + checkpoint_token="new_token", # noqa: S106 + new_execution_state=CheckpointUpdatedExecutionState( + operations=operations.copy() + ), + ) + + return mock_checkpoint, checkpoint_calls + + +def test_end_to_end_step_operation_with_double_check(): + """Test end-to-end step operation execution with double-check pattern. + + Verifies that the OperationExecutor.process() method properly calls + check_result_status() twice when a checkpoint is created, enabling + immediate response handling. 
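+
+    A rough sketch of that double-check, with hypothetical names (the
+    executor's real internals may differ)::
+
+        status = executor.check_result_status()       # 1st check: replayed result?
+        if status is None:                            # operation still unresolved
+            executor.checkpoint_start()               # checkpoint may resolve it inline
+            status = executor.check_result_status()   # 2nd check: immediate response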
+ """ + + @durable_step + def my_step(step_context: StepContext) -> str: + return "step_result" + + @durable_execution + def my_handler(event, context: DurableContext) -> str: + result: str = context.step(my_step()) + return result + + with patch( + "aws_durable_execution_sdk_python.execution.LambdaClient" + ) as mock_client_class: + mock_client = Mock() + mock_client_class.initialize_client.return_value = mock_client + + mock_checkpoint, checkpoint_calls = create_mock_checkpoint_with_operations() + mock_client.checkpoint = mock_checkpoint + + event = { + "DurableExecutionArn": "test-arn", + "CheckpointToken": "test-token", + "InitialExecutionState": { + "Operations": [ + { + "Id": "execution-1", + "Type": "EXECUTION", + "Status": "STARTED", + "ExecutionDetails": {"InputPayload": "{}"}, + } + ], + "NextMarker": "", + }, + "LocalRunner": True, + } + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request-id" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 0 # noqa: SLF001 + lambda_context.invoked_function_arn = "test-arn" + lambda_context.tenant_id = None + + result = my_handler(event, lambda_context) + + assert result["Status"] == InvocationStatus.SUCCEEDED.value + assert result["Result"] == '"step_result"' + + # Verify checkpoints were created (START + SUCCEED) + all_operations = [op for batch in checkpoint_calls for op in batch] + assert len(all_operations) == 2 + + +def test_end_to_end_multiple_operations_execute_sequentially(): + """Test end-to-end execution with multiple operations. + + Verifies that multiple operations in a workflow execute correctly + with the immediate response handling pattern. + """ + + @durable_step + def step1(step_context: StepContext) -> str: + return "result1" + + @durable_step + def step2(step_context: StepContext) -> str: + return "result2" + + @durable_execution + def my_handler(event, context: DurableContext) -> list[str]: + return [context.step(step1()), context.step(step2())] + + with patch( + "aws_durable_execution_sdk_python.execution.LambdaClient" + ) as mock_client_class: + mock_client = Mock() + mock_client_class.initialize_client.return_value = mock_client + + mock_checkpoint, checkpoint_calls = create_mock_checkpoint_with_operations() + mock_client.checkpoint = mock_checkpoint + + event = { + "DurableExecutionArn": "test-arn", + "CheckpointToken": "test-token", + "InitialExecutionState": { + "Operations": [ + { + "Id": "execution-1", + "Type": "EXECUTION", + "Status": "STARTED", + "ExecutionDetails": {"InputPayload": "{}"}, + } + ], + "NextMarker": "", + }, + "LocalRunner": True, + } + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request-id" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 0 # noqa: SLF001 + lambda_context.invoked_function_arn = "test-arn" + lambda_context.tenant_id = None + + result = my_handler(event, lambda_context) + + assert result["Status"] == InvocationStatus.SUCCEEDED.value + assert result["Result"] == '["result1", "result2"]' + + # Verify all checkpoints were created (2 START + 2 SUCCEED) + all_operations = [op for batch in checkpoint_calls for op in batch] + assert len(all_operations) == 4 + + +def test_end_to_end_wait_operation_with_double_check(): + """Test end-to-end wait operation execution with double-check pattern. + + Verifies that wait operations properly use the double-check pattern + for immediate response handling. 
+ """ + + @durable_execution + def my_handler(event, context: DurableContext) -> str: + context.wait(Duration.from_seconds(5)) + return "completed" + + with patch( + "aws_durable_execution_sdk_python.execution.LambdaClient" + ) as mock_client_class: + mock_client = Mock() + mock_client_class.initialize_client.return_value = mock_client + + mock_checkpoint, checkpoint_calls = create_mock_checkpoint_with_operations() + mock_client.checkpoint = mock_checkpoint + + event = { + "DurableExecutionArn": "test-arn", + "CheckpointToken": "test-token", + "InitialExecutionState": { + "Operations": [ + { + "Id": "execution-1", + "Type": "EXECUTION", + "Status": "STARTED", + "ExecutionDetails": {"InputPayload": "{}"}, + } + ], + "NextMarker": "", + }, + "LocalRunner": True, + } + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request-id" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 0 # noqa: SLF001 + lambda_context.invoked_function_arn = "test-arn" + lambda_context.tenant_id = None + + # Wait will suspend, so we expect PENDING status + result = my_handler(event, lambda_context) + + assert result["Status"] == InvocationStatus.PENDING.value + + # Verify wait checkpoint was created + all_operations = [op for batch in checkpoint_calls for op in batch] + assert len(all_operations) >= 1 + + +def test_end_to_end_checkpoint_synchronization_with_operations_list(): + """Test that synchronous checkpoints properly update operations list. + + Verifies that when is_sync=True, the operations list is updated + before the second status check occurs. + """ + + @durable_step + def my_step(step_context: StepContext) -> str: + return "result" + + @durable_execution + def my_handler(event, context: DurableContext) -> str: + return context.step(my_step()) + + with patch( + "aws_durable_execution_sdk_python.execution.LambdaClient" + ) as mock_client_class: + mock_client = Mock() + mock_client_class.initialize_client.return_value = mock_client + + mock_checkpoint, checkpoint_calls = create_mock_checkpoint_with_operations() + mock_client.checkpoint = mock_checkpoint + + event = { + "DurableExecutionArn": "test-arn", + "CheckpointToken": "test-token", + "InitialExecutionState": { + "Operations": [ + { + "Id": "execution-1", + "Type": "EXECUTION", + "Status": "STARTED", + "ExecutionDetails": {"InputPayload": "{}"}, + } + ], + "NextMarker": "", + }, + "LocalRunner": True, + } + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request-id" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 0 # noqa: SLF001 + lambda_context.invoked_function_arn = "test-arn" + lambda_context.tenant_id = None + + result = my_handler(event, lambda_context) + + assert result["Status"] == InvocationStatus.SUCCEEDED.value + + # Verify operations list was properly maintained + all_operations = [op for batch in checkpoint_calls for op in batch] + assert len(all_operations) >= 2 # At least START and SUCCEED + + +def test_callback_deferred_error_handling_to_result(): + """Test callback deferred error handling pattern. + + Verifies that callback operations properly return callback_id through + the immediate response handling pattern, enabling deferred error handling. 
+ """ + + @durable_step + def step_after_callback(step_context: StepContext) -> str: + return "code_executed_after_callback" + + @durable_execution + def my_handler(event, context: DurableContext) -> str: + # Create callback + callback_id = context.create_callback("test_callback") + + # This code executes even if callback will eventually fail + # This is the deferred error handling pattern + result = context.step(step_after_callback()) + + return f"{callback_id}:{result}" + + with patch( + "aws_durable_execution_sdk_python.execution.LambdaClient" + ) as mock_client_class: + mock_client = Mock() + mock_client_class.initialize_client.return_value = mock_client + + checkpoint_calls = [] + operations = [ + Operation( + operation_id="execution-1", + operation_type=OperationType.EXECUTION, + status=OperationStatus.STARTED, + ) + ] + + def mock_checkpoint( + durable_execution_arn, + checkpoint_token, + updates, + client_token="token", # noqa: S107 + ): + checkpoint_calls.append(updates) + + # Add operations with proper details + for update in updates: + if update.operation_type == OperationType.CALLBACK: + op = Operation( + operation_id=update.operation_id, + operation_type=update.operation_type, + status=OperationStatus.STARTED, + parent_id=update.parent_id, + callback_details=CallbackDetails( + callback_id=f"cb-{update.operation_id[:8]}" + ), + ) + else: + op = Operation( + operation_id=update.operation_id, + operation_type=update.operation_type, + status=OperationStatus.STARTED, + parent_id=update.parent_id, + ) + operations.append(op) + + return CheckpointOutput( + checkpoint_token="new_token", # noqa: S106 + new_execution_state=CheckpointUpdatedExecutionState( + operations=operations.copy() + ), + ) + + mock_client.checkpoint = mock_checkpoint + + event = { + "DurableExecutionArn": "test-arn", + "CheckpointToken": "test-token", + "InitialExecutionState": { + "Operations": [ + { + "Id": "execution-1", + "Type": "EXECUTION", + "Status": "STARTED", + "ExecutionDetails": {"InputPayload": "{}"}, + } + ], + "NextMarker": "", + }, + "LocalRunner": True, + } + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request-id" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 0 # noqa: SLF001 + lambda_context.invoked_function_arn = "test-arn" + lambda_context.tenant_id = None + + result = my_handler(event, lambda_context) + + # Verify execution succeeded and code after callback executed + assert result["Status"] == InvocationStatus.SUCCEEDED.value + assert "code_executed_after_callback" in result["Result"] + + +def test_end_to_end_invoke_operation_with_double_check(): + """Test end-to-end invoke operation execution with double-check pattern. + + Verifies that invoke operations properly use the double-check pattern + for immediate response handling. 
+ """ + + @durable_execution + def my_handler(event, context: DurableContext): + context.invoke("my-function", {"data": "test"}) + + with patch( + "aws_durable_execution_sdk_python.execution.LambdaClient" + ) as mock_client_class: + mock_client = Mock() + mock_client_class.initialize_client.return_value = mock_client + + mock_checkpoint, checkpoint_calls = create_mock_checkpoint_with_operations() + mock_client.checkpoint = mock_checkpoint + + event = { + "DurableExecutionArn": "test-arn", + "CheckpointToken": "test-token", + "InitialExecutionState": { + "Operations": [ + { + "Id": "execution-1", + "Type": "EXECUTION", + "Status": "STARTED", + "ExecutionDetails": {"InputPayload": "{}"}, + } + ], + "NextMarker": "", + }, + "LocalRunner": True, + } + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request-id" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 0 # noqa: SLF001 + lambda_context.invoked_function_arn = "test-arn" + lambda_context.tenant_id = None + + # Invoke will suspend, so we expect PENDING status + result = my_handler(event, lambda_context) + + assert result["Status"] == InvocationStatus.PENDING.value + + # Verify invoke checkpoint was created + all_operations = [op for batch in checkpoint_calls for op in batch] + assert len(all_operations) >= 1 + + +def test_end_to_end_child_context_with_async_checkpoint(): + """Test end-to-end child context execution with async checkpoint. + + Verifies that child context operations use async checkpoint (is_sync=False) + and execute correctly without waiting for immediate response. + """ + + def child_function(ctx: DurableContext) -> str: + return "child_result" + + @durable_execution + def my_handler(event, context: DurableContext) -> str: + result: str = context.run_in_child_context(child_function) + return result + + with patch( + "aws_durable_execution_sdk_python.execution.LambdaClient" + ) as mock_client_class: + mock_client = Mock() + mock_client_class.initialize_client.return_value = mock_client + + mock_checkpoint, checkpoint_calls = create_mock_checkpoint_with_operations() + mock_client.checkpoint = mock_checkpoint + + event = { + "DurableExecutionArn": "test-arn", + "CheckpointToken": "test-token", + "InitialExecutionState": { + "Operations": [ + { + "Id": "execution-1", + "Type": "EXECUTION", + "Status": "STARTED", + "ExecutionDetails": {"InputPayload": "{}"}, + } + ], + "NextMarker": "", + }, + "LocalRunner": True, + } + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request-id" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 0 # noqa: SLF001 + lambda_context.invoked_function_arn = "test-arn" + lambda_context.tenant_id = None + + result = my_handler(event, lambda_context) + + assert result["Status"] == InvocationStatus.SUCCEEDED.value + assert result["Result"] == '"child_result"' + + # Verify checkpoints were created (START + SUCCEED) + all_operations = [op for batch in checkpoint_calls for op in batch] + assert len(all_operations) == 2 + + +def test_end_to_end_child_context_replay_children_mode(): + """Test end-to-end child context with large payload and ReplayChildren mode. + + Verifies that child context with large result (>256KB) triggers replay_children mode, + uses summary generator if provided, and re-executes function on replay. 
+ """ + execution_count = {"count": 0} + + def child_function_with_large_result(ctx: DurableContext) -> str: + execution_count["count"] += 1 + return "large" * 256 * 1024 + + def summary_generator(result: str) -> str: + return f"summary_of_{len(result)}_bytes" + + @durable_execution + def my_handler(event, context: DurableContext) -> str: + context.run_in_child_context( + child_function_with_large_result, + config=ChildConfig(summary_generator=summary_generator), + ) + return f"executed_{execution_count['count']}_times" + + with patch( + "aws_durable_execution_sdk_python.execution.LambdaClient" + ) as mock_client_class: + mock_client = Mock() + mock_client_class.initialize_client.return_value = mock_client + + checkpoint_calls = [] + operations = [ + Operation( + operation_id="execution-1", + operation_type=OperationType.EXECUTION, + status=OperationStatus.STARTED, + ) + ] + + def mock_checkpoint( + durable_execution_arn, + checkpoint_token, + updates, + client_token="token", # noqa: S107 + ): + checkpoint_calls.append(updates) + + for update in updates: + op = Operation( + operation_id=update.operation_id, + operation_type=update.operation_type, + status=OperationStatus.STARTED, + parent_id=update.parent_id, + ) + operations.append(op) + + return CheckpointOutput( + checkpoint_token="new_token", # noqa: S106 + new_execution_state=CheckpointUpdatedExecutionState( + operations=operations.copy() + ), + ) + + mock_client.checkpoint = mock_checkpoint + + event = { + "DurableExecutionArn": "test-arn", + "CheckpointToken": "test-token", + "InitialExecutionState": { + "Operations": [ + { + "Id": "execution-1", + "Type": "EXECUTION", + "Status": "STARTED", + "ExecutionDetails": {"InputPayload": "{}"}, + } + ], + "NextMarker": "", + }, + "LocalRunner": True, + } + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request-id" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 0 # noqa: SLF001 + lambda_context.invoked_function_arn = "test-arn" + lambda_context.tenant_id = None + + result = my_handler(event, lambda_context) + + assert result["Status"] == InvocationStatus.SUCCEEDED.value + # Function executed once during initial execution + assert execution_count["count"] == 1 + + # Verify replay_children was set in SUCCEED checkpoint + all_operations = [op for batch in checkpoint_calls for op in batch] + succeed_updates = [ + op + for op in all_operations + if hasattr(op, "action") and op.action.value == "SUCCEED" + ] + assert len(succeed_updates) == 1 + assert succeed_updates[0].context_options.replay_children is True + + +def test_end_to_end_child_context_error_handling(): + """Test end-to-end child context error handling. + + Verifies that child context that raises exception creates FAIL checkpoint + and error is wrapped as CallableRuntimeError. 
+ """ + + def child_function_that_fails(ctx: DurableContext) -> str: + msg = "Child function error" + raise ValueError(msg) + + @durable_execution + def my_handler(event, context: DurableContext) -> str: + result: str = context.run_in_child_context(child_function_that_fails) + return result + + with patch( + "aws_durable_execution_sdk_python.execution.LambdaClient" + ) as mock_client_class: + mock_client = Mock() + mock_client_class.initialize_client.return_value = mock_client + + mock_checkpoint, checkpoint_calls = create_mock_checkpoint_with_operations() + mock_client.checkpoint = mock_checkpoint + + event = { + "DurableExecutionArn": "test-arn", + "CheckpointToken": "test-token", + "InitialExecutionState": { + "Operations": [ + { + "Id": "execution-1", + "Type": "EXECUTION", + "Status": "STARTED", + "ExecutionDetails": {"InputPayload": "{}"}, + } + ], + "NextMarker": "", + }, + "LocalRunner": True, + } + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request-id" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 0 # noqa: SLF001 + lambda_context.invoked_function_arn = "test-arn" + lambda_context.tenant_id = None + + result = my_handler(event, lambda_context) + + # Verify execution failed + assert result["Status"] == InvocationStatus.FAILED.value + + # Verify FAIL checkpoint was created + all_operations = [op for batch in checkpoint_calls for op in batch] + fail_updates = [ + op + for op in all_operations + if hasattr(op, "action") and op.action.value == "FAIL" + ] + assert len(fail_updates) == 1 + + +def test_end_to_end_child_context_invocation_error_reraised(): + """Test end-to-end child context InvocationError re-raising. + + Verifies that child context that raises InvocationError creates FAIL checkpoint + and re-raises InvocationError (not wrapped) to enable retry at execution handler level. 
+ """ + + def child_function_with_invocation_error(ctx: DurableContext) -> str: + msg = "Invocation failed in child" + raise InvocationError(msg) + + @durable_execution + def my_handler(event, context: DurableContext) -> str: + result: str = context.run_in_child_context(child_function_with_invocation_error) + return result + + with patch( + "aws_durable_execution_sdk_python.execution.LambdaClient" + ) as mock_client_class: + mock_client = Mock() + mock_client_class.initialize_client.return_value = mock_client + + mock_checkpoint, checkpoint_calls = create_mock_checkpoint_with_operations() + mock_client.checkpoint = mock_checkpoint + + event = { + "DurableExecutionArn": "test-arn", + "CheckpointToken": "test-token", + "InitialExecutionState": { + "Operations": [ + { + "Id": "execution-1", + "Type": "EXECUTION", + "Status": "STARTED", + "ExecutionDetails": {"InputPayload": "{}"}, + } + ], + "NextMarker": "", + }, + "LocalRunner": True, + } + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request-id" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 0 # noqa: SLF001 + lambda_context.invoked_function_arn = "test-arn" + lambda_context.tenant_id = None + + # InvocationError should be re-raised (not wrapped) to trigger Lambda retry + with pytest.raises(InvocationError, match="Invocation failed in child"): + my_handler(event, lambda_context) + + # Verify FAIL checkpoint was created before re-raising + all_operations = [op for batch in checkpoint_calls for op in batch] + fail_updates = [ + op + for op in all_operations + if hasattr(op, "action") and op.action.value == "FAIL" + ] + assert len(fail_updates) == 1 diff --git a/tests/e2e/execution_int_test.py b/tests/e2e/execution_int_test.py index b5ec116..5a884bf 100644 --- a/tests/e2e/execution_int_test.py +++ b/tests/e2e/execution_int_test.py @@ -7,21 +7,24 @@ import pytest +from aws_durable_execution_sdk_python.config import Duration from aws_durable_execution_sdk_python.context import ( DurableContext, durable_step, + durable_wait_for_callback, durable_with_child_context, ) from aws_durable_execution_sdk_python.execution import ( InvocationStatus, durable_execution, ) - -# LambdaContext no longer needed - using duck typing from aws_durable_execution_sdk_python.lambda_service import ( + CallbackDetails, CheckpointOutput, CheckpointUpdatedExecutionState, + Operation, OperationAction, + OperationStatus, OperationType, ) from aws_durable_execution_sdk_python.logger import LoggerInterface @@ -31,6 +34,49 @@ from aws_durable_execution_sdk_python.types import StepContext +def create_mock_checkpoint_with_operations(): + """Create a mock checkpoint function that properly tracks operations. + + Returns a tuple of (mock_checkpoint_function, checkpoint_calls_list). + The mock properly maintains an operations list that gets updated with each checkpoint. 
+ """ + checkpoint_calls = [] + operations = [ + Operation( + operation_id="execution-1", + operation_type=OperationType.EXECUTION, + status=OperationStatus.STARTED, + ) + ] + + def mock_checkpoint( + durable_execution_arn, + checkpoint_token, + updates, + client_token="token", # noqa: S107 + ): + checkpoint_calls.append(updates) + + # Convert updates to Operation objects and add to operations list + for update in updates: + op = Operation( + operation_id=update.operation_id, + operation_type=update.operation_type, + status=OperationStatus.STARTED, # New operations start as STARTED + parent_id=update.parent_id, + ) + operations.append(op) + + return CheckpointOutput( + checkpoint_token="new_token", # noqa: S106 + new_execution_state=CheckpointUpdatedExecutionState( + operations=operations.copy() + ), + ) + + return mock_checkpoint, checkpoint_calls + + def test_step_different_ways_to_pass_args(): def step_plain(step_context: StepContext) -> str: return "from step plain" @@ -67,7 +113,7 @@ def my_handler(event, context: DurableContext) -> list[str]: "aws_durable_execution_sdk_python.execution.LambdaClient" ) as mock_client_class: mock_client = Mock() - mock_client_class.initialize_local_runner_client.return_value = mock_client + mock_client_class.initialize_client.return_value = mock_client # Mock the checkpoint method to track calls checkpoint_calls = [] @@ -153,7 +199,7 @@ def my_handler(event, context: DurableContext): "aws_durable_execution_sdk_python.execution.LambdaClient" ) as mock_client_class: mock_client = Mock() - mock_client_class.initialize_local_runner_client.return_value = mock_client + mock_client_class.initialize_client.return_value = mock_client # Mock the checkpoint method to track calls checkpoint_calls = [] @@ -203,13 +249,6 @@ def mock_checkpoint( # Execute the handler result = my_handler(event, lambda_context) - my_logger.info.assert_called_once_with( - "from step %s %s", - 123, - "str", - extra={"execution_arn": "test-arn", "name": "mystep"}, - ) - assert result["Status"] == InvocationStatus.SUCCEEDED.value # 1 START checkpoint, 1 SUCCEED checkpoint (batched together) @@ -218,6 +257,18 @@ def mock_checkpoint( assert len(all_operations) == 2 operation_id = next(operation_id_sequence()) + my_logger.info.assert_called_once_with( + "from step %s %s", + 123, + "str", + extra={ + "executionArn": "test-arn", + "operationName": "mystep", + "attempt": 1, + "operationId": operation_id, + }, + ) + # Check the START operation start_op = all_operations[0] assert start_op.operation_type == OperationType.STEP @@ -238,7 +289,7 @@ def test_wait_inside_run_in_childcontext(): @durable_with_child_context def func(child_context: DurableContext, a: int, b: int): mock_inside_child(a, b) - child_context.wait(1) + child_context.wait(Duration.from_seconds(1)) @durable_execution def my_handler(event, context): @@ -249,24 +300,10 @@ def my_handler(event, context): "aws_durable_execution_sdk_python.execution.LambdaClient" ) as mock_client_class: mock_client = Mock() - mock_client_class.initialize_local_runner_client.return_value = mock_client - - # Mock the checkpoint method to track calls - checkpoint_calls = [] - - def mock_checkpoint( - durable_execution_arn, - checkpoint_token, - updates, - client_token="token", # noqa: S107 - ): - checkpoint_calls.append(updates) - - return CheckpointOutput( - checkpoint_token="new_token", # noqa: S106 - new_execution_state=CheckpointUpdatedExecutionState(), - ) + mock_client_class.initialize_client.return_value = mock_client + # Use helper to create mock 
that properly tracks operations + mock_checkpoint, checkpoint_calls = create_mock_checkpoint_with_operations() mock_client.checkpoint = mock_checkpoint # Create test event @@ -355,7 +392,7 @@ def my_handler(event, context: DurableContext): "aws_durable_execution_sdk_python.execution.LambdaClient" ) as mock_client_class: mock_client = Mock() - mock_client_class.initialize_local_runner_client.return_value = mock_client + mock_client_class.initialize_client.return_value = mock_client # Mock the checkpoint method to raise an error (using RuntimeError as a generic exception) def mock_checkpoint_failure( @@ -409,7 +446,7 @@ def test_wait_not_caught_by_exception(): @durable_execution def my_handler(event: Any, context: DurableContext): try: - context.wait(1) + context.wait(Duration.from_seconds(1)) except Exception as err: msg = "This should not be caught" raise CustomError(msg) from err @@ -418,9 +455,77 @@ def my_handler(event: Any, context: DurableContext): "aws_durable_execution_sdk_python.execution.LambdaClient" ) as mock_client_class: mock_client = Mock() - mock_client_class.initialize_local_runner_client.return_value = mock_client + mock_client_class.initialize_client.return_value = mock_client + + # Use helper to create mock that properly tracks operations + mock_checkpoint, checkpoint_calls = create_mock_checkpoint_with_operations() + mock_client.checkpoint = mock_checkpoint + + # Create test event + event = { + "DurableExecutionArn": "test-arn", + "CheckpointToken": "test-token", + "InitialExecutionState": { + "Operations": [ + { + "Id": "execution-1", + "Type": "EXECUTION", + "Status": "STARTED", + "ExecutionDetails": {"InputPayload": "{}"}, + } + ], + "NextMarker": "", + }, + "LocalRunner": True, + } + + # Create mock lambda context + lambda_context = Mock() + lambda_context.aws_request_id = "test-request-id" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 0 # noqa: SLF001 + lambda_context.invoked_function_arn = "test-arn" + lambda_context.tenant_id = None + + # Execute the handler + result = my_handler(event, lambda_context) + operation_ids = operation_id_sequence() + + # Assert the execution returns PENDING status + assert result["Status"] == InvocationStatus.PENDING.value + + # Assert that only 1 checkpoint was created for the wait operation + assert len(checkpoint_calls) == 1 + + # Check the wait checkpoint + checkpoint = checkpoint_calls[0][0] + assert checkpoint.operation_type is OperationType.WAIT + assert checkpoint.action is OperationAction.START + assert checkpoint.operation_id == next(operation_ids) + assert checkpoint.wait_options.wait_seconds == 1 + + +def test_durable_wait_for_callback_decorator(): + """Test the durable_wait_for_callback decorator with additional parameters.""" + + mock_submitter = Mock() + + @durable_wait_for_callback + def submit_to_external_system(callback_id, context, task_name, priority): + mock_submitter(callback_id, task_name, priority) + context.logger.info("Submitting %s with callback %s", task_name, callback_id) + + @durable_execution + def my_handler(event, context): + context.wait_for_callback(submit_to_external_system("my_task", priority=5)) + + with patch( + "aws_durable_execution_sdk_python.execution.LambdaClient" + ) as mock_client_class: + mock_client = Mock() + mock_client_class.initialize_client.return_value = mock_client - # Mock the checkpoint method to track calls checkpoint_calls = [] def mock_checkpoint( @@ -431,14 +536,29 @@ def mock_checkpoint( ): 
checkpoint_calls.append(updates) + # For CALLBACK operations, return the operation with callback details + operations = [ + Operation( + operation_id=update.operation_id, + operation_type=OperationType.CALLBACK, + status=OperationStatus.STARTED, + callback_details=CallbackDetails( + callback_id=f"callback-{update.operation_id[:8]}" + ), + ) + for update in updates + if update.operation_type == OperationType.CALLBACK + ] + return CheckpointOutput( checkpoint_token="new_token", # noqa: S106 - new_execution_state=CheckpointUpdatedExecutionState(), + new_execution_state=CheckpointUpdatedExecutionState( + operations=operations, next_marker=None + ), ) mock_client.checkpoint = mock_checkpoint - # Create test event event = { "DurableExecutionArn": "test-arn", "CheckpointToken": "test-token", @@ -456,7 +576,6 @@ def mock_checkpoint( "LocalRunner": True, } - # Create mock lambda context lambda_context = Mock() lambda_context.aws_request_id = "test-request-id" lambda_context.client_context = None @@ -465,19 +584,40 @@ def mock_checkpoint( lambda_context.invoked_function_arn = "test-arn" lambda_context.tenant_id = None - # Execute the handler result = my_handler(event, lambda_context) - operation_ids = operation_id_sequence() - # Assert the execution returns PENDING status assert result["Status"] == InvocationStatus.PENDING.value - # Assert that only 1 checkpoint was created for the wait operation - assert len(checkpoint_calls) == 1 + all_operations = [op for batch in checkpoint_calls for op in batch] + assert len(all_operations) == 4 - # Check the wait checkpoint - checkpoint = checkpoint_calls[0][0] - assert checkpoint.operation_type is OperationType.WAIT - assert checkpoint.action is OperationAction.START - assert checkpoint.operation_id == next(operation_ids) - assert checkpoint.wait_options.wait_seconds == 1 + # First: CONTEXT START + first_checkpoint = all_operations[0] + assert first_checkpoint.operation_type is OperationType.CONTEXT + assert first_checkpoint.action is OperationAction.START + assert first_checkpoint.name == "submit_to_external_system" + + # Second: CALLBACK START + second_checkpoint = all_operations[1] + assert second_checkpoint.operation_type is OperationType.CALLBACK + assert second_checkpoint.action is OperationAction.START + assert second_checkpoint.parent_id == first_checkpoint.operation_id + assert second_checkpoint.name == "submit_to_external_system create callback id" + + # Third: STEP START + third_checkpoint = all_operations[2] + assert third_checkpoint.operation_type is OperationType.STEP + assert third_checkpoint.action is OperationAction.START + assert third_checkpoint.parent_id == first_checkpoint.operation_id + assert third_checkpoint.name == "submit_to_external_system submitter" + + # Fourth: STEP SUCCEED + fourth_checkpoint = all_operations[3] + assert fourth_checkpoint.operation_type is OperationType.STEP + assert fourth_checkpoint.action is OperationAction.SUCCEED + assert fourth_checkpoint.operation_id == third_checkpoint.operation_id + + mock_submitter.assert_called_once() + call_args = mock_submitter.call_args[0] + assert call_args[1] == "my_task" + assert call_args[2] == 5 diff --git a/tests/exceptions_test.py b/tests/exceptions_test.py index ac425ac..f3ed213 100644 --- a/tests/exceptions_test.py +++ b/tests/exceptions_test.py @@ -4,15 +4,18 @@ from unittest.mock import patch import pytest +from botocore.exceptions import ClientError # type: ignore[import-untyped] from aws_durable_execution_sdk_python.exceptions import ( CallableRuntimeError, 
CallableRuntimeErrorSerializableDetails, CheckpointError, + CheckpointErrorCategory, DurableExecutionsError, ExecutionError, InvocationError, OrderedLockError, + OrphanedChildException, StepInterruptedError, SuspendExecution, TerminationReason, @@ -41,13 +44,101 @@ def test_invocation_error(): def test_checkpoint_error(): """Test CheckpointError exception.""" - error = CheckpointError("checkpoint failed") + error = CheckpointError( + "checkpoint failed", error_category=CheckpointErrorCategory.EXECUTION + ) assert str(error) == "checkpoint failed" assert isinstance(error, InvocationError) assert isinstance(error, UnrecoverableError) assert error.termination_reason == TerminationReason.CHECKPOINT_FAILED +def test_checkpoint_error_classification_invalid_token_invocation(): + """Test 4xx InvalidParameterValueException with Invalid Checkpoint Token is invocation error.""" + error_response = { + "Error": { + "Code": "InvalidParameterValueException", + "Message": "Invalid Checkpoint Token: token expired", + }, + "ResponseMetadata": {"HTTPStatusCode": 400}, + } + client_error = ClientError(error_response, "Checkpoint") + + result = CheckpointError.from_exception(client_error) + + assert result.error_category == CheckpointErrorCategory.INVOCATION + assert not result.is_retriable() + + +def test_checkpoint_error_classification_other_4xx_execution(): + """Test other 4xx errors are execution errors.""" + error_response = { + "Error": {"Code": "ValidationException", "Message": "Invalid parameter value"}, + "ResponseMetadata": {"HTTPStatusCode": 400}, + } + client_error = ClientError(error_response, "Checkpoint") + + result = CheckpointError.from_exception(client_error) + + assert result.error_category == CheckpointErrorCategory.EXECUTION + assert result.is_retriable() + + +def test_checkpoint_error_classification_429_invocation(): + """Test 429 errors are invocation errors (retried via Lambda re-invocation, not in-place).""" + error_response = { + "Error": {"Code": "TooManyRequestsException", "Message": "Rate limit exceeded"}, + "ResponseMetadata": {"HTTPStatusCode": 429}, + } + client_error = ClientError(error_response, "Checkpoint") + + result = CheckpointError.from_exception(client_error) + + assert result.error_category == CheckpointErrorCategory.INVOCATION + assert not result.is_retriable() + + +def test_checkpoint_error_classification_invalid_param_without_token_execution(): + """Test 4xx InvalidParameterValueException without Invalid Checkpoint Token is execution error.""" + error_response = { + "Error": { + "Code": "InvalidParameterValueException", + "Message": "Some other invalid parameter", + }, + "ResponseMetadata": {"HTTPStatusCode": 400}, + } + client_error = ClientError(error_response, "Checkpoint") + + result = CheckpointError.from_exception(client_error) + + assert result.error_category == CheckpointErrorCategory.EXECUTION + assert result.is_retriable() + + +def test_checkpoint_error_classification_5xx_invocation(): + """Test 5xx errors are invocation errors.""" + error_response = { + "Error": {"Code": "InternalServerError", "Message": "Service unavailable"}, + "ResponseMetadata": {"HTTPStatusCode": 500}, + } + client_error = ClientError(error_response, "Checkpoint") + + result = CheckpointError.from_exception(client_error) + + assert result.error_category == CheckpointErrorCategory.INVOCATION + assert not result.is_retriable() + + +def test_checkpoint_error_classification_unknown_invocation(): + """Test unknown errors are invocation errors.""" + unknown_error = Exception("Network timeout") + + result =
CheckpointError.from_exception(unknown_error) + + assert result.error_category == CheckpointErrorCategory.INVOCATION + assert not result.is_retriable() + + def test_validation_error(): """Test ValidationError exception.""" error = ValidationError("validation failed") @@ -242,3 +333,44 @@ def test_execution_error_with_custom_termination_reason(): error = ExecutionError("custom error", TerminationReason.SERIALIZATION_ERROR) assert str(error) == "custom error" assert error.termination_reason == TerminationReason.SERIALIZATION_ERROR + + +def test_orphaned_child_exception_is_base_exception(): + """Test that OrphanedChildException is a BaseException, not Exception.""" + assert issubclass(OrphanedChildException, BaseException) + assert not issubclass(OrphanedChildException, Exception) + + +def test_orphaned_child_exception_bypasses_user_exception_handler(): + """Test that OrphanedChildException cannot be caught by user's except Exception handler.""" + caught_by_exception = False + caught_by_base_exception = False + exception_instance = None + + try: + msg = "test message" + raise OrphanedChildException(msg, operation_id="test_op_123") + except Exception: # noqa: BLE001 + caught_by_exception = True + except BaseException as e: # noqa: BLE001 + caught_by_base_exception = True + exception_instance = e + + expected_msg = "OrphanedChildException should not be caught by except Exception" + assert not caught_by_exception, expected_msg + expected_base_msg = ( + "OrphanedChildException should be caught by except BaseException" + ) + assert caught_by_base_exception, expected_base_msg + + # Verify operation_id is preserved + assert isinstance(exception_instance, OrphanedChildException) + assert exception_instance.operation_id == "test_op_123" + assert str(exception_instance) == "test message" + + +def test_orphaned_child_exception_with_operation_id(): + """Test OrphanedChildException stores operation_id correctly.""" + exception = OrphanedChildException("parent completed", operation_id="child_op_456") + assert exception.operation_id == "child_op_456" + assert str(exception) == "parent completed" diff --git a/tests/execution_test.py b/tests/execution_test.py index 6678d73..4383ceb 100644 --- a/tests/execution_test.py +++ b/tests/execution_test.py @@ -11,7 +11,9 @@ from aws_durable_execution_sdk_python.config import StepConfig, StepSemantics from aws_durable_execution_sdk_python.context import DurableContext from aws_durable_execution_sdk_python.exceptions import ( + BotoClientError, CheckpointError, + CheckpointErrorCategory, ExecutionError, InvocationError, SuspendExecution, @@ -160,7 +162,6 @@ def test_durable_execution_invocation_input_to_dict(): durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=True, ) result = invocation_input.to_dict() @@ -168,21 +169,18 @@ def test_durable_execution_invocation_input_to_dict(): "DurableExecutionArn": "arn:test:execution", "CheckpointToken": "token123", "InitialExecutionState": initial_state.to_dict(), - "LocalRunner": True, } assert result == expected def test_durable_execution_invocation_input_to_dict_not_local(): - """Test DurableExecutionInvocationInput.to_dict with is_local_runner=False.""" initial_state = InitialExecutionState(operations=[], next_marker="") invocation_input = DurableExecutionInvocationInput( durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, ) result = 
invocation_input.to_dict() @@ -190,7 +188,6 @@ def test_durable_execution_invocation_input_to_dict_not_local(): "DurableExecutionArn": "arn:test:execution", "CheckpointToken": "token123", "InitialExecutionState": initial_state.to_dict(), - "LocalRunner": False, } assert result == expected @@ -205,7 +202,6 @@ def test_durable_execution_invocation_input_with_client_inheritance(): durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=True, service_client=mock_client, ) @@ -215,7 +211,6 @@ def test_durable_execution_invocation_input_with_client_inheritance(): "DurableExecutionArn": "arn:test:execution", "CheckpointToken": "token123", "InitialExecutionState": initial_state.to_dict(), - "LocalRunner": True, } assert result == expected @@ -231,7 +226,6 @@ def test_durable_execution_invocation_input_with_client_from_parent(): durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, ) with_client = DurableExecutionInvocationInputWithClient.from_durable_execution_invocation_input( @@ -241,7 +235,6 @@ def test_durable_execution_invocation_input_with_client_from_parent(): assert with_client.durable_execution_arn == parent_input.durable_execution_arn assert with_client.checkpoint_token == parent_input.checkpoint_token assert with_client.initial_execution_state == parent_input.initial_execution_state - assert with_client.is_local_runner == parent_input.is_local_runner assert with_client.service_client == mock_client @@ -344,7 +337,7 @@ def test_durable_execution_client_selection_env_normal_result(): "aws_durable_execution_sdk_python.execution.LambdaClient" ) as mock_lambda_client: mock_client = Mock(spec=DurableServiceClient) - mock_lambda_client.initialize_from_env.return_value = mock_client + mock_lambda_client.initialize_client.return_value = mock_client # Mock successful checkpoint mock_output = CheckpointOutput( @@ -372,7 +365,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: ], "NextMarker": "", }, - "LocalRunner": False, } lambda_context = Mock() @@ -387,7 +379,7 @@ def test_handler(event: Any, context: DurableContext) -> dict: assert result["Status"] == InvocationStatus.SUCCEEDED.value assert result["Result"] == '{"result": "success"}' - mock_lambda_client.initialize_from_env.assert_called_once() + mock_lambda_client.initialize_client.assert_called_once() mock_client.checkpoint.assert_not_called() @@ -397,7 +389,7 @@ def test_durable_execution_client_selection_env_large_result(): "aws_durable_execution_sdk_python.execution.LambdaClient" ) as mock_lambda_client: mock_client = Mock(spec=DurableServiceClient) - mock_lambda_client.initialize_from_env.return_value = mock_client + mock_lambda_client.initialize_client.return_value = mock_client # Mock successful checkpoint mock_output = CheckpointOutput( @@ -425,7 +417,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: ], "NextMarker": "", }, - "LocalRunner": False, } lambda_context = Mock() @@ -440,7 +431,7 @@ def test_handler(event: Any, context: DurableContext) -> dict: assert result["Status"] == InvocationStatus.SUCCEEDED.value assert not result["Result"] - mock_lambda_client.initialize_from_env.assert_called_once() + mock_lambda_client.initialize_client.assert_called_once() mock_client.checkpoint.assert_called_once() @@ -473,7 +464,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: 
durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, service_client=mock_client, ) @@ -521,7 +511,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, service_client=mock_client, ) @@ -577,7 +566,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, service_client=mock_client, ) @@ -591,17 +579,62 @@ def test_handler(event: Any, context: DurableContext) -> dict: result = test_handler(invocation_input, lambda_context) + # small error, should not call checkpoint assert result["Status"] == InvocationStatus.FAILED.value + assert result["Error"] == {"ErrorMessage": "Test error", "ErrorType": "ValueError"} + + assert not mock_client.checkpoint.called + + +def test_durable_execution_with_large_error_payload(): + """Test that large error payloads trigger checkpoint.""" + mock_client = Mock(spec=DurableServiceClient) + mock_output = CheckpointOutput( + checkpoint_token="new_token", # noqa: S106 + new_execution_state=CheckpointUpdatedExecutionState(), + ) + mock_client.checkpoint.return_value = mock_output + + @durable_execution + def test_handler(event: Any, context: DurableContext) -> dict: + raise ValueError(LARGE_RESULT) + + operation = Operation( + operation_id="exec1", + operation_type=OperationType.EXECUTION, + status=OperationStatus.STARTED, + execution_details=ExecutionDetails(input_payload="{}"), + ) + + initial_state = InitialExecutionState(operations=[operation], next_marker="") + + invocation_input = DurableExecutionInvocationInputWithClient( + durable_execution_arn="arn:test:execution", + checkpoint_token="token123", # noqa: S106 + initial_execution_state=initial_state, + service_client=mock_client, + ) + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 1000000 # noqa: SLF001 + lambda_context.invoked_function_arn = None + lambda_context.tenant_id = None + + result = test_handler(invocation_input, lambda_context) + + assert result["Status"] == InvocationStatus.FAILED.value + assert "Error" not in result mock_client.checkpoint.assert_called_once() - # Verify the checkpoint call was for execution failure call_args = mock_client.checkpoint.call_args updates = call_args[1]["updates"] assert len(updates) == 1 assert updates[0].operation_type == OperationType.EXECUTION assert updates[0].action.value == "FAIL" - assert updates[0].error.message == "Test error" - assert updates[0].error.type == "ValueError" + assert updates[0].error.message == LARGE_RESULT def test_durable_execution_fatal_error_handling(): @@ -626,7 +659,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, service_client=mock_client, ) @@ -665,7 +697,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, service_client=mock_client, ) @@ -688,13 
+719,13 @@ def test_handler(event: Any, context: DurableContext) -> dict: assert error_data["ErrorType"] == "ExecutionError" -def test_durable_execution_client_selection_local_runner(): - """Test durable_execution selects correct client for local runner.""" +def test_durable_execution_client_selection_default(): + """Test durable_execution selects correct client using default initialization.""" with patch( "aws_durable_execution_sdk_python.execution.LambdaClient" ) as mock_lambda_client: mock_client = Mock(spec=DurableServiceClient) - mock_lambda_client.initialize_local_runner_client.return_value = mock_client + mock_lambda_client.initialize_client.return_value = mock_client # Mock successful checkpoint mock_output = CheckpointOutput( @@ -722,7 +753,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: ], "NextMarker": "", }, - "LocalRunner": True, } lambda_context = Mock() @@ -736,17 +766,20 @@ def test_handler(event: Any, context: DurableContext) -> dict: result = test_handler(event, lambda_context) assert result["Status"] == InvocationStatus.SUCCEEDED.value - mock_lambda_client.initialize_local_runner_client.assert_called_once() + mock_lambda_client.initialize_client.assert_called_once() def test_initial_execution_state_get_execution_operation_no_operations(): - """Test get_execution_operation raises error when no operations exist.""" + """Test get_execution_operation logs debug and returns None when no operations exist.""" state = InitialExecutionState(operations=[], next_marker="") - with pytest.raises( - Exception, match="No durable operations found in initial execution state" - ): - state.get_execution_operation() + with patch("aws_durable_execution_sdk_python.execution.logger") as mock_logger: + result = state.get_execution_operation() + + assert result is None + mock_logger.debug.assert_called_once_with( + "No durable operations found in initial execution state." 
+ ) def test_initial_execution_state_get_execution_operation_wrong_type(): @@ -803,7 +836,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, service_client=mock_client, ) @@ -843,7 +875,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, service_client=mock_client, ) @@ -883,7 +914,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, service_client=mock_client, ) @@ -927,7 +957,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, service_client=mock_client, ) @@ -968,7 +997,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, service_client=mock_client, ) @@ -998,7 +1026,7 @@ def test_durable_execution_checkpoint_error_in_background_thread(): # Make the background checkpoint thread fail immediately def failing_checkpoint(*args, **kwargs): msg = "Background checkpoint failed" - raise CheckpointError(msg) + raise CheckpointError(msg, error_category=CheckpointErrorCategory.EXECUTION) @durable_execution def test_handler(event: Any, context: DurableContext) -> dict: @@ -1019,7 +1047,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, service_client=mock_client, ) @@ -1041,7 +1068,7 @@ def test_handler(event: Any, context: DurableContext) -> dict: # endregion durable_execution -def test_durable_execution_checkpoint_error_stops_background(): +def test_durable_execution_checkpoint_execution_error_stops_background(): """Test that CheckpointError handler stops background checkpointing. When user code raises CheckpointError, the handler should stop the background @@ -1053,7 +1080,7 @@ def test_handler(event: Any, context: DurableContext) -> dict: # Directly raise CheckpointError to simulate checkpoint failure msg = "Checkpoint system failed" - raise CheckpointError(msg) + raise CheckpointError(msg, CheckpointErrorCategory.EXECUTION) operation = Operation( operation_id="exec1", @@ -1068,7 +1095,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, service_client=mock_client, ) @@ -1093,6 +1119,330 @@ def slow_background(): test_handler(invocation_input, lambda_context) +def test_durable_execution_checkpoint_invocation_error_stops_background(): + """Test that the CheckpointError handler stops background checkpointing. + + When user code raises a CheckpointError with the INVOCATION category, the handler + should stop the background thread and return a FAILED response instead of re-raising.
+ """ + mock_client = Mock(spec=DurableServiceClient) + + @durable_execution + def test_handler(event: Any, context: DurableContext) -> dict: + # Directly raise CheckpointError to simulate checkpoint failure + msg = "Checkpoint system failed" + raise CheckpointError(msg, CheckpointErrorCategory.INVOCATION) + + operation = Operation( + operation_id="exec1", + operation_type=OperationType.EXECUTION, + status=OperationStatus.STARTED, + execution_details=ExecutionDetails(input_payload="{}"), + ) + + initial_state = InitialExecutionState(operations=[operation], next_marker="") + + invocation_input = DurableExecutionInvocationInputWithClient( + durable_execution_arn="arn:test:execution", + checkpoint_token="token123", # noqa: S106 + initial_execution_state=initial_state, + service_client=mock_client, + ) + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 1000000 # noqa: SLF001 + lambda_context.invoked_function_arn = None + lambda_context.tenant_id = None + + # Make background thread sleep so user code completes first + def slow_background(): + time.sleep(1) + + # Mock checkpoint_batches_forever to sleep (simulates background thread running) + with patch( + "aws_durable_execution_sdk_python.state.ExecutionState.checkpoint_batches_forever", + side_effect=slow_background, + ): + response = test_handler(invocation_input, lambda_context) + assert response["Status"] == InvocationStatus.FAILED.value + assert response["Error"]["ErrorType"] == "CheckpointError" + + +def test_durable_execution_background_thread_execution_error_retries(): + """Test that background thread Execution errors are retried (re-raised).""" + mock_client = Mock(spec=DurableServiceClient) + + def failing_checkpoint(*args, **kwargs): + msg = "Background checkpoint failed" + raise CheckpointError(msg, error_category=CheckpointErrorCategory.EXECUTION) + + @durable_execution + def test_handler(event: Any, context: DurableContext) -> dict: + context.step(lambda ctx: "step_result") + return {"result": "success"} + + operation = Operation( + operation_id="exec1", + operation_type=OperationType.EXECUTION, + status=OperationStatus.STARTED, + execution_details=ExecutionDetails(input_payload="{}"), + ) + + initial_state = InitialExecutionState(operations=[operation], next_marker="") + + invocation_input = DurableExecutionInvocationInputWithClient( + durable_execution_arn="arn:test:execution", + checkpoint_token="token123", # noqa: S106 + initial_execution_state=initial_state, + service_client=mock_client, + ) + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 1000000 # noqa: SLF001 + lambda_context.invoked_function_arn = None + lambda_context.tenant_id = None + + mock_client.checkpoint.side_effect = failing_checkpoint + + with pytest.raises(CheckpointError, match="Background checkpoint failed"): + test_handler(invocation_input, lambda_context) + + +def test_durable_execution_background_thread_invocation_error_returns_failed(): + """Test that background thread Invocation errors return FAILED status.""" + mock_client = Mock(spec=DurableServiceClient) + + def failing_checkpoint(*args, **kwargs): + msg = "Background checkpoint failed" + raise CheckpointError(msg, error_category=CheckpointErrorCategory.INVOCATION) + + @durable_execution + def test_handler(event: 
Any, context: DurableContext) -> dict: + context.step(lambda ctx: "step_result") + return {"result": "success"} + + operation = Operation( + operation_id="exec1", + operation_type=OperationType.EXECUTION, + status=OperationStatus.STARTED, + execution_details=ExecutionDetails(input_payload="{}"), + ) + + initial_state = InitialExecutionState(operations=[operation], next_marker="") + + invocation_input = DurableExecutionInvocationInputWithClient( + durable_execution_arn="arn:test:execution", + checkpoint_token="token123", # noqa: S106 + initial_execution_state=initial_state, + service_client=mock_client, + ) + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 1000000 # noqa: SLF001 + lambda_context.invoked_function_arn = None + lambda_context.tenant_id = None + + mock_client.checkpoint.side_effect = failing_checkpoint + + response = test_handler(invocation_input, lambda_context) + assert response["Status"] == InvocationStatus.FAILED.value + assert response["Error"]["ErrorType"] == "CheckpointError" + + +def test_durable_execution_final_success_checkpoint_execution_error_retries(): + """Test that execution errors on final success checkpoint trigger retry.""" + mock_client = Mock(spec=DurableServiceClient) + + def failing_final_checkpoint(*args, **kwargs): + raise CheckpointError( # noqa: TRY003 + "Final checkpoint failed", # noqa: EM101 + error_category=CheckpointErrorCategory.EXECUTION, + ) + + @durable_execution + def test_handler(event: Any, context: DurableContext) -> dict: + # Return large result to trigger final checkpoint (>6MB) + return {"result": "x" * (7 * 1024 * 1024)} + + operation = Operation( + operation_id="exec1", + operation_type=OperationType.EXECUTION, + status=OperationStatus.STARTED, + execution_details=ExecutionDetails(input_payload="{}"), + ) + initial_state = InitialExecutionState(operations=[operation], next_marker="") + + invocation_input = DurableExecutionInvocationInputWithClient( + durable_execution_arn="arn:test:execution", + checkpoint_token="token123", # noqa: S106 + initial_execution_state=initial_state, + service_client=mock_client, + ) + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 1000000 # noqa: SLF001 + lambda_context.invoked_function_arn = None + lambda_context.tenant_id = None + + mock_client.checkpoint.side_effect = failing_final_checkpoint + + with pytest.raises(CheckpointError, match="Final checkpoint failed"): + test_handler(invocation_input, lambda_context) + + +def test_durable_execution_final_success_checkpoint_invocation_error_returns_failed(): + """Test that invocation errors on final success checkpoint return FAILED.""" + mock_client = Mock(spec=DurableServiceClient) + + def failing_final_checkpoint(*args, **kwargs): + raise CheckpointError( # noqa: TRY003 + "Final checkpoint failed", # noqa: EM101 + error_category=CheckpointErrorCategory.INVOCATION, + ) + + @durable_execution + def test_handler(event: Any, context: DurableContext) -> dict: + # Return large result to trigger final checkpoint (>6MB) + return {"result": "x" * (7 * 1024 * 1024)} + + operation = Operation( + operation_id="exec1", + operation_type=OperationType.EXECUTION, + status=OperationStatus.STARTED, + execution_details=ExecutionDetails(input_payload="{}"), + ) + + initial_state =
InitialExecutionState(operations=[operation], next_marker="") + + invocation_input = DurableExecutionInvocationInputWithClient( + durable_execution_arn="arn:test:execution", + checkpoint_token="token123", # noqa: S106 + initial_execution_state=initial_state, + service_client=mock_client, + ) + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 1000000 # noqa: SLF001 + lambda_context.invoked_function_arn = None + lambda_context.tenant_id = None + + mock_client.checkpoint.side_effect = failing_final_checkpoint + + response = test_handler(invocation_input, lambda_context) + assert response["Status"] == InvocationStatus.FAILED.value + assert response["Error"]["ErrorType"] == "CheckpointError" + assert response["Error"]["ErrorMessage"] == "Final checkpoint failed" + + +def test_durable_execution_final_failure_checkpoint_execution_error_retries(): + """Test that execution errors on final failure checkpoint trigger retry.""" + mock_client = Mock(spec=DurableServiceClient) + + def failing_final_checkpoint(*args, **kwargs): + raise CheckpointError( # noqa: TRY003 + "Final checkpoint failed", # noqa: EM101 + error_category=CheckpointErrorCategory.EXECUTION, + ) + + @durable_execution + def test_handler(event: Any, context: DurableContext) -> dict: + # Raise error with large message to trigger final checkpoint (>6MB) + msg = "x" * (7 * 1024 * 1024) + raise ValueError(msg) + + operation = Operation( + operation_id="exec1", + operation_type=OperationType.EXECUTION, + status=OperationStatus.STARTED, + execution_details=ExecutionDetails(input_payload="{}"), + ) + + initial_state = InitialExecutionState(operations=[operation], next_marker="") + + invocation_input = DurableExecutionInvocationInputWithClient( + durable_execution_arn="arn:test:execution", + checkpoint_token="token123", # noqa: S106 + initial_execution_state=initial_state, + service_client=mock_client, + ) + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 1000000 # noqa: SLF001 + lambda_context.invoked_function_arn = None + lambda_context.tenant_id = None + + mock_client.checkpoint.side_effect = failing_final_checkpoint + + with pytest.raises(CheckpointError, match="Final checkpoint failed"): + test_handler(invocation_input, lambda_context) + + +def test_durable_execution_final_failure_checkpoint_invocation_error_returns_failed(): + """Test that invocation errors on final failure checkpoint return FAILED.""" + mock_client = Mock(spec=DurableServiceClient) + + def failing_final_checkpoint(*args, **kwargs): + raise CheckpointError( # noqa: TRY003 + "Final checkpoint failed", # noqa: EM101 + error_category=CheckpointErrorCategory.INVOCATION, + ) + + @durable_execution + def test_handler(event: Any, context: DurableContext) -> dict: + # Raise error with large message to trigger final checkpoint (>6MB) + msg = "x" * (7 * 1024 * 1024) + raise ValueError(msg) + + operation = Operation( + operation_id="exec1", + operation_type=OperationType.EXECUTION, + status=OperationStatus.STARTED, + execution_details=ExecutionDetails(input_payload="{}"), + ) + + initial_state = InitialExecutionState(operations=[operation], next_marker="") + + invocation_input = DurableExecutionInvocationInputWithClient( + durable_execution_arn="arn:test:execution", + checkpoint_token="token123", #
noqa: S106 + initial_execution_state=initial_state, + service_client=mock_client, + ) + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 1000000 # noqa: SLF001 + lambda_context.invoked_function_arn = None + lambda_context.tenant_id = None + + mock_client.checkpoint.side_effect = failing_final_checkpoint + + response = test_handler(invocation_input, lambda_context) + assert response["Status"] == InvocationStatus.FAILED.value + assert response["Error"]["ErrorType"] == "CheckpointError" + assert response["Error"]["ErrorMessage"] == "Final checkpoint failed" + + def test_durable_handler_background_thread_failure_on_succeed_checkpoint(): """Test durable_handler handles background thread failure on SUCCEED checkpoint. @@ -1142,7 +1492,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, service_client=mock_client, ) @@ -1233,7 +1582,6 @@ def test_handler(event: Any, context: DurableContext) -> dict: durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, service_client=mock_client, ) @@ -1317,7 +1665,6 @@ def test_handler(event: Any, context: DurableContext) -> str: durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, service_client=mock_client, ) @@ -1388,7 +1735,6 @@ def test_handler(event: Any, context: DurableContext) -> str: durable_execution_arn="arn:test:execution", checkpoint_token="token123", # noqa: S106 initial_execution_state=initial_state, - is_local_runner=False, service_client=mock_client, ) @@ -1403,8 +1749,291 @@ def test_handler(event: Any, context: DurableContext) -> str: # Make the service client checkpoint call fail on error handling mock_client.checkpoint.side_effect = failing_checkpoint - # Verify that the checkpoint error is raised (not the original ValueError) + # Verify that the error is returned rather than raised because the response payload is small + resp = test_handler(invocation_input, lambda_context) + assert resp["Error"]["ErrorMessage"] == "User function error" + assert resp["Error"]["ErrorType"] == "ValueError" + assert resp["Status"] == InvocationStatus.FAILED.value + + +def test_durable_execution_logs_checkpoint_error_extras_from_background_thread(): + """Test that CheckpointError extras are logged when raised from background thread.""" + mock_client = Mock(spec=DurableServiceClient) + mock_logger = Mock() + + error_obj = {"Code": "TestError", "Message": "Test checkpoint error"} + metadata_obj = {"RequestId": "test-request-id"} + + def failing_checkpoint(*args, **kwargs): + raise CheckpointError( # noqa: TRY003 + "Checkpoint failed", # noqa: EM101 + error_category=CheckpointErrorCategory.EXECUTION, + error=error_obj, + response_metadata=metadata_obj, + ) + + @durable_execution + def test_handler(event: Any, context: DurableContext) -> dict: + context.step(lambda ctx: "step_result") + return {"result": "success"} + + operation = Operation( + operation_id="exec1", + operation_type=OperationType.EXECUTION, + status=OperationStatus.STARTED, + execution_details=ExecutionDetails(input_payload="{}"), + ) + + initial_state = InitialExecutionState(operations=[operation], next_marker="")
+ + invocation_input = DurableExecutionInvocationInputWithClient( + durable_execution_arn="arn:test:execution", + checkpoint_token="token123", # noqa: S106 + initial_execution_state=initial_state, + service_client=mock_client, + ) + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 1000000 # noqa: SLF001 + lambda_context.invoked_function_arn = None + lambda_context.tenant_id = None + + mock_client.checkpoint.side_effect = failing_checkpoint + + with patch("aws_durable_execution_sdk_python.execution.logger", mock_logger): + with pytest.raises(CheckpointError): + test_handler(invocation_input, lambda_context) + + mock_logger.exception.assert_called_once() + call_args = mock_logger.exception.call_args + assert "Checkpoint processing failed" in call_args[0][0] + assert call_args[1]["extra"]["Error"] == error_obj + assert call_args[1]["extra"]["ResponseMetadata"] == metadata_obj + + +def test_durable_execution_logs_boto_client_error_extras_from_background_thread(): + """Test that BotoClientError extras are logged when raised from background thread.""" + + mock_client = Mock(spec=DurableServiceClient) + mock_logger = Mock() + + error_obj = {"Code": "ServiceError", "Message": "Boto3 service error"} + metadata_obj = {"RequestId": "boto-request-id"} + + def failing_checkpoint(*args, **kwargs): + raise BotoClientError( # noqa: TRY003 + "Boto3 error", # noqa: EM101 + error=error_obj, + response_metadata=metadata_obj, + ) + + @durable_execution + def test_handler(event: Any, context: DurableContext) -> dict: + context.step(lambda ctx: "step_result") + return {"result": "success"} + + operation = Operation( + operation_id="exec1", + operation_type=OperationType.EXECUTION, + status=OperationStatus.STARTED, + execution_details=ExecutionDetails(input_payload="{}"), + ) + + initial_state = InitialExecutionState(operations=[operation], next_marker="") + + invocation_input = DurableExecutionInvocationInputWithClient( + durable_execution_arn="arn:test:execution", + checkpoint_token="token123", # noqa: S106 + initial_execution_state=initial_state, + service_client=mock_client, + ) + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 1000000 # noqa: SLF001 + lambda_context.invoked_function_arn = None + lambda_context.tenant_id = None + + mock_client.checkpoint.side_effect = failing_checkpoint + + with patch("aws_durable_execution_sdk_python.execution.logger", mock_logger): + with pytest.raises(BotoClientError): + test_handler(invocation_input, lambda_context) + + mock_logger.exception.assert_called_once() + call_args = mock_logger.exception.call_args + assert "Checkpoint processing failed" in call_args[0][0] + assert call_args[1]["extra"]["Error"] == error_obj + assert call_args[1]["extra"]["ResponseMetadata"] == metadata_obj + + +def test_durable_execution_logs_checkpoint_error_extras_from_user_code(): + """Test that CheckpointError extras are logged when raised directly from user code.""" + mock_client = Mock(spec=DurableServiceClient) + mock_logger = Mock() + + error_obj = { + "Code": "UserCheckpointError", + "Message": "User raised checkpoint error", + } + metadata_obj = {"RequestId": "user-request-id"} + + @durable_execution + def test_handler(event: Any, context: DurableContext) -> dict: + raise CheckpointError( # noqa: TRY003
+ "User checkpoint error", # noqa: EM101 + error_category=CheckpointErrorCategory.EXECUTION, + error=error_obj, + response_metadata=metadata_obj, + ) + + operation = Operation( + operation_id="exec1", + operation_type=OperationType.EXECUTION, + status=OperationStatus.STARTED, + execution_details=ExecutionDetails(input_payload="{}"), + ) + + initial_state = InitialExecutionState(operations=[operation], next_marker="") + + invocation_input = DurableExecutionInvocationInputWithClient( + durable_execution_arn="arn:test:execution", + checkpoint_token="token123", # noqa: S106 + initial_execution_state=initial_state, + service_client=mock_client, + ) + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request" + lambda_context.client_context = None + lambda_context.identity = None +
lambda_context._epoch_deadline_time_in_ms = 1000000 # noqa: SLF001 + lambda_context.invoked_function_arn = None + lambda_context.tenant_id = None + + # WHEN the handler is invoked with a non-durable payload + # THEN it raises an ExecutionError with a helpful message with pytest.raises( - RuntimeError, match="Background checkpoint failed on error handling" + ExecutionError, + match=( + "Unexpected payload provided to start the durable execution. " + "Check your resource configurations to confirm the durability is set." + ), ): - test_handler(invocation_input, lambda_context) + test_handler(regular_event, lambda_context) + + +def test_durable_execution_with_non_dict_event_raises_error(): + """Test that invoking a durable function with a non-dict event raises a helpful error.""" + + # GIVEN a durable function + @durable_execution + def test_handler(event: Any, context: DurableContext) -> dict: + return {"result": "success"} + + # GIVEN a non-dict event + non_dict_event = "not a dict" + + lambda_context = Mock() + lambda_context.aws_request_id = "test-request" + lambda_context.client_context = None + lambda_context.identity = None + lambda_context._epoch_deadline_time_in_ms = 1000000 # noqa: SLF001 + lambda_context.invoked_function_arn = None + lambda_context.tenant_id = None + + # WHEN the handler is invoked with a non-dict event + # THEN it raises an ExecutionError with a helpful message + with pytest.raises( + ExecutionError, + match=( + "Unexpected payload provided to start the durable execution. " + "Check your resource configurations to confirm the durability is set." + ), + ): + test_handler(non_dict_event, lambda_context) diff --git a/tests/lambda_service_test.py b/tests/lambda_service_test.py index 35214b9..cc4dce4 100644 --- a/tests/lambda_service_test.py +++ b/tests/lambda_service_test.py @@ -8,6 +8,7 @@ from aws_durable_execution_sdk_python.exceptions import ( CallableRuntimeError, CheckpointError, + GetExecutionStateError, ) from aws_durable_execution_sdk_python.identifier import OperationIdentifier from aws_durable_execution_sdk_python.lambda_service import ( @@ -399,9 +400,10 @@ def test_callback_options_from_dict_partial(): def test_invoke_options_from_dict(): """Test ChainedInvokeOptions.from_dict method.""" - data = {"FunctionName": "test-function", "TimeoutSeconds": 120} + data = {"FunctionName": "test-function", "TenantId": "test-tenant"} options = ChainedInvokeOptions.from_dict(data) assert options.function_name == "test-function" + assert options.tenant_id == "test-tenant" def test_invoke_options_from_dict_required_only(): @@ -409,6 +411,15 @@ data = {"FunctionName": "test-function"} options = ChainedInvokeOptions.from_dict(data) assert options.function_name == "test-function" + assert options.tenant_id is None + + +def test_invoke_options_from_dict_with_none_tenant(): + """Test ChainedInvokeOptions.from_dict with explicit None tenant_id.""" + data = {"FunctionName": "test-function", "TenantId": None} + options = ChainedInvokeOptions.from_dict(data) + assert options.function_name == "test-function" + assert options.tenant_id is None def test_context_options_from_dict(): @@ -685,9 +696,12 @@ def test_operation_update_create_wait_start(): @patch("aws_durable_execution_sdk_python.lambda_service.datetime") def test_operation_update_create_execution_succeed(mock_datetime): """Test OperationUpdate.create_execution_succeed factory method.""" - mock_datetime.datetime.now.return_value = "2023-01-01" + +
mock_datetime.datetime.now.return_value = datetime.datetime.fromtimestamp( + 1672531200.0, tz=datetime.UTC + ) update = OperationUpdate.create_execution_succeed("success_payload") - assert update.operation_id == "execution-result-2023-01-01" + assert update.operation_id == "execution-result-1672531200000" assert update.operation_type == OperationType.EXECUTION assert update.action == OperationAction.SUCCEED assert update.payload == "success_payload" @@ -1467,6 +1481,8 @@ def test_operation_from_dict_complete(): assert operation.step_details.result == "step_result" assert operation.wait_details.scheduled_end_timestamp == start_time assert operation.callback_details.callback_id == "cb1" + assert operation.chained_invoke_details is not None + assert operation.chained_invoke_details.result == "invoke_result" def test_operation_to_dict_with_subtype(): @@ -1629,13 +1645,6 @@ def test_checkpoint_updated_execution_state_from_dict_with_operations(): assert state.next_marker == "marker123" -@patch.dict( - "os.environ", - { - "DURABLE_LOCAL_RUNNER_ENDPOINT": "http://test:5000/", - "DURABLE_LOCAL_RUNNER_REGION": "us-west-1", - }, -) @patch("aws_durable_execution_sdk_python.lambda_service.boto3") def test_lambda_client_checkpoint(mock_boto3): """Test LambdaClient.checkpoint method.""" @@ -1788,6 +1797,80 @@ def test_lambda_client_checkpoint_with_exception(): lambda_client.checkpoint("arn123", "token123", [update], None) +@patch("aws_durable_execution_sdk_python.lambda_service.logger") +def test_lambda_client_checkpoint_logs_response_metadata(mock_logger): + """Test LambdaClient.checkpoint logs ResponseMetadata from boto3 exception.""" + mock_client = Mock() + boto_error = Exception("API Error") + boto_error.response = { + "ResponseMetadata": { + "RequestId": "test-request-id-123", + "HTTPStatusCode": 500, + "RetryAttempts": 2, + } + } + mock_client.checkpoint_durable_execution.side_effect = boto_error + + lambda_client = LambdaClient(mock_client) + update = OperationUpdate( + operation_id="op1", + operation_type=OperationType.STEP, + action=OperationAction.START, + ) + + with pytest.raises(CheckpointError): + lambda_client.checkpoint("arn123", "token123", [update], None) + + mock_logger.exception.assert_called_once_with( + "Failed to checkpoint.", + extra={ + "ResponseMetadata": { + "RequestId": "test-request-id-123", + "HTTPStatusCode": 500, + "RetryAttempts": 2, + }, + }, + ) + + +@patch("aws_durable_execution_sdk_python.lambda_service.logger") +def test_lambda_client_get_execution_state_logs_response_metadata(mock_logger): + """Test LambdaClient.get_execution_state logs ResponseMetadata from boto3 exception.""" + mock_client = Mock() + boto_error = Exception("API Error") + boto_error.response = { + "ResponseMetadata": { + "RequestId": "test-request-id-456", + "HTTPStatusCode": 503, + "RetryAttempts": 1, + } + } + mock_client.get_durable_execution_state.side_effect = boto_error + + lambda_client = LambdaClient(mock_client) + + with pytest.raises(GetExecutionStateError) as exc_info: + lambda_client.get_execution_state("arn123", "token123", "", 1000) + + assert exc_info.value.error is None + assert exc_info.value.response_metadata == { + "RequestId": "test-request-id-456", + "HTTPStatusCode": 503, + "RetryAttempts": 1, + } + + mock_logger.exception.assert_called_once_with( + "Failed to get execution state.", + extra={ + "ResponseMetadata": { + "RequestId": "test-request-id-456", + "HTTPStatusCode": 503, + "RetryAttempts": 1, + }, + }, + ) + + def 
test_durable_service_client_protocol_checkpoint(): """Test DurableServiceClient protocol checkpoint method signature.""" mock_client = Mock(spec=DurableServiceClient) @@ -1825,50 +1908,45 @@ def test_lambda_client_constructor(): @patch.dict("os.environ", {}, clear=True) @patch("boto3.client") -def test_lambda_client_initialize_from_env_default(mock_boto_client): - """Test LambdaClient.initialize_from_env with default endpoint.""" +def test_lambda_client_initialize_client_default(mock_boto_client): + """Test LambdaClient.initialize_client with default endpoint.""" mock_client = Mock() mock_boto_client.return_value = mock_client - with patch.object(LambdaClient, "load_preview_botocore_models"): - client = LambdaClient.initialize_from_env() + client = LambdaClient.initialize_client() - mock_boto_client.assert_called_with("lambdainternal") + # Check that boto3.client was called with the right service name and config + mock_boto_client.assert_called_once() + call_args = mock_boto_client.call_args + assert call_args[0][0] == "lambda" + assert "config" in call_args[1] + config = call_args[1]["config"] + assert config.connect_timeout == 5 + assert config.read_timeout == 50 assert isinstance(client, LambdaClient) @patch.dict("os.environ", {"AWS_ENDPOINT_URL_LAMBDA": "http://localhost:3000/"}) @patch("boto3.client") -def test_lambda_client_initialize_from_env_with_endpoint(mock_boto_client): - """Test LambdaClient.initialize_from_env with custom endpoint.""" +def test_lambda_client_initialize_client_with_endpoint(mock_boto_client): + """Test LambdaClient.initialize_client with custom endpoint (boto3 handles it automatically).""" mock_client = Mock() mock_boto_client.return_value = mock_client - with patch.object(LambdaClient, "load_preview_botocore_models"): - client = LambdaClient.initialize_from_env() - - mock_boto_client.assert_called_with( - "lambdainternal", endpoint_url="http://localhost:3000/" - ) + client = LambdaClient.initialize_client() + + # Check that boto3.client was called with the right parameters and config + # Note: boto3 automatically picks up AWS_ENDPOINT_URL_LAMBDA from environment + mock_boto_client.assert_called_once() + call_args = mock_boto_client.call_args + assert call_args[0][0] == "lambda" + assert "config" in call_args[1] + config = call_args[1]["config"] + assert config.connect_timeout == 5 + assert config.read_timeout == 50 assert isinstance(client, LambdaClient) -@patch("aws_durable_execution_sdk_python.lambda_service.boto3") -def test_lambda_client_initialize_local_runner_client(mock_boto3): - """Test LambdaClient.initialize_local_runner_client method.""" - mock_client = Mock() - mock_boto3.client.return_value = mock_client - - lambda_client = LambdaClient.initialize_local_runner_client() - - mock_boto3.client.assert_called_once_with( - "lambdainternal-local", - endpoint_url="http://host.docker.internal:5000/", - region_name="us-west-2", - ) - assert lambda_client.client == mock_client - - def test_lambda_client_get_execution_state(): """Test LambdaClient.get_execution_state method.""" mock_client = Mock() @@ -1902,40 +1980,14 @@ def test_durable_service_client_protocol_get_execution_state(): assert result == mock_output -@patch("aws_durable_execution_sdk_python.lambda_service.boto3") -def test_lambda_client_initialize_local_runner_client_defaults(mock_boto3): - """Test LambdaClient.initialize_local_runner_client with default environment values.""" - mock_client = Mock() - mock_boto3.client.return_value = mock_client - - lambda_client = 
LambdaClient.initialize_local_runner_client() - - mock_boto3.client.assert_called_once_with( - "lambdainternal-local", - endpoint_url="http://host.docker.internal:5000/", - region_name="us-west-2", - ) - assert lambda_client.client == mock_client - - @patch.dict("os.environ", {}, clear=True) -@patch( - "aws_durable_execution_sdk_python.lambda_service.LambdaClient.initialize_from_env" -) -def test_lambda_client_initialize_from_env_defaults(mock_init): - """Test LambdaClient.initialize_from_env with default environment values.""" - LambdaClient.initialize_from_env() +@patch("aws_durable_execution_sdk_python.lambda_service.LambdaClient.initialize_client") +def test_lambda_client_initialize_client_defaults(mock_init): + """Test LambdaClient.initialize_client with default environment values.""" + LambdaClient.initialize_client() mock_init.assert_called_once_with() -@patch("os.environ") -def test_lambda_client_load_preview_botocore_models(mock_environ): - """Test LambdaClient.load_preview_botocore_models method.""" - LambdaClient.load_preview_botocore_models() - # Verify that AWS_DATA_PATH is set - assert "AWS_DATA_PATH" in mock_environ.__setitem__.call_args[0] - - def test_checkpoint_error_handling(): """Test CheckpointError exception handling in LambdaClient.checkpoint.""" mock_client = Mock() @@ -1954,15 +2006,17 @@ def test_checkpoint_error_handling(): @patch.dict("os.environ", {}, clear=True) @patch("boto3.client") -def test_lambda_client_initialize_from_env_no_endpoint(mock_boto_client): - """Test LambdaClient.initialize_from_env without AWS_ENDPOINT_URL_LAMBDA.""" +def test_lambda_client_initialize_client_no_endpoint(mock_boto_client): + """Test LambdaClient.initialize_client without AWS_ENDPOINT_URL_LAMBDA.""" mock_client = Mock() mock_boto_client.return_value = mock_client - with patch.object(LambdaClient, "load_preview_botocore_models"): - client = LambdaClient.initialize_from_env() + client = LambdaClient.initialize_client() - mock_boto_client.assert_called_with("lambdainternal") + # Verify the call was made with the expected arguments including config + call_args = mock_boto_client.call_args + assert call_args[0] == ("lambda",) + assert "config" in call_args[1] assert isinstance(client, LambdaClient) diff --git a/tests/logger_test.py b/tests/logger_test.py index d3b76aa..f503538 100644 --- a/tests/logger_test.py +++ b/tests/logger_test.py @@ -1,10 +1,17 @@ """Unit tests for logger module.""" +import logging from collections.abc import Mapping from unittest.mock import Mock from aws_durable_execution_sdk_python.identifier import OperationIdentifier +from aws_durable_execution_sdk_python.lambda_service import ( + Operation, + OperationStatus, + OperationType, +) from aws_durable_execution_sdk_python.logger import Logger, LoggerInterface, LogInfo +from aws_durable_execution_sdk_python.state import ExecutionState, ReplayStatus class PowertoolsLoggerStub: @@ -71,6 +78,14 @@ def exception( pass +EXECUTION_STATE = ExecutionState( + durable_execution_arn="arn:aws:test", + initial_checkpoint_token="test_token", # noqa: S106 + operations={}, + service_client=Mock(), +) + + def test_powertools_logger_compatibility(): """Test that PowertoolsLoggerStub is compatible with LoggerInterface protocol.""" powertools_logger = PowertoolsLoggerStub() @@ -87,7 +102,7 @@ def accepts_logger_interface(logger: LoggerInterface) -> None: accepts_logger_interface(powertools_logger) # Test that our Logger can wrap the PowertoolsLoggerStub - log_info = LogInfo("arn:aws:test") + log_info = 
LogInfo(EXECUTION_STATE) wrapped_logger = Logger.from_log_info(powertools_logger, log_info) # Test all methods work @@ -100,18 +115,20 @@ def accepts_logger_interface(logger: LoggerInterface) -> None: def test_log_info_creation(): """Test LogInfo creation with all parameters.""" - log_info = LogInfo("arn:aws:test", "parent123", "test_name", 5) - assert log_info.execution_arn == "arn:aws:test" + log_info = LogInfo(EXECUTION_STATE, "parent123", "operation123", "test_name", 5) + assert log_info.execution_state.durable_execution_arn == "arn:aws:test" assert log_info.parent_id == "parent123" + assert log_info.operation_id == "operation123" assert log_info.name == "test_name" assert log_info.attempt == 5 def test_log_info_creation_minimal(): """Test LogInfo creation with minimal parameters.""" - log_info = LogInfo("arn:aws:test") - assert log_info.execution_arn == "arn:aws:test" + log_info = LogInfo(EXECUTION_STATE) + assert log_info.execution_state.durable_execution_arn == "arn:aws:test" assert log_info.parent_id is None + assert log_info.operation_id is None assert log_info.name is None assert log_info.attempt is None @@ -119,9 +136,10 @@ def test_log_info_creation_minimal(): def test_log_info_from_operation_identifier(): """Test LogInfo.from_operation_identifier.""" op_id = OperationIdentifier("op123", "parent456", "op_name") - log_info = LogInfo.from_operation_identifier("arn:aws:test", op_id, 3) - assert log_info.execution_arn == "arn:aws:test" + log_info = LogInfo.from_operation_identifier(EXECUTION_STATE, op_id, 3) + assert log_info.execution_state.durable_execution_arn == "arn:aws:test" assert log_info.parent_id == "parent456" + assert log_info.operation_id == "op123" assert log_info.name == "op_name" assert log_info.attempt == 3 @@ -129,19 +147,21 @@ def test_log_info_from_operation_identifier(): def test_log_info_from_operation_identifier_no_attempt(): """Test LogInfo.from_operation_identifier without attempt.""" op_id = OperationIdentifier("op123", "parent456", "op_name") - log_info = LogInfo.from_operation_identifier("arn:aws:test", op_id) - assert log_info.execution_arn == "arn:aws:test" + log_info = LogInfo.from_operation_identifier(EXECUTION_STATE, op_id) + assert log_info.execution_state.durable_execution_arn == "arn:aws:test" assert log_info.parent_id == "parent456" + assert log_info.operation_id == "op123" assert log_info.name == "op_name" assert log_info.attempt is None def test_log_info_with_parent_id(): """Test LogInfo.with_parent_id.""" - original = LogInfo("arn:aws:test", "old_parent", "test_name", 2) + original = LogInfo(EXECUTION_STATE, "old_parent", "op123", "test_name", 2) new_log_info = original.with_parent_id("new_parent") - assert new_log_info.execution_arn == "arn:aws:test" + assert new_log_info.execution_state.durable_execution_arn == "arn:aws:test" assert new_log_info.parent_id == "new_parent" + assert new_log_info.operation_id == "op123" assert new_log_info.name == "test_name" assert new_log_info.attempt == 2 @@ -149,14 +169,15 @@ def test_log_info_with_parent_id(): def test_logger_from_log_info_full(): """Test Logger.from_log_info with all LogInfo fields.""" mock_logger = Mock() - log_info = LogInfo("arn:aws:test", "parent123", "test_name", 5) + log_info = LogInfo(EXECUTION_STATE, "parent123", "op123", "test_name", 5) logger = Logger.from_log_info(mock_logger, log_info) expected_extra = { - "execution_arn": "arn:aws:test", - "parent_id": "parent123", - "name": "test_name", - "attempt": 5, + "executionArn": "arn:aws:test", + "parentId": "parent123", + 
"operationId": "op123", + "operationName": "test_name", + "attempt": 6, } assert logger._default_extra == expected_extra # noqa: SLF001 assert logger._logger is mock_logger # noqa: SLF001 @@ -167,47 +188,54 @@ def test_logger_from_log_info_partial_fields(): mock_logger = Mock() # Test with parent_id but no name or attempt - log_info = LogInfo("arn:aws:test", "parent123") + log_info = LogInfo(EXECUTION_STATE, "parent123") logger = Logger.from_log_info(mock_logger, log_info) - expected_extra = {"execution_arn": "arn:aws:test", "parent_id": "parent123"} + expected_extra = {"executionArn": "arn:aws:test", "parentId": "parent123"} assert logger._default_extra == expected_extra # noqa: SLF001 # Test with name but no parent_id or attempt - log_info = LogInfo("arn:aws:test", None, "test_name") + log_info = LogInfo(EXECUTION_STATE, None, None, "test_name") logger = Logger.from_log_info(mock_logger, log_info) - expected_extra = {"execution_arn": "arn:aws:test", "name": "test_name"} + expected_extra = {"executionArn": "arn:aws:test", "operationName": "test_name"} assert logger._default_extra == expected_extra # noqa: SLF001 # Test with attempt but no parent_id or name - log_info = LogInfo("arn:aws:test", None, None, 5) + log_info = LogInfo(EXECUTION_STATE, None, None, None, 5) logger = Logger.from_log_info(mock_logger, log_info) - expected_extra = {"execution_arn": "arn:aws:test", "attempt": 5} + expected_extra = {"executionArn": "arn:aws:test", "attempt": 6} assert logger._default_extra == expected_extra # noqa: SLF001 def test_logger_from_log_info_minimal(): """Test Logger.from_log_info with minimal LogInfo.""" mock_logger = Mock() - log_info = LogInfo("arn:aws:test") + log_info = LogInfo(EXECUTION_STATE) logger = Logger.from_log_info(mock_logger, log_info) - expected_extra = {"execution_arn": "arn:aws:test"} + expected_extra = {"executionArn": "arn:aws:test"} assert logger._default_extra == expected_extra # noqa: SLF001 def test_logger_with_log_info(): """Test Logger.with_log_info.""" mock_logger = Mock() - original_info = LogInfo("arn:aws:test", "parent1") + original_info = LogInfo(EXECUTION_STATE, "parent1") logger = Logger.from_log_info(mock_logger, original_info) - new_info = LogInfo("arn:aws:new", "parent2", "new_name") + execution_state_new = ExecutionState( + durable_execution_arn="arn:aws:new", + initial_checkpoint_token="test_token", # noqa: S106 + operations={}, + service_client=Mock(), + ) + new_info = LogInfo(execution_state_new, "parent2", "op123", "new_name") new_logger = logger.with_log_info(new_info) expected_extra = { - "execution_arn": "arn:aws:new", - "parent_id": "parent2", - "name": "new_name", + "executionArn": "arn:aws:new", + "parentId": "parent2", + "operationId": "op123", + "operationName": "new_name", } assert new_logger._default_extra == expected_extra # noqa: SLF001 assert new_logger._logger is mock_logger # noqa: SLF001 @@ -216,7 +244,7 @@ def test_logger_with_log_info(): def test_logger_get_logger(): """Test Logger.get_logger.""" mock_logger = Mock() - log_info = LogInfo("arn:aws:test") + log_info = LogInfo(EXECUTION_STATE) logger = Logger.from_log_info(mock_logger, log_info) assert logger.get_logger() is mock_logger @@ -224,14 +252,14 @@ def test_logger_get_logger(): def test_logger_debug(): """Test Logger.debug method.""" mock_logger = Mock() - log_info = LogInfo("arn:aws:test", "parent123") + log_info = LogInfo(EXECUTION_STATE, "parent123") logger = Logger.from_log_info(mock_logger, log_info) logger.debug("test %s message", "arg1", extra={"custom": "value"}) 
expected_extra = { - "execution_arn": "arn:aws:test", - "parent_id": "parent123", + "executionArn": "arn:aws:test", + "parentId": "parent123", "custom": "value", } mock_logger.debug.assert_called_once_with( @@ -242,24 +270,24 @@ def test_logger_debug(): def test_logger_info(): """Test Logger.info method.""" mock_logger = Mock() - log_info = LogInfo("arn:aws:test") + log_info = LogInfo(EXECUTION_STATE) logger = Logger.from_log_info(mock_logger, log_info) logger.info("info message") - expected_extra = {"execution_arn": "arn:aws:test"} + expected_extra = {"executionArn": "arn:aws:test"} mock_logger.info.assert_called_once_with("info message", extra=expected_extra) def test_logger_warning(): """Test Logger.warning method.""" mock_logger = Mock() - log_info = LogInfo("arn:aws:test") + log_info = LogInfo(EXECUTION_STATE) logger = Logger.from_log_info(mock_logger, log_info) logger.warning("warning %s %s message", "arg1", "arg2") - expected_extra = {"execution_arn": "arn:aws:test"} + expected_extra = {"executionArn": "arn:aws:test"} mock_logger.warning.assert_called_once_with( "warning %s %s message", "arg1", "arg2", extra=expected_extra ) @@ -268,24 +296,24 @@ def test_logger_warning(): def test_logger_error(): """Test Logger.error method.""" mock_logger = Mock() - log_info = LogInfo("arn:aws:test") + log_info = LogInfo(EXECUTION_STATE) logger = Logger.from_log_info(mock_logger, log_info) logger.error("error message", extra={"error_code": 500}) - expected_extra = {"execution_arn": "arn:aws:test", "error_code": 500} + expected_extra = {"executionArn": "arn:aws:test", "error_code": 500} mock_logger.error.assert_called_once_with("error message", extra=expected_extra) def test_logger_exception(): """Test Logger.exception method.""" mock_logger = Mock() - log_info = LogInfo("arn:aws:test") + log_info = LogInfo(EXECUTION_STATE) logger = Logger.from_log_info(mock_logger, log_info) logger.exception("exception message") - expected_extra = {"execution_arn": "arn:aws:test"} + expected_extra = {"executionArn": "arn:aws:test"} mock_logger.exception.assert_called_once_with( "exception message", extra=expected_extra ) @@ -294,7 +322,7 @@ def test_logger_exception(): def test_logger_methods_with_none_extra(): """Test logger methods handle None extra parameter.""" mock_logger = Mock() - log_info = LogInfo("arn:aws:test") + log_info = LogInfo(EXECUTION_STATE) logger = Logger.from_log_info(mock_logger, log_info) logger.debug("debug", extra=None) @@ -303,7 +331,7 @@ def test_logger_methods_with_none_extra(): logger.error("error", extra=None) logger.exception("exception", extra=None) - expected_extra = {"execution_arn": "arn:aws:test"} + expected_extra = {"executionArn": "arn:aws:test"} mock_logger.debug.assert_called_with("debug", extra=expected_extra) mock_logger.info.assert_called_with("info", extra=expected_extra) mock_logger.warning.assert_called_with("warning", extra=expected_extra) @@ -314,14 +342,77 @@ def test_logger_methods_with_none_extra(): def test_logger_extra_override(): """Test that custom extra overrides default extra.""" mock_logger = Mock() - log_info = LogInfo("arn:aws:test", "parent123") + log_info = LogInfo(EXECUTION_STATE, "parent123") logger = Logger.from_log_info(mock_logger, log_info) - logger.info("test", extra={"execution_arn": "overridden", "new_field": "value"}) + logger.info("test", extra={"executionArn": "overridden", "newField": "value"}) expected_extra = { - "execution_arn": "overridden", - "parent_id": "parent123", - "new_field": "value", + "executionArn": "overridden", + 
"parentId": "parent123", + "newField": "value", } mock_logger.info.assert_called_once_with("test", extra=expected_extra) + + +def test_logger_without_mocked_logger(): + """Test Logger methods without mocking the underlying logger.""" + log_info = LogInfo(EXECUTION_STATE, "parent123", "test_name", 5) + logger = Logger.from_log_info(logging.getLogger(), log_info) + + logger.info("test", extra={"execution_arn": "overridden", "new_field": "value"}) + logger.warning("test", extra={"execution_arn": "overridden", "new_field": "value"}) + logger.error("test", extra={"execution_arn": "overridden", "new_field": "value"}) + + +def test_logger_replay_no_logging(): + operation = Operation( + operation_id="op1", + operation_type=OperationType.STEP, + status=OperationStatus.SUCCEEDED, + ) + replay_execution_state = ExecutionState( + durable_execution_arn="arn:aws:test", + initial_checkpoint_token="test_token", # noqa: S106 + operations={"op1": operation}, + service_client=Mock(), + replay_status=ReplayStatus.REPLAY, + ) + log_info = LogInfo(replay_execution_state, "parent123", "test_name", 5) + mock_logger = Mock() + logger = Logger.from_log_info(mock_logger, log_info) + logger.info("logging info") + replay_execution_state.track_replay(operation_id="op1") + + mock_logger.info.assert_not_called() + + +def test_logger_replay_then_new_logging(): + operation1 = Operation( + operation_id="op1", + operation_type=OperationType.STEP, + status=OperationStatus.SUCCEEDED, + ) + operation2 = Operation( + operation_id="op2", + operation_type=OperationType.STEP, + status=OperationStatus.SUCCEEDED, + ) + execution_state = ExecutionState( + durable_execution_arn="arn:aws:test", + initial_checkpoint_token="test_token", # noqa: S106 + operations={"op1": operation1, "op2": operation2}, + service_client=Mock(), + replay_status=ReplayStatus.REPLAY, + ) + log_info = LogInfo(execution_state, "parent123", "test_name", 5) + mock_logger = Mock() + logger = Logger.from_log_info(mock_logger, log_info) + execution_state.track_replay(operation_id="op1") + logger.info("logging info") + + mock_logger.info.assert_not_called() + + execution_state.track_replay(operation_id="op2") + logger.info("logging info") + mock_logger.info.assert_called_once() diff --git a/tests/operation/base_test.py b/tests/operation/base_test.py new file mode 100644 index 0000000..4b20818 --- /dev/null +++ b/tests/operation/base_test.py @@ -0,0 +1,314 @@ +"""Unit tests for OperationExecutor base framework.""" + +from __future__ import annotations + +import pytest + +from aws_durable_execution_sdk_python.exceptions import InvalidStateError +from aws_durable_execution_sdk_python.lambda_service import ( + Operation, + OperationStatus, + OperationType, +) +from aws_durable_execution_sdk_python.operation.base import ( + CheckResult, + OperationExecutor, +) +from aws_durable_execution_sdk_python.state import CheckpointedResult + +# Test fixtures and helpers + + +class ConcreteOperationExecutor(OperationExecutor[str]): + """Concrete implementation for testing the abstract base class.""" + + def __init__(self): + self.check_result_status_called = 0 + self.execute_called = 0 + self.check_result_to_return = None + self.execute_result_to_return = "executed_result" + + def check_result_status(self) -> CheckResult[str]: + """Mock implementation that returns configured result.""" + self.check_result_status_called += 1 + if self.check_result_to_return is None: + msg = "check_result_to_return not configured" + raise ValueError(msg) + return self.check_result_to_return + + def 
execute(self, checkpointed_result: CheckpointedResult) -> str: + """Mock implementation that returns configured result.""" + self.execute_called += 1 + return self.execute_result_to_return + + +def create_mock_checkpoint(status: OperationStatus) -> CheckpointedResult: + """Create a mock CheckpointedResult with the given status.""" + operation = Operation( + operation_id="test_op", + operation_type=OperationType.STEP, + status=status, + ) + return CheckpointedResult.create_from_operation(operation) + + +# Tests for CheckResult factory methods + + +def test_check_result_create_is_ready_to_execute(): + """Test CheckResult.create_is_ready_to_execute factory method.""" + checkpoint = create_mock_checkpoint(OperationStatus.STARTED) + + result = CheckResult.create_is_ready_to_execute(checkpoint) + + assert result.is_ready_to_execute is True + assert result.has_checkpointed_result is False + assert result.checkpointed_result is checkpoint + assert result.deserialized_result is None + + +def test_check_result_create_started(): + """Test CheckResult.create_started factory method.""" + result = CheckResult.create_started() + + assert result.is_ready_to_execute is False + assert result.has_checkpointed_result is False + assert result.checkpointed_result is None + assert result.deserialized_result is None + + +def test_check_result_create_completed(): + """Test CheckResult.create_completed factory method.""" + test_result = "test_completed_result" + + result = CheckResult.create_completed(test_result) + + assert result.is_ready_to_execute is False + assert result.has_checkpointed_result is True + assert result.checkpointed_result is None + assert result.deserialized_result == test_result + + +def test_check_result_create_completed_with_none(): + """Test CheckResult.create_completed with None result (valid for operations that return None).""" + result = CheckResult.create_completed(None) + + assert result.is_ready_to_execute is False + assert result.has_checkpointed_result is True + assert result.checkpointed_result is None + assert result.deserialized_result is None + + +# Tests for OperationExecutor.process() method + + +def test_process_with_terminal_result_on_first_check(): + """Test process() when check_result_status returns terminal result on first call.""" + executor = ConcreteOperationExecutor() + executor.check_result_to_return = CheckResult.create_completed("terminal_result") + + result = executor.process() + + assert result == "terminal_result" + assert executor.check_result_status_called == 1 + assert executor.execute_called == 0 + + +def test_process_with_ready_to_execute_on_first_check(): + """Test process() when check_result_status returns ready_to_execute on first call.""" + executor = ConcreteOperationExecutor() + checkpoint = create_mock_checkpoint(OperationStatus.STARTED) + executor.check_result_to_return = CheckResult.create_is_ready_to_execute(checkpoint) + executor.execute_result_to_return = "execution_result" + + result = executor.process() + + assert result == "execution_result" + assert executor.check_result_status_called == 1 + assert executor.execute_called == 1 + + +def test_process_with_checkpoint_created_then_terminal(): + """Test process() when checkpoint is created, then terminal result on second check.""" + executor = ConcreteOperationExecutor() + + # First call returns create_started (checkpoint was created) + # Second call returns terminal result (immediate response) + call_count = 0 + + def check_result_side_effect(): + nonlocal call_count + call_count += 1 + if 
call_count == 1: + return CheckResult.create_started() + return CheckResult.create_completed("immediate_response") + + executor.check_result_status = check_result_side_effect + + result = executor.process() + + assert result == "immediate_response" + assert call_count == 2 + assert executor.execute_called == 0 + + +def test_process_with_checkpoint_created_then_ready_to_execute(): + """Test process() when checkpoint is created, then ready_to_execute on second check.""" + executor = ConcreteOperationExecutor() + checkpoint = create_mock_checkpoint(OperationStatus.STARTED) + + # First call returns create_started (checkpoint was created) + # Second call returns ready_to_execute (no immediate response, proceed to execute) + call_count = 0 + + def check_result_side_effect(): + nonlocal call_count + call_count += 1 + if call_count == 1: + return CheckResult.create_started() + return CheckResult.create_is_ready_to_execute(checkpoint) + + executor.check_result_status = check_result_side_effect + executor.execute_result_to_return = "execution_result" + + result = executor.process() + + assert result == "execution_result" + assert call_count == 2 + assert executor.execute_called == 1 + + +def test_process_with_none_result_terminal(): + """Test process() with terminal result that is None (valid for operations returning None).""" + executor = ConcreteOperationExecutor() + executor.check_result_to_return = CheckResult.create_completed(None) + + result = executor.process() + + assert result is None + assert executor.check_result_status_called == 1 + assert executor.execute_called == 0 + + +def test_process_raises_invalid_state_when_checkpointed_result_missing(): + """Test process() raises InvalidStateError when ready_to_execute but checkpoint is None.""" + executor = ConcreteOperationExecutor() + # Create invalid state: ready_to_execute but no checkpoint + executor.check_result_to_return = CheckResult( + is_ready_to_execute=True, + has_checkpointed_result=False, + checkpointed_result=None, + ) + + with pytest.raises(InvalidStateError) as exc_info: + executor.process() + + assert "checkpointed result is not set" in str(exc_info.value) + + +def test_process_raises_invalid_state_when_neither_terminal_nor_ready(): + """Test process() raises InvalidStateError when result is neither terminal nor ready.""" + executor = ConcreteOperationExecutor() + # Create invalid state: neither terminal nor ready (both False) + executor.check_result_to_return = CheckResult( + is_ready_to_execute=False, + has_checkpointed_result=False, + ) + + # Mock to return same invalid state on both calls + call_count = 0 + + def check_result_side_effect(): + nonlocal call_count + call_count += 1 + return CheckResult( + is_ready_to_execute=False, + has_checkpointed_result=False, + ) + + executor.check_result_status = check_result_side_effect + + with pytest.raises(InvalidStateError) as exc_info: + executor.process() + + assert "neither terminal nor ready to execute" in str(exc_info.value) + assert call_count == 2 # Should call twice before raising + + +def test_process_double_check_pattern(): + """Test that process() implements the double-check pattern correctly. + + This verifies the core immediate response handling logic: + 1. Check status once (may find existing checkpoint or create new one) + 2. If checkpoint was just created, check again (catches immediate response) + 3. 
Only call execute() if ready after both checks + """ + executor = ConcreteOperationExecutor() + checkpoint = create_mock_checkpoint(OperationStatus.STARTED) + + check_calls = [] + + def track_check_calls(): + call_num = len(check_calls) + 1 + check_calls.append(call_num) + + if call_num == 1: + # First check: checkpoint doesn't exist, create it + return CheckResult.create_started() + # Second check: checkpoint exists, ready to execute + return CheckResult.create_is_ready_to_execute(checkpoint) + + executor.check_result_status = track_check_calls + executor.execute_result_to_return = "final_result" + + result = executor.process() + + # Verify the double-check pattern + assert len(check_calls) == 2, "Should check status exactly twice" + assert check_calls == [1, 2], "Checks should be in order" + assert executor.execute_called == 1, "Should execute once after both checks" + assert result == "final_result" + + +def test_process_single_check_when_terminal_immediately(): + """Test that process() only checks once when terminal result is found immediately.""" + executor = ConcreteOperationExecutor() + + check_calls = [] + + def track_check_calls(): + call_num = len(check_calls) + 1 + check_calls.append(call_num) + return CheckResult.create_completed("immediate_terminal") + + executor.check_result_status = track_check_calls + + result = executor.process() + + # Should only check once since terminal result was found + assert len(check_calls) == 1, "Should check status only once for immediate terminal" + assert executor.execute_called == 0, "Should not execute when terminal result found" + assert result == "immediate_terminal" + + +def test_process_single_check_when_ready_immediately(): + """Test that process() only checks once when ready_to_execute is found immediately.""" + executor = ConcreteOperationExecutor() + checkpoint = create_mock_checkpoint(OperationStatus.STARTED) + + check_calls = [] + + def track_check_calls(): + call_num = len(check_calls) + 1 + check_calls.append(call_num) + return CheckResult.create_is_ready_to_execute(checkpoint) + + executor.check_result_status = track_check_calls + executor.execute_result_to_return = "execution_result" + + result = executor.process() + + # Should only check once since ready_to_execute was found + assert len(check_calls) == 1, "Should check status only once when ready immediately" + assert executor.execute_called == 1, "Should execute once" + assert result == "execution_result" diff --git a/tests/operation/callback_test.py b/tests/operation/callback_test.py index 3943f76..334e276 100644 --- a/tests/operation/callback_test.py +++ b/tests/operation/callback_test.py @@ -7,14 +7,17 @@ from aws_durable_execution_sdk_python.config import ( CallbackConfig, + Duration, StepConfig, WaitForCallbackConfig, ) -from aws_durable_execution_sdk_python.exceptions import CallbackError +from aws_durable_execution_sdk_python.context import Callback +from aws_durable_execution_sdk_python.exceptions import CallbackError, ValidationError from aws_durable_execution_sdk_python.identifier import OperationIdentifier from aws_durable_execution_sdk_python.lambda_service import ( CallbackDetails, CallbackOptions, + ErrorObject, Operation, OperationAction, OperationStatus, @@ -23,7 +26,7 @@ OperationUpdate, ) from aws_durable_execution_sdk_python.operation.callback import ( - create_callback_handler, + CallbackOperationExecutor, wait_for_callback_handler, ) from aws_durable_execution_sdk_python.retries import RetryDecision @@ -32,6 +35,17 @@ from 
aws_durable_execution_sdk_python.types import DurableContext, StepContext +# Test helper - maintains old handler signature for backward compatibility in tests +def create_callback_handler(state, operation_identifier, config=None): + """Test helper that wraps CallbackOperationExecutor with old handler signature.""" + executor = CallbackOperationExecutor( + state=state, + operation_identifier=operation_identifier, + config=config, + ) + return executor.process() + + # region create_callback_handler def test_create_callback_handler_new_operation_with_config(): """Test create_callback_handler creates new checkpoint when operation doesn't exist.""" @@ -50,7 +64,9 @@ def test_create_callback_handler_new_operation_with_config(): CheckpointedResult.create_from_operation(operation), ] - config = CallbackConfig(timeout_seconds=300, heartbeat_timeout_seconds=60) + config = CallbackConfig( + timeout=Duration.from_minutes(5), heartbeat_timeout=Duration.from_minutes(1) + ) result = create_callback_handler( state=mock_state, @@ -139,23 +155,27 @@ def test_create_callback_handler_existing_started_operation(): def test_create_callback_handler_existing_failed_operation(): - """Test create_callback_handler raises error for failed operation.""" + """Test create_callback_handler returns callback_id for failed operation (deferred error).""" + # CRITICAL: create_callback_handler should NOT raise on FAILED + # Errors are deferred to Callback.result() for deterministic replay mock_state = Mock(spec=ExecutionState) - mock_result = Mock(spec=CheckpointedResult) - mock_result.is_failed.return_value = True - mock_result.is_started.return_value = False - msg = "Checkpointed error" - mock_result.raise_callable_error.side_effect = Exception(msg) + failed_op = Operation( + operation_id="callback4", + operation_type=OperationType.CALLBACK, + status=OperationStatus.FAILED, + callback_details=CallbackDetails(callback_id="failed_cb4"), + ) + mock_result = CheckpointedResult.create_from_operation(failed_op) mock_state.get_checkpoint_result.return_value = mock_result - with pytest.raises(Exception, match="Checkpointed error"): - create_callback_handler( - state=mock_state, - operation_identifier=OperationIdentifier("callback4", None), - config=None, - ) + # Should return callback_id without raising + callback_id = create_callback_handler( + state=mock_state, + operation_identifier=OperationIdentifier("callback4", None), + config=None, + ) - mock_result.raise_callable_error.assert_called_once() + assert callback_id == "failed_cb4" mock_state.create_checkpoint.assert_not_called() @@ -300,13 +320,18 @@ def test_wait_for_callback_handler_submitter_called_with_callback_id(): def capture_step_call(func, name, config=None): # Execute the step callable to verify submitter is called correctly step_context = Mock(spec=StepContext) + step_context.logger = Mock() func(step_context) mock_context.step.side_effect = capture_step_call wait_for_callback_handler(mock_context, mock_submitter, "test") - mock_submitter.assert_called_once_with("callback_test_id") + # Verify submitter was called with callback_id and WaitForCallbackContext + assert mock_submitter.call_count == 1 + call_args = mock_submitter.call_args[0] + assert call_args[0] == "callback_test_id" + assert hasattr(call_args[1], "logger") def test_create_callback_handler_with_none_operation_in_result(): @@ -329,29 +354,11 @@ def test_create_callback_handler_with_none_operation_in_result(): def test_create_callback_handler_with_negative_timeouts(): """Test create_callback_handler with 
negative timeout values in config.""" - mock_state = Mock(spec=ExecutionState) - callback_details = CallbackDetails(callback_id="negative_timeout_cb") - operation = Operation( - operation_id="negative_timeout", - operation_type=OperationType.CALLBACK, - status=OperationStatus.STARTED, - callback_details=callback_details, - ) - mock_state.get_checkpoint_result.side_effect = [ - CheckpointedResult.create_not_found(), - CheckpointedResult.create_from_operation(operation), - ] - - config = CallbackConfig(timeout_seconds=-100, heartbeat_timeout_seconds=-50) - - result = create_callback_handler( - state=mock_state, - operation_identifier=OperationIdentifier("negative_timeout", None), - config=config, - ) - - assert result == "negative_timeout_cb" - mock_state.create_checkpoint.assert_called_once() + # Duration now validates that all values must be positive + with pytest.raises(ValidationError, match="Duration seconds must be positive"): + CallbackConfig( + timeout=Duration(seconds=-100), heartbeat_timeout=Duration(seconds=-50) + ) def test_wait_for_callback_handler_with_none_callback_id(): @@ -365,6 +372,7 @@ def test_wait_for_callback_handler_with_none_callback_id(): def execute_step(func, name, config=None): step_context = Mock(spec=StepContext) + step_context.logger = Mock() return func(step_context) mock_context.step.side_effect = execute_step @@ -372,7 +380,11 @@ def execute_step(func, name, config=None): result = wait_for_callback_handler(mock_context, mock_submitter, "test") assert result == "result_with_none_id" - mock_submitter.assert_called_once_with(None) + # Verify submitter was called with None callback_id and WaitForCallbackContext + assert mock_submitter.call_count == 1 + call_args = mock_submitter.call_args[0] + assert call_args[0] is None + assert hasattr(call_args[1], "logger") def test_wait_for_callback_handler_with_empty_string_callback_id(): @@ -386,6 +398,7 @@ def test_wait_for_callback_handler_with_empty_string_callback_id(): def execute_step(func, name, config=None): step_context = Mock(spec=StepContext) + step_context.logger = Mock() return func(step_context) mock_context.step.side_effect = execute_step @@ -393,7 +406,11 @@ def execute_step(func, name, config=None): result = wait_for_callback_handler(mock_context, mock_submitter, "test") assert result == "result_with_empty_id" - mock_submitter.assert_called_once_with("") + # Verify submitter was called with empty string callback_id and WaitForCallbackContext + assert mock_submitter.call_count == 1 + call_args = mock_submitter.call_args[0] + assert call_args[0] == "" # noqa: PLC1901 - explicitly testing empty string, not just falsey + assert hasattr(call_args[1], "logger") def test_wait_for_callback_handler_with_large_data(): @@ -498,7 +515,9 @@ def test_create_callback_handler_config_with_zero_timeouts(): CheckpointedResult.create_from_operation(operation), ] - config = CallbackConfig(timeout_seconds=0, heartbeat_timeout_seconds=0) + config = CallbackConfig( + timeout=Duration.from_seconds(0), heartbeat_timeout=Duration.from_seconds(0) + ) result = create_callback_handler( state=mock_state, @@ -538,7 +557,10 @@ def test_create_callback_handler_config_with_large_timeouts(): CheckpointedResult.create_from_operation(operation), ] - config = CallbackConfig(timeout_seconds=86400, heartbeat_timeout_seconds=3600) + config = CallbackConfig( + timeout=Duration.from_days(1), + heartbeat_timeout=Duration.from_hours(1), + ) result = create_callback_handler( state=mock_state, @@ -595,12 +617,13 @@ def 
test_wait_for_callback_handler_submitter_exception_handling(): mock_callback.result.return_value = "exception_result" mock_context.create_callback.return_value = mock_callback - def failing_submitter(callback_id): + def failing_submitter(callback_id, context): msg = "Submitter failed" raise ValueError(msg) def step_side_effect(func, name, config=None): step_context = Mock(spec=StepContext) + step_context.logger = Mock() func(step_context) mock_context.step.side_effect = step_side_effect @@ -683,7 +706,9 @@ def test_wait_for_callback_handler_config_propagation(): mock_context.create_callback.return_value = mock_callback mock_submitter = Mock() - config = WaitForCallbackConfig(timeout_seconds=120, heartbeat_timeout_seconds=30) + config = WaitForCallbackConfig( + timeout=Duration.from_minutes(2), heartbeat_timeout=Duration.from_seconds(30) + ) result = wait_for_callback_handler( mock_context, mock_submitter, "config_test", config @@ -772,7 +797,9 @@ def test_callback_lifecycle_complete_flow(): mock_callback.result.return_value = {"status": "completed", "data": "test_data"} mock_context.create_callback.return_value = mock_callback - config = WaitForCallbackConfig(timeout_seconds=300, heartbeat_timeout_seconds=60) + config = WaitForCallbackConfig( + timeout=Duration.from_minutes(5), heartbeat_timeout=Duration.from_minutes(1) + ) callback_id = create_callback_handler( state=mock_state, operation_identifier=OperationIdentifier("lifecycle_callback", None), @@ -781,12 +808,14 @@ def test_callback_lifecycle_complete_flow(): assert callback_id == "lifecycle_cb123" - def mock_submitter(cb_id): + def mock_submitter(cb_id, context): assert cb_id == "lifecycle_cb123" + assert hasattr(context, "logger") return "submitted" def execute_step(func, name, config=None): step_context = Mock(spec=StepContext) + step_context.logger = Mock() return func(step_context) mock_context.step.side_effect = execute_step @@ -847,8 +876,8 @@ def test_callback_timeout_configuration(): ] config = CallbackConfig( - timeout_seconds=timeout_seconds, - heartbeat_timeout_seconds=heartbeat_timeout_seconds, + timeout=Duration.from_seconds(timeout_seconds), + heartbeat_timeout=Duration.from_seconds(heartbeat_timeout_seconds), ) callback_id = create_callback_handler( @@ -864,19 +893,25 @@ def test_callback_timeout_configuration(): def test_callback_error_propagation(): """Test error propagation through callback operations.""" + # CRITICAL: create_callback_handler should NOT raise on FAILED + # Errors are deferred to Callback.result() for deterministic replay mock_state = Mock(spec=ExecutionState) - mock_result = Mock(spec=CheckpointedResult) - mock_result.is_failed.return_value = True - msg = "Callback creation failed" - mock_result.raise_callable_error.side_effect = RuntimeError(msg) + failed_op = Operation( + operation_id="error_callback", + operation_type=OperationType.CALLBACK, + status=OperationStatus.FAILED, + callback_details=CallbackDetails(callback_id="failed_cb"), + ) + mock_result = CheckpointedResult.create_from_operation(failed_op) mock_state.get_checkpoint_result.return_value = mock_result - with pytest.raises(RuntimeError, match="Callback creation failed"): - create_callback_handler( - state=mock_state, - operation_identifier=OperationIdentifier("error_callback", None), - config=None, - ) + # Should return callback_id without raising + callback_id = create_callback_handler( + state=mock_state, + operation_identifier=OperationIdentifier("error_callback", None), + config=None, + ) + assert callback_id == "failed_cb" 
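+    # Hypothetical caller-side sketch of the contract exercised above
+    # (function names are illustrative, not part of this PR):
+    #
+    #     cb_id = ctx.create_callback()    # FAILED op: still returns the id
+    #     notify_external_system(cb_id)    # runs identically on every replay
+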
mock_context = Mock(spec=DurableContext) mock_context.create_callback.side_effect = ValueError("Context creation failed") @@ -895,7 +930,7 @@ def test_callback_with_complex_submitter(): submission_log = [] - def complex_submitter(callback_id): + def complex_submitter(callback_id, context): submission_log.append(f"received_id: {callback_id}") if callback_id == "complex_cb789": submission_log.append("api_call_success") @@ -907,6 +942,7 @@ def complex_submitter(callback_id): def execute_step(func, name, config): step_context = Mock(spec=StepContext) + step_context.logger = Mock() return func(step_context) mock_context.step.side_effect = execute_step @@ -1008,7 +1044,9 @@ def test_callback_operation_update_creation(mock_operation_update): CheckpointedResult.create_from_operation(operation), ] - config = CallbackConfig(timeout_seconds=600, heartbeat_timeout_seconds=120) + config = CallbackConfig( + timeout=Duration.from_minutes(10), heartbeat_timeout=Duration.from_minutes(2) + ) create_callback_handler( state=mock_state, @@ -1025,3 +1063,471 @@ def test_callback_operation_update_creation(mock_operation_update): # endregion wait_for_callback_handler + + +# region immediate response handling tests +def test_callback_immediate_response_get_checkpoint_result_called_twice(): + """Test that get_checkpoint_result is called twice when checkpoint is created.""" + mock_state = Mock(spec=ExecutionState) + + # First call: not found, second call: started (no immediate response) + not_found = CheckpointedResult.create_not_found() + callback_details = CallbackDetails(callback_id="cb_immediate_1") + started_op = Operation( + operation_id="callback_immediate_1", + operation_type=OperationType.CALLBACK, + status=OperationStatus.STARTED, + callback_details=callback_details, + ) + started = CheckpointedResult.create_from_operation(started_op) + mock_state.get_checkpoint_result.side_effect = [not_found, started] + + result = create_callback_handler( + state=mock_state, + operation_identifier=OperationIdentifier("callback_immediate_1", None), + config=None, + ) + + # Verify callback_id was returned + assert result == "cb_immediate_1" + # Verify get_checkpoint_result was called twice + assert mock_state.get_checkpoint_result.call_count == 2 + + +def test_callback_immediate_response_create_checkpoint_with_is_sync_true(): + """Test that create_checkpoint is called with is_sync=True.""" + mock_state = Mock(spec=ExecutionState) + + # First call: not found, second call: started + not_found = CheckpointedResult.create_not_found() + callback_details = CallbackDetails(callback_id="cb_immediate_2") + started_op = Operation( + operation_id="callback_immediate_2", + operation_type=OperationType.CALLBACK, + status=OperationStatus.STARTED, + callback_details=callback_details, + ) + started = CheckpointedResult.create_from_operation(started_op) + mock_state.get_checkpoint_result.side_effect = [not_found, started] + + result = create_callback_handler( + state=mock_state, + operation_identifier=OperationIdentifier("callback_immediate_2", None), + config=None, + ) + + # Verify callback_id was returned + assert result == "cb_immediate_2" + # Verify create_checkpoint was called with is_sync=True (default) + mock_state.create_checkpoint.assert_called_once() + # is_sync=True is the default, so it won't be in kwargs if not explicitly passed + # We just verify the checkpoint was created + + +def test_callback_immediate_response_immediate_success(): + """Test immediate success: checkpoint returns SUCCEEDED on second check. 
+ + When checkpoint returns SUCCEEDED on second check, operation returns callback_id + without raising. + """ + mock_state = Mock(spec=ExecutionState) + + # First call: not found, second call: succeeded (immediate response) + not_found = CheckpointedResult.create_not_found() + callback_details = CallbackDetails(callback_id="cb_immediate_success") + succeeded_op = Operation( + operation_id="callback_immediate_3", + operation_type=OperationType.CALLBACK, + status=OperationStatus.SUCCEEDED, + callback_details=callback_details, + ) + succeeded = CheckpointedResult.create_from_operation(succeeded_op) + mock_state.get_checkpoint_result.side_effect = [not_found, succeeded] + + result = create_callback_handler( + state=mock_state, + operation_identifier=OperationIdentifier("callback_immediate_3", None), + config=None, + ) + + # Verify callback_id was returned without raising + assert result == "cb_immediate_success" + # Verify checkpoint was created + mock_state.create_checkpoint.assert_called_once() + # Verify get_checkpoint_result was called twice + assert mock_state.get_checkpoint_result.call_count == 2 + + +def test_callback_immediate_response_immediate_failure_deferred(): + """Test immediate failure deferred: checkpoint returns FAILED on second check. + + CRITICAL: When checkpoint returns FAILED on second check, create_callback() + returns callback_id (does NOT raise). Errors are deferred to Callback.result() + for deterministic replay. + """ + mock_state = Mock(spec=ExecutionState) + + # First call: not found, second call: failed (immediate response) + not_found = CheckpointedResult.create_not_found() + callback_details = CallbackDetails(callback_id="cb_immediate_failed") + failed_op = Operation( + operation_id="callback_immediate_4", + operation_type=OperationType.CALLBACK, + status=OperationStatus.FAILED, + callback_details=callback_details, + ) + failed = CheckpointedResult.create_from_operation(failed_op) + mock_state.get_checkpoint_result.side_effect = [not_found, failed] + + # CRITICAL: Should return callback_id without raising + result = create_callback_handler( + state=mock_state, + operation_identifier=OperationIdentifier("callback_immediate_4", None), + config=None, + ) + + # Verify callback_id was returned (error deferred) + assert result == "cb_immediate_failed" + # Verify checkpoint was created + mock_state.create_checkpoint.assert_called_once() + # Verify get_checkpoint_result was called twice + assert mock_state.get_checkpoint_result.call_count == 2 + + +def test_callback_result_raises_error_for_failed_callbacks(): + """Test that Callback.result() raises error for FAILED callbacks (deferred error handling). + + This test verifies that errors are properly deferred to Callback.result() rather + than being raised during create_callback(). This ensures deterministic replay: + code between create_callback() and callback.result() always executes. 
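+
+    Call-site sketch (hypothetical user code, names illustrative):
+
+        callback = ctx.create_callback()   # never raises for FAILED ops
+        callback.result()                  # CallbackError surfaces only here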
+ """ + + mock_state = Mock(spec=ExecutionState) + + # Create a FAILED callback operation + error = ErrorObject( + message="Callback failed", type="CallbackError", data=None, stack_trace=None + ) + callback_details = CallbackDetails( + callback_id="cb_failed_result", result=None, error=error + ) + failed_op = Operation( + operation_id="callback_failed_result", + operation_type=OperationType.CALLBACK, + status=OperationStatus.FAILED, + callback_details=callback_details, + ) + failed_result = CheckpointedResult.create_from_operation(failed_op) + mock_state.get_checkpoint_result.return_value = failed_result + + # Create Callback instance + callback = Callback( + callback_id="cb_failed_result", + operation_id="callback_failed_result", + state=mock_state, + serdes=None, + ) + + # Verify that result() raises CallbackError + with pytest.raises(CallbackError, match="Callback failed"): + callback.result() + + +def test_callback_result_raises_error_for_timed_out_callbacks(): + """Test that Callback.result() raises error for TIMED_OUT callbacks.""" + + mock_state = Mock(spec=ExecutionState) + + # Create a TIMED_OUT callback operation + error = ErrorObject( + message="Callback timed out", + type="CallbackTimeoutError", + data=None, + stack_trace=None, + ) + callback_details = CallbackDetails( + callback_id="cb_timed_out_result", result=None, error=error + ) + timed_out_op = Operation( + operation_id="callback_timed_out_result", + operation_type=OperationType.CALLBACK, + status=OperationStatus.TIMED_OUT, + callback_details=callback_details, + ) + timed_out_result = CheckpointedResult.create_from_operation(timed_out_op) + mock_state.get_checkpoint_result.return_value = timed_out_result + + # Create Callback instance + callback = Callback( + callback_id="cb_timed_out_result", + operation_id="callback_timed_out_result", + state=mock_state, + serdes=None, + ) + + # Verify that result() raises CallbackError + with pytest.raises(CallbackError, match="Callback timed out"): + callback.result() + + +def test_callback_immediate_response_no_immediate_response(): + """Test no immediate response: checkpoint returns STARTED on second check. + + When checkpoint returns STARTED on second check, operation returns callback_id + normally (callbacks don't suspend). + """ + mock_state = Mock(spec=ExecutionState) + + # First call: not found, second call: started (no immediate response) + not_found = CheckpointedResult.create_not_found() + callback_details = CallbackDetails(callback_id="cb_immediate_started") + started_op = Operation( + operation_id="callback_immediate_5", + operation_type=OperationType.CALLBACK, + status=OperationStatus.STARTED, + callback_details=callback_details, + ) + started = CheckpointedResult.create_from_operation(started_op) + mock_state.get_checkpoint_result.side_effect = [not_found, started] + + result = create_callback_handler( + state=mock_state, + operation_identifier=OperationIdentifier("callback_immediate_5", None), + config=None, + ) + + # Verify callback_id was returned + assert result == "cb_immediate_started" + # Verify checkpoint was created + mock_state.create_checkpoint.assert_called_once() + # Verify get_checkpoint_result was called twice + assert mock_state.get_checkpoint_result.call_count == 2 + + +def test_callback_immediate_response_already_completed(): + """Test already completed: checkpoint exists on first check. + + When checkpoint is already SUCCEEDED on first check, no checkpoint is created + and callback_id is returned immediately. 
+ """ + mock_state = Mock(spec=ExecutionState) + + # First call: already succeeded + callback_details = CallbackDetails(callback_id="cb_already_completed") + succeeded_op = Operation( + operation_id="callback_immediate_6", + operation_type=OperationType.CALLBACK, + status=OperationStatus.SUCCEEDED, + callback_details=callback_details, + ) + succeeded = CheckpointedResult.create_from_operation(succeeded_op) + mock_state.get_checkpoint_result.return_value = succeeded + + result = create_callback_handler( + state=mock_state, + operation_identifier=OperationIdentifier("callback_immediate_6", None), + config=None, + ) + + # Verify callback_id was returned + assert result == "cb_already_completed" + # Verify no checkpoint was created (already exists) + mock_state.create_checkpoint.assert_not_called() + # Verify get_checkpoint_result was called only once + assert mock_state.get_checkpoint_result.call_count == 1 + + +def test_callback_immediate_response_already_failed(): + """Test already failed: checkpoint is already FAILED on first check. + + When checkpoint is already FAILED on first check, no checkpoint is created + and callback_id is returned (error deferred to Callback.result()). + """ + mock_state = Mock(spec=ExecutionState) + + # First call: already failed + callback_details = CallbackDetails(callback_id="cb_already_failed") + failed_op = Operation( + operation_id="callback_immediate_7", + operation_type=OperationType.CALLBACK, + status=OperationStatus.FAILED, + callback_details=callback_details, + ) + failed = CheckpointedResult.create_from_operation(failed_op) + mock_state.get_checkpoint_result.return_value = failed + + # Should return callback_id without raising + result = create_callback_handler( + state=mock_state, + operation_identifier=OperationIdentifier("callback_immediate_7", None), + config=None, + ) + + # Verify callback_id was returned (error deferred) + assert result == "cb_already_failed" + # Verify no checkpoint was created (already exists) + mock_state.create_checkpoint.assert_not_called() + # Verify get_checkpoint_result was called only once + assert mock_state.get_checkpoint_result.call_count == 1 + + +def test_callback_deferred_error_handling_code_execution_between_create_and_result(): + """Test callback deferred error handling with code execution between create_callback() and callback.result(). + + This test verifies that code between create_callback() and callback.result() executes + even when the callback is FAILED. This ensures deterministic replay. 
+ """ + + mock_state = Mock(spec=ExecutionState) + + # Setup: callback is already FAILED + error = ErrorObject( + message="Callback failed", type="CallbackError", data=None, stack_trace=None + ) + callback_details = CallbackDetails( + callback_id="cb_deferred_error", result=None, error=error + ) + failed_op = Operation( + operation_id="callback_deferred_error", + operation_type=OperationType.CALLBACK, + status=OperationStatus.FAILED, + callback_details=callback_details, + ) + failed_result = CheckpointedResult.create_from_operation(failed_op) + mock_state.get_checkpoint_result.return_value = failed_result + + # Step 1: create_callback() returns callback_id without raising + callback_id = create_callback_handler( + state=mock_state, + operation_identifier=OperationIdentifier("callback_deferred_error", None), + config=None, + ) + assert callback_id == "cb_deferred_error" + + # Step 2: Code executes between create_callback() and callback.result() + execution_log = [ + "code_executed_after_create_callback", + f"callback_id: {callback_id}", + ] + + # Step 3: Callback.result() raises the error + callback = Callback( + callback_id=callback_id, + operation_id="callback_deferred_error", + state=mock_state, + serdes=None, + ) + + with pytest.raises(CallbackError, match="Callback failed"): + callback.result() + + # Verify code between create_callback() and callback.result() executed + assert execution_log == [ + "code_executed_after_create_callback", + "callback_id: cb_deferred_error", + ] + + +def test_callback_immediate_response_with_config(): + """Test immediate response with callback configuration.""" + mock_state = Mock(spec=ExecutionState) + + # First call: not found, second call: succeeded + not_found = CheckpointedResult.create_not_found() + callback_details = CallbackDetails(callback_id="cb_with_config") + succeeded_op = Operation( + operation_id="callback_with_config", + operation_type=OperationType.CALLBACK, + status=OperationStatus.SUCCEEDED, + callback_details=callback_details, + ) + succeeded = CheckpointedResult.create_from_operation(succeeded_op) + mock_state.get_checkpoint_result.side_effect = [not_found, succeeded] + + config = CallbackConfig( + timeout=Duration.from_minutes(5), heartbeat_timeout=Duration.from_minutes(1) + ) + + result = create_callback_handler( + state=mock_state, + operation_identifier=OperationIdentifier("callback_with_config", None), + config=config, + ) + + # Verify callback_id was returned + assert result == "cb_with_config" + # Verify checkpoint was created with config + mock_state.create_checkpoint.assert_called_once() + call_args = mock_state.create_checkpoint.call_args[1] + operation_update = call_args["operation_update"] + assert operation_update.callback_options.timeout_seconds == 300 + assert operation_update.callback_options.heartbeat_timeout_seconds == 60 + + +# endregion immediate response handling tests + + +def test_callback_returns_id_when_second_check_returns_started(): + """Test when the second checkpoint check returns + STARTED (not terminal), the callback operation returns callback_id normally. 
+ """ + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + # First call: checkpoint doesn't exist + # Second call: checkpoint returns STARTED (no immediate response) + mock_state.get_checkpoint_result.side_effect = [ + CheckpointedResult.create_not_found(), + CheckpointedResult.create_from_operation( + Operation( + operation_id="callback-1", + operation_type=OperationType.CALLBACK, + status=OperationStatus.STARTED, + callback_details=CallbackDetails(callback_id="cb-123"), + ) + ), + ] + + executor = CallbackOperationExecutor( + state=mock_state, + operation_identifier=OperationIdentifier("callback-1", None, "test_callback"), + config=CallbackConfig(), + ) + callback_id = executor.process() + + # Assert - behaves like "old way" + assert callback_id == "cb-123" + assert mock_state.get_checkpoint_result.call_count == 2 # Double-check happened + mock_state.create_checkpoint.assert_called_once() # START checkpoint created + + +def test_callback_returns_id_when_second_check_returns_started_duplicate(): + """Test when the second checkpoint check returns + STARTED (not terminal), the callback operation returns callback_id normally. + """ + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + # First call: checkpoint doesn't exist + # Second call: checkpoint returns STARTED (no immediate response) + not_found = CheckpointedResult.create_not_found() + started_op = Operation( + operation_id="callback-1", + operation_type=OperationType.CALLBACK, + status=OperationStatus.STARTED, + callback_details=CallbackDetails(callback_id="cb-123"), + ) + started = CheckpointedResult.create_from_operation(started_op) + mock_state.get_checkpoint_result.side_effect = [not_found, started] + + executor = CallbackOperationExecutor( + state=mock_state, + operation_identifier=OperationIdentifier("callback-1", None, "test_callback"), + config=CallbackConfig(), + ) + callback_id = executor.process() + + # Assert - behaves like "old way" + assert callback_id == "cb-123" + assert mock_state.get_checkpoint_result.call_count == 2 # Double-check happened + mock_state.create_checkpoint.assert_called_once() # START checkpoint created diff --git a/tests/operation/child_test.py b/tests/operation/child_test.py index e888ebb..ae1bb3a 100644 --- a/tests/operation/child_test.py +++ b/tests/operation/child_test.py @@ -1,5 +1,7 @@ """Unit tests for child handler.""" +from __future__ import annotations + import json from typing import cast from unittest.mock import Mock @@ -7,7 +9,10 @@ import pytest from aws_durable_execution_sdk_python.config import ChildConfig -from aws_durable_execution_sdk_python.exceptions import CallableRuntimeError +from aws_durable_execution_sdk_python.exceptions import ( + CallableRuntimeError, + InvocationError, +) from aws_durable_execution_sdk_python.identifier import OperationIdentifier from aws_durable_execution_sdk_python.lambda_service import ( ErrorObject, @@ -34,9 +39,15 @@ ], ) def test_child_handler_not_started( - config: ChildConfig, expected_sub_type: OperationSubType + config: ChildConfig | None, expected_sub_type: OperationSubType ): - """Test child_handler when operation not started.""" + """Test child_handler when operation not started. 
+ + Verifies: + - get_checkpoint_result is called once (async checkpoint, no second check) + - create_checkpoint is called with is_sync=False for START + - Operation executes and creates SUCCEED checkpoint + """ mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = "test_arn" mock_result = Mock() @@ -44,7 +55,6 @@ def test_child_handler_not_started( mock_result.is_failed.return_value = False mock_result.is_started.return_value = False mock_result.is_replay_children.return_value = False - mock_result.is_replay_children.return_value = False mock_result.is_existent.return_value = False mock_state.get_checkpoint_result.return_value = mock_result mock_callable = Mock(return_value="fresh_result") @@ -54,10 +64,15 @@ def test_child_handler_not_started( ) assert result == "fresh_result" + + # Verify get_checkpoint_result called once (async checkpoint, no second check) + assert mock_state.get_checkpoint_result.call_count == 1 + + # Verify create_checkpoint called twice (start and succeed) mock_state.create_checkpoint.assert_called() - assert mock_state.create_checkpoint.call_count == 2 # start and succeed + assert mock_state.create_checkpoint.call_count == 2 - # Verify start checkpoint + # Verify start checkpoint with is_sync=False start_call = mock_state.create_checkpoint.call_args_list[0] start_operation = start_call[1]["operation_update"] assert start_operation.operation_id == "op1" @@ -65,6 +80,8 @@ def test_child_handler_not_started( assert start_operation.operation_type is OperationType.CONTEXT assert start_operation.sub_type is expected_sub_type assert start_operation.action is OperationAction.START + # CRITICAL: Verify is_sync=False for START checkpoint (async, no immediate response) + assert start_call[1]["is_sync"] is False # Verify success checkpoint success_call = mock_state.create_checkpoint.call_args_list[1] @@ -80,7 +97,13 @@ def test_child_handler_not_started( def test_child_handler_already_succeeded(): - """Test child_handler when operation already succeeded.""" + """Test child_handler when operation already succeeded without replay_children. + + Verifies: + - Returns cached result without executing function + - No checkpoint created + - get_checkpoint_result called once + """ mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = "test_arn" mock_result = Mock() @@ -95,8 +118,12 @@ def test_child_handler_already_succeeded(): ) assert result == "cached_result" + # Verify function not executed mock_callable.assert_not_called() + # Verify no checkpoint created mock_state.create_checkpoint.assert_not_called() + # Verify get_checkpoint_result called once + assert mock_state.get_checkpoint_result.call_count == 1 def test_child_handler_already_succeeded_none_result(): @@ -119,7 +146,13 @@ def test_child_handler_already_succeeded_none_result(): def test_child_handler_already_failed(): - """Test child_handler when operation already failed.""" + """Test child_handler when operation already failed. 
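+
+    Flow under test (sketch): FAILED on first check -> the checkpointed error
+    is re-raised; the callable never runs and no new checkpoint is written.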
+ + Verifies: + - Already failed: raises error without executing function + - No checkpoint created + - get_checkpoint_result called once + """ mock_state = Mock(spec=ExecutionState) mock_result = Mock() mock_result.is_succeeded.return_value = False @@ -138,7 +171,10 @@ def test_child_handler_already_failed(): None, ) + # Verify function not executed mock_callable.assert_not_called() + # Verify get_checkpoint_result called once + assert mock_state.get_checkpoint_result.call_count == 1 @pytest.mark.parametrize( @@ -153,9 +189,15 @@ def test_child_handler_already_failed(): ], ) def test_child_handler_already_started( - config: ChildConfig, expected_sub_type: OperationSubType + config: ChildConfig | None, expected_sub_type: OperationSubType ): - """Test child_handler when operation already started.""" + """Test child_handler when operation already started. + + Verifies: + - Operation executes when already started + - Only SUCCEED checkpoint created (no START) + - get_checkpoint_result called once + """ mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = "test_arn" mock_result = Mock() @@ -172,7 +214,11 @@ def test_child_handler_already_started( assert result == "started_result" - # Verify success checkpoint + # Verify get_checkpoint_result called once + assert mock_state.get_checkpoint_result.call_count == 1 + + # Verify only success checkpoint (no START since already started) + assert mock_state.create_checkpoint.call_count == 1 success_call = mock_state.create_checkpoint.call_args_list[0] success_operation = success_call[1]["operation_update"] assert success_operation.operation_id == "op5" @@ -197,9 +243,15 @@ def test_child_handler_already_started( ], ) def test_child_handler_callable_exception( - config: ChildConfig, expected_sub_type: OperationSubType + config: ChildConfig | None, expected_sub_type: OperationSubType ): - """Test child_handler when callable raises exception.""" + """Test child_handler when callable raises exception. + + Verifies: + - Error handling: checkpoints FAIL and raises wrapped error + - get_checkpoint_result called once + - create_checkpoint called with is_sync=False for START + """ mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = "test_arn" mock_result = Mock() @@ -218,10 +270,14 @@ def test_child_handler_callable_exception( config, ) + # Verify get_checkpoint_result called once + assert mock_state.get_checkpoint_result.call_count == 1 + + # Verify create_checkpoint called twice (start and fail) mock_state.create_checkpoint.assert_called() - assert mock_state.create_checkpoint.call_count == 2 # start and fail + assert mock_state.create_checkpoint.call_count == 2 - # Verify start checkpoint + # Verify start checkpoint with is_sync=False start_call = mock_state.create_checkpoint.call_args_list[0] start_operation = start_call[1]["operation_update"] assert start_operation.operation_id == "op6" @@ -229,6 +285,7 @@ def test_child_handler_callable_exception( assert start_operation.operation_type is OperationType.CONTEXT assert start_operation.sub_type is expected_sub_type assert start_operation.action is OperationAction.START + assert start_call[1]["is_sync"] is False # Verify fail checkpoint fail_call = mock_state.create_checkpoint.call_args_list[1] @@ -242,13 +299,19 @@ def test_child_handler_callable_exception( def test_child_handler_error_wrapped(): - """Test child_handler wraps regular errors as CallableRuntimeError.""" + """Test child_handler wraps regular errors as CallableRuntimeError. 
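+
+    Flow under test (sketch): callable raises RuntimeError -> FAIL checkpoint
+    -> the error surfaces to the caller wrapped as CallableRuntimeError.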
+ + Verifies: + - Regular exceptions are wrapped as CallableRuntimeError + - FAIL checkpoint is created + """ mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = "test_arn" mock_result = Mock() mock_result.is_succeeded.return_value = False mock_result.is_failed.return_value = False mock_result.is_started.return_value = False + mock_result.is_existent.return_value = False mock_state.get_checkpoint_result.return_value = mock_result test_error = RuntimeError("Test error") mock_callable = Mock(side_effect=test_error) @@ -261,6 +324,46 @@ def test_child_handler_error_wrapped(): None, ) + # Verify FAIL checkpoint was created + assert mock_state.create_checkpoint.call_count == 2 # start and fail + + +def test_child_handler_invocation_error_reraised(): + """Test child_handler re-raises InvocationError after checkpointing FAIL. + + Verifies: + - InvocationError: checkpoints FAIL and re-raises (for retry) + - FAIL checkpoint is created + - Original InvocationError is re-raised (not wrapped) + """ + + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + mock_result = Mock() + mock_result.is_succeeded.return_value = False + mock_result.is_failed.return_value = False + mock_result.is_started.return_value = False + mock_result.is_existent.return_value = False + mock_state.get_checkpoint_result.return_value = mock_result + test_error = InvocationError("Invocation failed") + mock_callable = Mock(side_effect=test_error) + + with pytest.raises(InvocationError, match="Invocation failed"): + child_handler( + mock_callable, + mock_state, + OperationIdentifier("op7b", None, "test_name"), + None, + ) + + # Verify FAIL checkpoint was created + assert mock_state.create_checkpoint.call_count == 2 # start and fail + + # Verify fail checkpoint + fail_call = mock_state.create_checkpoint.call_args_list[1] + fail_operation = fail_call[1]["operation_update"] + assert fail_operation.action is OperationAction.FAIL + def test_child_handler_with_config(): """Test child_handler with config parameter.""" @@ -270,6 +373,7 @@ def test_child_handler_with_config(): mock_result.is_succeeded.return_value = False mock_result.is_failed.return_value = False mock_result.is_started.return_value = False + mock_result.is_existent.return_value = False mock_state.get_checkpoint_result.return_value = mock_result mock_callable = Mock(return_value="config_result") config = ChildConfig() @@ -280,6 +384,8 @@ def test_child_handler_with_config(): assert result == "config_result" mock_callable.assert_called_once() + # Verify get_checkpoint_result called once + assert mock_state.get_checkpoint_result.call_count == 1 def test_child_handler_default_serialization(): @@ -291,6 +397,7 @@ def test_child_handler_default_serialization(): mock_result.is_failed.return_value = False mock_result.is_started.return_value = False mock_result.is_replay_children.return_value = False + mock_result.is_existent.return_value = False mock_state.get_checkpoint_result.return_value = mock_result complex_result = {"key": "value", "number": 42, "list": [1, 2, 3]} mock_callable = Mock(return_value=complex_result) @@ -300,6 +407,8 @@ def test_child_handler_default_serialization(): ) assert result == complex_result + # Verify get_checkpoint_result called once + assert mock_state.get_checkpoint_result.call_count == 1 # Verify JSON serialization was used in checkpoint success_call = [ call @@ -362,6 +471,8 @@ def test_child_handler_custom_serdes_already_succeeded() -> None: expected_checkpoointed_result = {"key": "value", 
"number": 42, "list": [1, 2, 3]} assert actual_result == expected_checkpoointed_result + # Verify get_checkpoint_result called once + assert mock_state.get_checkpoint_result.call_count == 1 # endregion child_handler @@ -369,7 +480,12 @@ def test_child_handler_custom_serdes_already_succeeded() -> None: # large payload with summary generator def test_child_handler_large_payload_with_summary_generator() -> None: - """Test child_handler with large payload and summary generator.""" + """Test child_handler with large payload and summary generator. + + Verifies: + - Large payload: uses ReplayChildren mode with summary_generator + - get_checkpoint_result called once + """ mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = "test_arn" mock_result = Mock() @@ -397,6 +513,9 @@ def my_summary(result: str) -> str: ) assert large_result == actual_result + # Verify get_checkpoint_result called once + assert mock_state.get_checkpoint_result.call_count == 1 + # Verify replay_children mode with summary success_call = mock_state.create_checkpoint.call_args_list[1] success_operation = success_call[1]["operation_update"] assert success_operation.context_options.replay_children @@ -406,7 +525,12 @@ def my_summary(result: str) -> str: # large payload without summary generator def test_child_handler_large_payload_without_summary_generator() -> None: - """Test child_handler with large payload and no summary generator.""" + """Test child_handler with large payload and no summary generator. + + Verifies: + - Large payload without summary_generator: uses ReplayChildren mode with empty string + - get_checkpoint_result called once + """ mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = "test_arn" mock_result = Mock() @@ -428,6 +552,9 @@ def test_child_handler_large_payload_without_summary_generator() -> None: ) assert large_result == actual_result + # Verify get_checkpoint_result called once + assert mock_state.get_checkpoint_result.call_count == 1 + # Verify replay_children mode with empty string success_call = mock_state.create_checkpoint.call_args_list[1] success_operation = success_call[1]["operation_update"] assert success_operation.context_options.replay_children @@ -437,7 +564,13 @@ def test_child_handler_large_payload_without_summary_generator() -> None: # mocked children replay mode execute the function again def test_child_handler_replay_children_mode() -> None: - """Test child_handler in ReplayChildren mode.""" + """Test child_handler in ReplayChildren mode. 
+ + Verifies: + - Already succeeded with replay_children: re-executes function + - No checkpoint created (returns without checkpointing) + - get_checkpoint_result called once + """ mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = "test_arn" mock_result = Mock() @@ -458,12 +591,21 @@ def test_child_handler_replay_children_mode() -> None: ) assert actual_result == complex_result - + # Verify function was executed (replay_children mode) + mock_callable.assert_called_once() + # Verify no checkpoint created (returns without checkpointing in replay mode) mock_state.create_checkpoint.assert_not_called() + # Verify get_checkpoint_result called once + assert mock_state.get_checkpoint_result.call_count == 1 def test_small_payload_with_summary_generator(): - """Test: Small payload with summary_generator -> replay_children = False""" + """Test: Small payload with summary_generator -> replay_children = False + + Verifies: + - Small payload does NOT trigger replay_children even with summary_generator + - get_checkpoint_result called once + """ mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = "test_arn" mock_result = Mock() @@ -491,6 +633,8 @@ def my_summary(result: str) -> str: ) assert actual_result == small_result + # Verify get_checkpoint_result called once + assert mock_state.get_checkpoint_result.call_count == 1 success_call = mock_state.create_checkpoint.call_args_list[1] success_operation = success_call[1]["operation_update"] @@ -501,7 +645,12 @@ def my_summary(result: str) -> str: def test_small_payload_without_summary_generator(): - """Test: Small payload without summary_generator -> replay_children = False""" + """Test: Small payload without summary_generator -> replay_children = False + + Verifies: + - Small payload does NOT trigger replay_children + - get_checkpoint_result called once + """ mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = "test_arn" mock_result = Mock() @@ -526,6 +675,8 @@ def test_small_payload_without_summary_generator(): ) assert actual_result == small_result + # Verify get_checkpoint_result called once + assert mock_state.get_checkpoint_result.call_count == 1 success_call = mock_state.create_checkpoint.call_args_list[1] success_operation = success_call[1]["operation_update"] diff --git a/tests/operation/invoke_test.py b/tests/operation/invoke_test.py index baf69bf..5bb98da 100644 --- a/tests/operation/invoke_test.py +++ b/tests/operation/invoke_test.py @@ -7,7 +7,7 @@ import pytest -from aws_durable_execution_sdk_python.config import InvokeConfig +from aws_durable_execution_sdk_python.config import Duration, InvokeConfig from aws_durable_execution_sdk_python.exceptions import ( CallableRuntimeError, ExecutionError, @@ -23,14 +23,27 @@ OperationStatus, OperationType, ) -from aws_durable_execution_sdk_python.operation.invoke import ( - invoke_handler, - suspend_with_optional_resume_delay, -) +from aws_durable_execution_sdk_python.operation.invoke import InvokeOperationExecutor from aws_durable_execution_sdk_python.state import CheckpointedResult, ExecutionState +from aws_durable_execution_sdk_python.suspend import suspend_with_optional_resume_delay from tests.serdes_test import CustomDictSerDes +# Test helper - maintains old handler signature for backward compatibility in tests +def invoke_handler(function_name, payload, state, operation_identifier, config): + """Test helper that wraps InvokeOperationExecutor with old handler signature.""" + if not config: + config = InvokeConfig() + executor = 
InvokeOperationExecutor( + function_name=function_name, + payload=payload, + state=state, + operation_identifier=operation_identifier, + config=config, + ) + return executor.process() + + def test_invoke_handler_already_succeeded(): """Test invoke_handler when operation already succeeded.""" mock_state = Mock(spec=ExecutionState) @@ -164,7 +177,7 @@ def test_invoke_handler_already_timed_out(): ) -@pytest.mark.parametrize("status", [OperationStatus.STARTED, OperationStatus.PENDING]) +@pytest.mark.parametrize("status", [OperationStatus.STARTED]) def test_invoke_handler_already_started(status): """Test invoke_handler when operation is already started.""" mock_state = Mock(spec=ExecutionState) @@ -179,7 +192,9 @@ def test_invoke_handler_already_started(status): mock_result = CheckpointedResult.create_from_operation(operation) mock_state.get_checkpoint_result.return_value = mock_result - with pytest.raises(SuspendExecution, match="Invoke invoke6 still in progress"): + with pytest.raises( + SuspendExecution, match="Invoke invoke6 started, suspending for completion" + ): invoke_handler( function_name="test_function", payload="test_input", @@ -204,7 +219,7 @@ def test_invoke_handler_already_started_with_timeout(status): mock_result = CheckpointedResult.create_from_operation(operation) mock_state.get_checkpoint_result.return_value = mock_result - config = InvokeConfig[str, str](timeout_seconds=30) + config = InvokeConfig[str, str](timeout=Duration.from_seconds(30)) with pytest.raises(TimedSuspendExecution): invoke_handler( @@ -221,10 +236,17 @@ def test_invoke_handler_new_operation(): mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = "test_arn" - mock_result = CheckpointedResult.create_not_found() - mock_state.get_checkpoint_result.return_value = mock_result + # First call: not found, second call: started (no immediate response) + not_found = CheckpointedResult.create_not_found() + started_op = Operation( + operation_id="invoke8", + operation_type=OperationType.CHAINED_INVOKE, + status=OperationStatus.STARTED, + ) + started = CheckpointedResult.create_from_operation(started_op) + mock_state.get_checkpoint_result.side_effect = [not_found, started] - config = InvokeConfig[str, str](timeout_seconds=60) + config = InvokeConfig[str, str](timeout=Duration.from_minutes(1)) with pytest.raises( SuspendExecution, match="Invoke invoke8 started, suspending for completion" @@ -254,10 +276,16 @@ def test_invoke_handler_new_operation_with_timeout(): mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = "test_arn" - mock_result = CheckpointedResult.create_not_found() - mock_state.get_checkpoint_result.return_value = mock_result + not_found = CheckpointedResult.create_not_found() + started_op = Operation( + operation_id="invoke_test", + operation_type=OperationType.CHAINED_INVOKE, + status=OperationStatus.STARTED, + ) + started = CheckpointedResult.create_from_operation(started_op) + mock_state.get_checkpoint_result.side_effect = [not_found, started] - config = InvokeConfig[str, str](timeout_seconds=30) + config = InvokeConfig[str, str](timeout=Duration.from_seconds(30)) with pytest.raises(TimedSuspendExecution): invoke_handler( @@ -274,10 +302,16 @@ def test_invoke_handler_new_operation_no_timeout(): mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = "test_arn" - mock_result = CheckpointedResult.create_not_found() - mock_state.get_checkpoint_result.return_value = mock_result + not_found = CheckpointedResult.create_not_found() + started_op = 
Operation( + operation_id="invoke_test", + operation_type=OperationType.CHAINED_INVOKE, + status=OperationStatus.STARTED, + ) + started = CheckpointedResult.create_from_operation(started_op) + mock_state.get_checkpoint_result.side_effect = [not_found, started] - config = InvokeConfig[str, str](timeout_seconds=0) + config = InvokeConfig[str, str](timeout=Duration.from_seconds(0)) with pytest.raises(SuspendExecution): invoke_handler( @@ -294,8 +328,14 @@ def test_invoke_handler_no_config(): mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = "test_arn" - mock_result = CheckpointedResult.create_not_found() - mock_state.get_checkpoint_result.return_value = mock_result + not_found = CheckpointedResult.create_not_found() + started_op = Operation( + operation_id="invoke_test", + operation_type=OperationType.CHAINED_INVOKE, + status=OperationStatus.STARTED, + ) + started = CheckpointedResult.create_from_operation(started_op) + mock_state.get_checkpoint_result.side_effect = [not_found, started] with pytest.raises(SuspendExecution): invoke_handler( @@ -308,10 +348,10 @@ def test_invoke_handler_no_config(): # Verify default config was used operation_update = mock_state.create_checkpoint.call_args[1]["operation_update"] - assert ( - operation_update.to_dict()["ChainedInvokeOptions"]["FunctionName"] - == "test_function" - ) + chained_invoke_options = operation_update.to_dict()["ChainedInvokeOptions"] + assert chained_invoke_options["FunctionName"] == "test_function" + # tenant_id should be None when not specified + assert "TenantId" not in chained_invoke_options def test_invoke_handler_custom_serdes(): @@ -351,8 +391,14 @@ def test_invoke_handler_custom_serdes_new_operation(): mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = "test_arn" - mock_result = CheckpointedResult.create_not_found() - mock_state.get_checkpoint_result.return_value = mock_result + not_found = CheckpointedResult.create_not_found() + started_op = Operation( + operation_id="invoke_test", + operation_type=OperationType.CHAINED_INVOKE, + status=OperationStatus.STARTED, + ) + started = CheckpointedResult.create_from_operation(started_op) + mock_state.get_checkpoint_result.side_effect = [not_found, started] config = InvokeConfig[dict, dict]( serdes_payload=CustomDictSerDes(), serdes_result=CustomDictSerDes() @@ -461,8 +507,14 @@ def test_invoke_handler_with_none_payload(): mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = "test_arn" - mock_result = CheckpointedResult.create_not_found() - mock_state.get_checkpoint_result.return_value = mock_result + not_found = CheckpointedResult.create_not_found() + started_op = Operation( + operation_id="invoke_test", + operation_type=OperationType.CHAINED_INVOKE, + status=OperationStatus.STARTED, + ) + started = CheckpointedResult.create_from_operation(started_op) + mock_state.get_checkpoint_result.side_effect = [not_found, started] with pytest.raises(SuspendExecution): invoke_handler( @@ -514,8 +566,14 @@ def test_invoke_handler_suspend_does_not_raise(mock_suspend): mock_state = Mock(spec=ExecutionState) mock_state.durable_execution_arn = "test_arn" - mock_result = CheckpointedResult.create_not_found() - mock_state.get_checkpoint_result.return_value = mock_result + not_found = CheckpointedResult.create_not_found() + started_op = Operation( + operation_id="invoke_test", + operation_type=OperationType.CHAINED_INVOKE, + status=OperationStatus.STARTED, + ) + started = CheckpointedResult.create_from_operation(started_op) + 
mock_state.get_checkpoint_result.side_effect = [not_found, started] # Mock suspend_with_optional_resume_delay to not raise an exception (which it should always do) mock_suspend.return_value = None @@ -533,3 +591,597 @@ def test_invoke_handler_suspend_does_not_raise(mock_suspend): ) mock_suspend.assert_called_once() + + +def test_invoke_handler_with_tenant_id(): + """Test invoke_handler passes tenant_id to checkpoint.""" + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + not_found = CheckpointedResult.create_not_found() + started_op = Operation( + operation_id="invoke1", + operation_type=OperationType.CHAINED_INVOKE, + status=OperationStatus.STARTED, + ) + started = CheckpointedResult.create_from_operation(started_op) + mock_state.get_checkpoint_result.side_effect = [not_found, started] + + config = InvokeConfig(tenant_id="test-tenant-123") + + with pytest.raises(SuspendExecution): + invoke_handler( + function_name="test_function", + payload="test_input", + state=mock_state, + operation_identifier=OperationIdentifier("invoke1", None, None), + config=config, + ) + + # Verify checkpoint was called with tenant_id + mock_state.create_checkpoint.assert_called_once() + operation_update = mock_state.create_checkpoint.call_args[1]["operation_update"] + chained_invoke_options = operation_update.to_dict()["ChainedInvokeOptions"] + assert chained_invoke_options["FunctionName"] == "test_function" + assert chained_invoke_options["TenantId"] == "test-tenant-123" + + +def test_invoke_handler_without_tenant_id(): + """Test invoke_handler without tenant_id doesn't include it in checkpoint.""" + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + not_found = CheckpointedResult.create_not_found() + started_op = Operation( + operation_id="invoke1", + operation_type=OperationType.CHAINED_INVOKE, + status=OperationStatus.STARTED, + ) + started = CheckpointedResult.create_from_operation(started_op) + mock_state.get_checkpoint_result.side_effect = [not_found, started] + + config = InvokeConfig(tenant_id=None) + + with pytest.raises(SuspendExecution): + invoke_handler( + function_name="test_function", + payload="test_input", + state=mock_state, + operation_identifier=OperationIdentifier("invoke1", None, None), + config=config, + ) + + # Verify checkpoint was called without tenant_id + mock_state.create_checkpoint.assert_called_once() + operation_update = mock_state.create_checkpoint.call_args[1]["operation_update"] + chained_invoke_options = operation_update.to_dict()["ChainedInvokeOptions"] + assert chained_invoke_options["FunctionName"] == "test_function" + assert "TenantId" not in chained_invoke_options + + +def test_invoke_handler_default_config_no_tenant_id(): + """Test invoke_handler with default config has no tenant_id.""" + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + not_found = CheckpointedResult.create_not_found() + started_op = Operation( + operation_id="invoke1", + operation_type=OperationType.CHAINED_INVOKE, + status=OperationStatus.STARTED, + ) + started = CheckpointedResult.create_from_operation(started_op) + mock_state.get_checkpoint_result.side_effect = [not_found, started] + + with pytest.raises(SuspendExecution): + invoke_handler( + function_name="test_function", + payload="test_input", + state=mock_state, + operation_identifier=OperationIdentifier("invoke1", None, None), + config=None, + ) + + # Verify checkpoint was called without tenant_id + 
mock_state.create_checkpoint.assert_called_once() + operation_update = mock_state.create_checkpoint.call_args[1]["operation_update"] + chained_invoke_options = operation_update.to_dict()["ChainedInvokeOptions"] + assert chained_invoke_options["FunctionName"] == "test_function" + assert "TenantId" not in chained_invoke_options + + +def test_invoke_handler_defaults_to_json_serdes(): + """Test invoke_handler uses DEFAULT_JSON_SERDES when config has no serdes.""" + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + not_found = CheckpointedResult.create_not_found() + started_op = Operation( + operation_id="invoke1", + operation_type=OperationType.CHAINED_INVOKE, + status=OperationStatus.STARTED, + ) + started = CheckpointedResult.create_from_operation(started_op) + mock_state.get_checkpoint_result.side_effect = [not_found, started] + + config = InvokeConfig[dict, dict](serdes_payload=None, serdes_result=None) + payload = {"key": "value", "number": 42} + + with pytest.raises(SuspendExecution): + invoke_handler( + function_name="test_function", + payload=payload, + state=mock_state, + operation_identifier=OperationIdentifier("invoke_json", None, None), + config=config, + ) + + # Verify JSON serialization was used (not extended types) + operation_update = mock_state.create_checkpoint.call_args[1]["operation_update"] + assert operation_update.payload == json.dumps(payload) + + +def test_invoke_handler_result_defaults_to_json_serdes(): + """Test invoke_handler uses DEFAULT_JSON_SERDES for result deserialization.""" + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + result_data = {"key": "value", "number": 42} + operation = Operation( + operation_id="invoke_result_json", + operation_type=OperationType.CHAINED_INVOKE, + status=OperationStatus.SUCCEEDED, + chained_invoke_details=ChainedInvokeDetails(result=json.dumps(result_data)), + ) + mock_result = CheckpointedResult.create_from_operation(operation) + mock_state.get_checkpoint_result.return_value = mock_result + + config = InvokeConfig[dict, dict](serdes_payload=None, serdes_result=None) + + result = invoke_handler( + function_name="test_function", + payload={"input": "data"}, + state=mock_state, + operation_identifier=OperationIdentifier("invoke_result_json", None, None), + config=config, + ) + + # Verify JSON deserialization was used (not extended types) + assert result == result_data + + +# ============================================================================ +# Immediate Response Handling Tests +# ============================================================================ + + +def test_invoke_immediate_response_get_checkpoint_result_called_twice(): + """Test that get_checkpoint_result is called twice when checkpoint is created.""" + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + # First call: not found, second call: started (no immediate response) + not_found = CheckpointedResult.create_not_found() + started_op = Operation( + operation_id="invoke_immediate_1", + operation_type=OperationType.CHAINED_INVOKE, + status=OperationStatus.STARTED, + ) + started = CheckpointedResult.create_from_operation(started_op) + mock_state.get_checkpoint_result.side_effect = [not_found, started] + + with pytest.raises(SuspendExecution): + invoke_handler( + function_name="test_function", + payload="test_input", + state=mock_state, + operation_identifier=OperationIdentifier( + "invoke_immediate_1", None, "test_invoke" + ), + 
config=None, + ) + + # Verify get_checkpoint_result was called twice + assert mock_state.get_checkpoint_result.call_count == 2 + + +def test_invoke_immediate_response_create_checkpoint_with_is_sync_true(): + """Test that create_checkpoint is called with is_sync=True.""" + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + # First call: not found, second call: started + not_found = CheckpointedResult.create_not_found() + started_op = Operation( + operation_id="invoke_immediate_2", + operation_type=OperationType.CHAINED_INVOKE, + status=OperationStatus.STARTED, + ) + started = CheckpointedResult.create_from_operation(started_op) + mock_state.get_checkpoint_result.side_effect = [not_found, started] + + with pytest.raises(SuspendExecution): + invoke_handler( + function_name="test_function", + payload="test_input", + state=mock_state, + operation_identifier=OperationIdentifier( + "invoke_immediate_2", None, "test_invoke" + ), + config=None, + ) + + # Verify create_checkpoint was called with is_sync=True + mock_state.create_checkpoint.assert_called_once() + call_kwargs = mock_state.create_checkpoint.call_args[1] + assert call_kwargs["is_sync"] is True + + +def test_invoke_immediate_response_immediate_success(): + """Test immediate success: checkpoint returns SUCCEEDED on second check. + + When checkpoint returns SUCCEEDED on second check, operation returns result + without suspend. + """ + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + # First call: not found, second call: succeeded (immediate response) + not_found = CheckpointedResult.create_not_found() + succeeded_op = Operation( + operation_id="invoke_immediate_3", + operation_type=OperationType.CHAINED_INVOKE, + status=OperationStatus.SUCCEEDED, + chained_invoke_details=ChainedInvokeDetails( + result=json.dumps("immediate_result") + ), + ) + succeeded = CheckpointedResult.create_from_operation(succeeded_op) + mock_state.get_checkpoint_result.side_effect = [not_found, succeeded] + + result = invoke_handler( + function_name="test_function", + payload="test_input", + state=mock_state, + operation_identifier=OperationIdentifier( + "invoke_immediate_3", None, "test_invoke" + ), + config=None, + ) + + # Verify result was returned without suspend + assert result == "immediate_result" + # Verify checkpoint was created + mock_state.create_checkpoint.assert_called_once() + # Verify get_checkpoint_result was called twice + assert mock_state.get_checkpoint_result.call_count == 2 + + +def test_invoke_immediate_response_immediate_success_with_none_result(): + """Test immediate success with None result.""" + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + # First call: not found, second call: succeeded with None result + not_found = CheckpointedResult.create_not_found() + succeeded_op = Operation( + operation_id="invoke_immediate_4", + operation_type=OperationType.CHAINED_INVOKE, + status=OperationStatus.SUCCEEDED, + chained_invoke_details=ChainedInvokeDetails(result=None), + ) + succeeded = CheckpointedResult.create_from_operation(succeeded_op) + mock_state.get_checkpoint_result.side_effect = [not_found, succeeded] + + result = invoke_handler( + function_name="test_function", + payload="test_input", + state=mock_state, + operation_identifier=OperationIdentifier( + "invoke_immediate_4", None, "test_invoke" + ), + config=None, + ) + + # Verify None result was returned without suspend + assert result is None + assert 
mock_state.get_checkpoint_result.call_count == 2 + + +@pytest.mark.parametrize( + "status", + [OperationStatus.FAILED, OperationStatus.TIMED_OUT, OperationStatus.STOPPED], +) +def test_invoke_immediate_response_immediate_failure(status: OperationStatus): + """Test immediate failure: checkpoint returns FAILED/TIMED_OUT/STOPPED on second check. + + When checkpoint returns a failure status on second check, operation raises error + without suspend. + """ + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + # First call: not found, second call: failed (immediate response) + not_found = CheckpointedResult.create_not_found() + error = ErrorObject( + message="Immediate failure", type="TestError", data=None, stack_trace=None + ) + failed_op = Operation( + operation_id="invoke_immediate_5", + operation_type=OperationType.CHAINED_INVOKE, + status=status, + chained_invoke_details=ChainedInvokeDetails(error=error), + ) + failed = CheckpointedResult.create_from_operation(failed_op) + mock_state.get_checkpoint_result.side_effect = [not_found, failed] + + # Verify error is raised without suspend + with pytest.raises(CallableRuntimeError): + invoke_handler( + function_name="test_function", + payload="test_input", + state=mock_state, + operation_identifier=OperationIdentifier( + "invoke_immediate_5", None, "test_invoke" + ), + config=None, + ) + + # Verify checkpoint was created + mock_state.create_checkpoint.assert_called_once() + # Verify get_checkpoint_result was called twice + assert mock_state.get_checkpoint_result.call_count == 2 + + +def test_invoke_immediate_response_no_immediate_response(): + """Test no immediate response: checkpoint returns STARTED on second check. + + When checkpoint returns STARTED on second check, operation suspends normally. + """ + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + # First call: not found, second call: started (no immediate response) + not_found = CheckpointedResult.create_not_found() + started_op = Operation( + operation_id="invoke_immediate_6", + operation_type=OperationType.CHAINED_INVOKE, + status=OperationStatus.STARTED, + ) + started = CheckpointedResult.create_from_operation(started_op) + mock_state.get_checkpoint_result.side_effect = [not_found, started] + + # Verify operation suspends + with pytest.raises(SuspendExecution): + invoke_handler( + function_name="test_function", + payload="test_input", + state=mock_state, + operation_identifier=OperationIdentifier( + "invoke_immediate_6", None, "test_invoke" + ), + config=None, + ) + + # Verify checkpoint was created + mock_state.create_checkpoint.assert_called_once() + # Verify get_checkpoint_result was called twice + assert mock_state.get_checkpoint_result.call_count == 2 + + +def test_invoke_immediate_response_already_completed(): + """Test already completed: checkpoint is already SUCCEEDED on first check. + + When checkpoint is already SUCCEEDED on first check, no checkpoint is created + and result is returned immediately. 
+ """ + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + # First call: already succeeded + succeeded_op = Operation( + operation_id="invoke_immediate_7", + operation_type=OperationType.CHAINED_INVOKE, + status=OperationStatus.SUCCEEDED, + chained_invoke_details=ChainedInvokeDetails( + result=json.dumps("existing_result") + ), + ) + succeeded = CheckpointedResult.create_from_operation(succeeded_op) + mock_state.get_checkpoint_result.return_value = succeeded + + result = invoke_handler( + function_name="test_function", + payload="test_input", + state=mock_state, + operation_identifier=OperationIdentifier( + "invoke_immediate_7", None, "test_invoke" + ), + config=None, + ) + + # Verify result was returned + assert result == "existing_result" + # Verify no checkpoint was created + mock_state.create_checkpoint.assert_not_called() + # Verify get_checkpoint_result was called only once + assert mock_state.get_checkpoint_result.call_count == 1 + + +def test_invoke_immediate_response_with_timeout_immediate_success(): + """Test immediate success with timeout configuration.""" + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + # First call: not found, second call: succeeded + not_found = CheckpointedResult.create_not_found() + succeeded_op = Operation( + operation_id="invoke_immediate_8", + operation_type=OperationType.CHAINED_INVOKE, + status=OperationStatus.SUCCEEDED, + chained_invoke_details=ChainedInvokeDetails( + result=json.dumps("timeout_result") + ), + ) + succeeded = CheckpointedResult.create_from_operation(succeeded_op) + mock_state.get_checkpoint_result.side_effect = [not_found, succeeded] + + config = InvokeConfig[str, str](timeout=Duration.from_seconds(30)) + + result = invoke_handler( + function_name="test_function", + payload="test_input", + state=mock_state, + operation_identifier=OperationIdentifier( + "invoke_immediate_8", None, "test_invoke" + ), + config=config, + ) + + # Verify result was returned without suspend + assert result == "timeout_result" + assert mock_state.get_checkpoint_result.call_count == 2 + + +def test_invoke_immediate_response_with_timeout_no_immediate_response(): + """Test no immediate response with timeout configuration. + + When no immediate response, operation should suspend with timeout. 
+ """ + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + # First call: not found, second call: started + not_found = CheckpointedResult.create_not_found() + started_op = Operation( + operation_id="invoke_immediate_9", + operation_type=OperationType.CHAINED_INVOKE, + status=OperationStatus.STARTED, + ) + started = CheckpointedResult.create_from_operation(started_op) + mock_state.get_checkpoint_result.side_effect = [not_found, started] + + config = InvokeConfig[str, str](timeout=Duration.from_seconds(30)) + + # Verify operation suspends with timeout + with pytest.raises(TimedSuspendExecution): + invoke_handler( + function_name="test_function", + payload="test_input", + state=mock_state, + operation_identifier=OperationIdentifier( + "invoke_immediate_9", None, "test_invoke" + ), + config=config, + ) + + assert mock_state.get_checkpoint_result.call_count == 2 + + +def test_invoke_immediate_response_with_custom_serdes(): + """Test immediate success with custom serialization.""" + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + # First call: not found, second call: succeeded + not_found = CheckpointedResult.create_not_found() + succeeded_op = Operation( + operation_id="invoke_immediate_10", + operation_type=OperationType.CHAINED_INVOKE, + status=OperationStatus.SUCCEEDED, + chained_invoke_details=ChainedInvokeDetails( + result='{"key": "VALUE", "number": "84", "list": [1, 2, 3]}' + ), + ) + succeeded = CheckpointedResult.create_from_operation(succeeded_op) + mock_state.get_checkpoint_result.side_effect = [not_found, succeeded] + + config = InvokeConfig[dict, dict]( + serdes_payload=CustomDictSerDes(), serdes_result=CustomDictSerDes() + ) + + result = invoke_handler( + function_name="test_function", + payload={"key": "value", "number": 42, "list": [1, 2, 3]}, + state=mock_state, + operation_identifier=OperationIdentifier( + "invoke_immediate_10", None, "test_invoke" + ), + config=config, + ) + + # Verify custom deserialization was used + assert result == {"key": "value", "number": 42, "list": [1, 2, 3]} + assert mock_state.get_checkpoint_result.call_count == 2 + + +def test_invoke_suspends_when_second_check_returns_started(): + """Test backward compatibility: when the second checkpoint check returns + STARTED (not terminal), the invoke operation suspends normally. 
+ + Validates: Requirements 8.1, 8.2 + """ + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + # First call: checkpoint doesn't exist + # Second call: checkpoint returns STARTED (no immediate response) + mock_state.get_checkpoint_result.side_effect = [ + CheckpointedResult.create_not_found(), + CheckpointedResult.create_from_operation( + Operation( + operation_id="invoke-1", + operation_type=OperationType.STEP, + status=OperationStatus.STARTED, + ) + ), + ] + + executor = InvokeOperationExecutor( + state=mock_state, + operation_identifier=OperationIdentifier("invoke-1", None, "test_invoke"), + function_name="my-function", + payload={"data": "test"}, + config=InvokeConfig(), + ) + + with pytest.raises(SuspendExecution): + executor.process() + + # Assert - behaves like "old way" + assert mock_state.get_checkpoint_result.call_count == 2 # Double-check happened + mock_state.create_checkpoint.assert_called_once() # START checkpoint created diff --git a/tests/operation/map_test.py b/tests/operation/map_test.py index eb099d1..5c5a5a1 100644 --- a/tests/operation/map_test.py +++ b/tests/operation/map_test.py @@ -1,9 +1,13 @@ """Tests for map operation.""" +import importlib +import json from unittest.mock import Mock, patch +import pytest + # Mock the executor.execute method -from aws_durable_execution_sdk_python.concurrency import ( +from aws_durable_execution_sdk_python.concurrency.models import ( BatchItem, BatchItemStatus, BatchResult, @@ -15,9 +19,12 @@ ItemBatcher, MapConfig, ) +from aws_durable_execution_sdk_python.context import DurableContext from aws_durable_execution_sdk_python.identifier import OperationIdentifier from aws_durable_execution_sdk_python.lambda_service import OperationSubType +from aws_durable_execution_sdk_python.operation import child # PLC0415 from aws_durable_execution_sdk_python.operation.map import MapExecutor, map_handler +from aws_durable_execution_sdk_python.serdes import serialize from tests.serdes_test import CustomStrSerDes @@ -750,3 +757,337 @@ def get_checkpoint_result(self, operation_id): # Verify replay was called, execute was not mock_replay.assert_called_once() mock_execute.assert_not_called()
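The parametrized tests that follow pin down the serdes fallback for per-item values: each child result uses item_serdes when it is set and otherwise falls back to the batch-level serdes, while the parent-level BatchResult always uses the batch-level serdes. A minimal sketch of that resolution rule, with a hypothetical helper name (the real logic lives in the child operation module):

def resolve_item_serdes(config):
    # Hypothetical helper: children fall back to the batch-level serdes
    # when item_serdes is unset; the parent BatchResult always uses
    # config.serdes (None here selects the SDK's default JSON serdes).
    return config.item_serdes or config.serdes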
+ + +@pytest.mark.parametrize( + ("item_serdes", "batch_serdes"), + [ + (Mock(), Mock()), + (None, Mock()), + (Mock(), None), + ], +) +@patch("aws_durable_execution_sdk_python.operation.child.serialize") +def test_map_item_serialize(mock_serialize, item_serdes, batch_serdes): + """Test map serializes items with item_serdes or fallback.""" + mock_serialize.return_value = '"serialized"' + + parent_checkpoint = Mock() + parent_checkpoint.is_succeeded.return_value = False + parent_checkpoint.is_failed.return_value = False + parent_checkpoint.is_started.return_value = False + parent_checkpoint.is_existent.return_value = True + parent_checkpoint.is_replay_children.return_value = False + + child_checkpoint = Mock() + child_checkpoint.is_succeeded.return_value = False + child_checkpoint.is_failed.return_value = False + child_checkpoint.is_started.return_value = False + child_checkpoint.is_existent.return_value = True + child_checkpoint.is_replay_children.return_value = False + + def get_checkpoint(op_id): + return child_checkpoint if op_id.startswith("child-") else parent_checkpoint + + mock_state = Mock() + mock_state.durable_execution_arn = "arn:test" + mock_state.get_checkpoint_result = Mock(side_effect=get_checkpoint) + mock_state.create_checkpoint = Mock() + + context_map = {} + + def create_id(self, i): + ctx_id = id(self) + if ctx_id not in context_map: + context_map[ctx_id] = [] + context_map[ctx_id].append(i) + return ( + "parent" + if len(context_map) == 1 and len(context_map[ctx_id]) == 1 + else f"child-{i}" + ) + + with patch.object(DurableContext, "_create_step_id_for_logical_step", create_id): + context = DurableContext(state=mock_state) + context.map( + ["a", "b"], + lambda ctx, item, idx, items: item, + config=MapConfig(serdes=batch_serdes, item_serdes=item_serdes), + ) + + expected = item_serdes or batch_serdes + assert mock_serialize.call_args_list[0][1]["serdes"] is expected + assert mock_serialize.call_args_list[0][1]["operation_id"] == "child-0" + assert mock_serialize.call_args_list[1][1]["serdes"] is expected + assert mock_serialize.call_args_list[1][1]["operation_id"] == "child-1" + assert mock_serialize.call_args_list[2][1]["serdes"] is batch_serdes + assert mock_serialize.call_args_list[2][1]["operation_id"] == "parent" + + +@pytest.mark.parametrize( + ("item_serdes", "batch_serdes"), + [ + (Mock(), Mock()), + (None, Mock()), + (Mock(), None), + ], +) +@patch("aws_durable_execution_sdk_python.operation.child.deserialize") +def test_map_item_deserialize(mock_deserialize, item_serdes, batch_serdes): + """Test map deserializes items with item_serdes or fallback.""" + mock_deserialize.return_value = "deserialized" + + parent_checkpoint = Mock() + parent_checkpoint.is_succeeded.return_value = False + parent_checkpoint.is_failed.return_value = False + parent_checkpoint.is_existent.return_value = False + + child_checkpoint = Mock() + child_checkpoint.is_succeeded.return_value = True + child_checkpoint.is_failed.return_value = False + child_checkpoint.is_replay_children.return_value = False + child_checkpoint.result = '"cached"' + + def get_checkpoint(op_id): + return child_checkpoint if op_id.startswith("child-") else parent_checkpoint + + mock_state = Mock() + mock_state.durable_execution_arn = "arn:test" + mock_state.get_checkpoint_result = Mock(side_effect=get_checkpoint) + mock_state.create_checkpoint = Mock() + + context_map = {} + + def create_id(self, i): + ctx_id = id(self) + if ctx_id not in context_map: + context_map[ctx_id] = [] + context_map[ctx_id].append(i) + return ( + "parent" + if len(context_map) == 1 and len(context_map[ctx_id]) == 1 + else f"child-{i}" + ) + + with 
patch.object(DurableContext, "_create_step_id_for_logical_step", create_id): + context = DurableContext(state=mock_state) + context.map( + ["a", "b"], + lambda ctx, item, idx, items: item, + config=MapConfig(serdes=batch_serdes, item_serdes=item_serdes), + ) + + expected = item_serdes or batch_serdes + assert mock_deserialize.call_args_list[0][1]["serdes"] is expected + assert mock_deserialize.call_args_list[0][1]["operation_id"] == "child-0" + assert mock_deserialize.call_args_list[1][1]["serdes"] is expected + assert mock_deserialize.call_args_list[1][1]["operation_id"] == "child-1" + + +def test_map_result_serialization_roundtrip(): + """Test that map operation BatchResult can be serialized and deserialized.""" + + items = ["a", "b", "c"] + + def func(ctx, item, idx, items): + return {"item": item.upper(), "index": idx} + + class MockExecutionState: + durable_execution_arn = "arn:test" + + def get_checkpoint_result(self, operation_id): + mock_result = Mock() + mock_result.is_succeeded.return_value = False + return mock_result + + execution_state = MockExecutionState() + map_context = Mock() + map_context._create_step_id_for_logical_step = Mock(side_effect=["1", "2", "3"]) # noqa SLF001 + map_context.create_child_context = Mock(return_value=Mock()) + operation_identifier = OperationIdentifier("test_op", "parent", "test_map") + + # Execute map + result = map_handler( + items, func, MapConfig(), execution_state, map_context, operation_identifier + ) + + # Serialize the BatchResult + serialized = json.dumps(result.to_dict()) + + # Deserialize + deserialized = BatchResult.from_dict(json.loads(serialized)) + + # Verify all data preserved + assert len(deserialized.all) == 3 + assert deserialized.all[0].result == {"item": "A", "index": 0} + assert deserialized.all[1].result == {"item": "B", "index": 1} + assert deserialized.all[2].result == {"item": "C", "index": 2} + assert deserialized.completion_reason == result.completion_reason + assert all(item.status == BatchItemStatus.SUCCEEDED for item in deserialized.all) + + +def test_map_handler_serializes_batch_result(): + """Verify map_handler serializes BatchResult at parent level.""" + with patch( + "aws_durable_execution_sdk_python.serdes.serialize" + ) as mock_serdes_serialize: + mock_serdes_serialize.return_value = '"serialized"' + importlib.reload(child) + + parent_checkpoint = Mock() + parent_checkpoint.is_succeeded.return_value = False + parent_checkpoint.is_failed.return_value = False + parent_checkpoint.is_existent.return_value = False + parent_checkpoint.is_replay_children.return_value = False + + child_checkpoint = Mock() + child_checkpoint.is_succeeded.return_value = False + child_checkpoint.is_failed.return_value = False + child_checkpoint.is_existent.return_value = False + child_checkpoint.is_replay_children.return_value = False + + def get_checkpoint(op_id): + return child_checkpoint if op_id.startswith("child-") else parent_checkpoint + + mock_state = Mock() + mock_state.durable_execution_arn = "arn:test" + mock_state.get_checkpoint_result = Mock(side_effect=get_checkpoint) + mock_state.create_checkpoint = Mock() + + context_map = {} + + def create_id(self, i): + ctx_id = id(self) + if ctx_id not in context_map: + context_map[ctx_id] = [] + context_map[ctx_id].append(i) + return ( + "parent" + if len(context_map) == 1 and len(context_map[ctx_id]) == 1 + else f"child-{i}" + ) + + with patch.object( + DurableContext, "_create_step_id_for_logical_step", create_id + ): + context = DurableContext(state=mock_state) + result = 
context.map(["a", "b"], lambda ctx, item, idx, items: item) + + assert len(mock_serdes_serialize.call_args_list) == 3 + parent_call = mock_serdes_serialize.call_args_list[2] + assert parent_call[1]["value"] is result + + +def test_map_default_serdes_serializes_batch_result(): + """Verify default serdes automatically serializes BatchResult.""" + + with patch( + "aws_durable_execution_sdk_python.serdes.serialize", wraps=serialize + ) as mock_serialize: + importlib.reload(child) + + parent_checkpoint = Mock() + parent_checkpoint.is_succeeded.return_value = False + parent_checkpoint.is_failed.return_value = False + parent_checkpoint.is_existent.return_value = False + parent_checkpoint.is_replay_children.return_value = False + + child_checkpoint = Mock() + child_checkpoint.is_succeeded.return_value = False + child_checkpoint.is_failed.return_value = False + child_checkpoint.is_existent.return_value = False + child_checkpoint.is_replay_children.return_value = False + + def get_checkpoint(op_id): + return child_checkpoint if op_id.startswith("child-") else parent_checkpoint + + mock_state = Mock() + mock_state.durable_execution_arn = "arn:test" + mock_state.get_checkpoint_result = Mock(side_effect=get_checkpoint) + mock_state.create_checkpoint = Mock() + + context_map = {} + + def create_id(self, i): + ctx_id = id(self) + if ctx_id not in context_map: + context_map[ctx_id] = [] + context_map[ctx_id].append(i) + return ( + "parent" + if len(context_map) == 1 and len(context_map[ctx_id]) == 1 + else f"child-{i}" + ) + + with patch.object( + DurableContext, "_create_step_id_for_logical_step", create_id + ): + context = DurableContext(state=mock_state) + result = context.map(["a", "b"], lambda ctx, item, idx, items: item) + + assert isinstance(result, BatchResult) + assert len(mock_serialize.call_args_list) == 3 + parent_call = mock_serialize.call_args_list[2] + assert parent_call[1]["serdes"] is None + assert isinstance(parent_call[1]["value"], BatchResult) + assert parent_call[1]["value"] is result + + +def test_map_custom_serdes_serializes_batch_result(): + """Verify custom serdes is used for BatchResult serialization.""" + + custom_serdes = CustomStrSerDes() + + with patch("aws_durable_execution_sdk_python.serdes.serialize") as mock_serialize: + mock_serialize.return_value = '"serialized"' + importlib.reload(child) + + parent_checkpoint = Mock() + parent_checkpoint.is_succeeded.return_value = False + parent_checkpoint.is_failed.return_value = False + parent_checkpoint.is_existent.return_value = False + parent_checkpoint.is_replay_children.return_value = False + + child_checkpoint = Mock() + child_checkpoint.is_succeeded.return_value = False + child_checkpoint.is_failed.return_value = False + child_checkpoint.is_existent.return_value = False + child_checkpoint.is_replay_children.return_value = False + + def get_checkpoint(op_id): + return child_checkpoint if op_id.startswith("child-") else parent_checkpoint + + mock_state = Mock() + mock_state.durable_execution_arn = "arn:test" + mock_state.get_checkpoint_result = Mock(side_effect=get_checkpoint) + mock_state.create_checkpoint = Mock() + + context_map = {} + + def create_id(self, i): + ctx_id = id(self) + if ctx_id not in context_map: + context_map[ctx_id] = [] + context_map[ctx_id].append(i) + return ( + "parent" + if len(context_map) == 1 and len(context_map[ctx_id]) == 1 + else f"child-{i}" + ) + + with patch.object( + DurableContext, "_create_step_id_for_logical_step", create_id + ): + context = DurableContext(state=mock_state) + result = 
context.map( + ["a", "b"], + lambda ctx, item, idx, items: item, + config=MapConfig(serdes=custom_serdes), + ) + + assert isinstance(result, BatchResult) + assert len(mock_serialize.call_args_list) == 3 + parent_call = mock_serialize.call_args_list[2] + assert parent_call[1]["serdes"] is custom_serdes + assert isinstance(parent_call[1]["value"], BatchResult) + assert parent_call[1]["value"] is result diff --git a/tests/operation/parallel_test.py b/tests/operation/parallel_test.py index 54f2229..c43be7e 100644 --- a/tests/operation/parallel_test.py +++ b/tests/operation/parallel_test.py @@ -1,25 +1,31 @@ """Tests for the parallel operation module.""" +import importlib +import json from unittest.mock import Mock, patch import pytest +from aws_durable_execution_sdk_python.concurrency.executor import ConcurrentExecutor + # Mock the executor.execute method to return a BatchResult -from aws_durable_execution_sdk_python.concurrency import ( +from aws_durable_execution_sdk_python.concurrency.models import ( BatchItem, BatchItemStatus, BatchResult, CompletionReason, - ConcurrentExecutor, Executable, ) from aws_durable_execution_sdk_python.config import CompletionConfig, ParallelConfig +from aws_durable_execution_sdk_python.context import DurableContext from aws_durable_execution_sdk_python.identifier import OperationIdentifier from aws_durable_execution_sdk_python.lambda_service import OperationSubType +from aws_durable_execution_sdk_python.operation import child from aws_durable_execution_sdk_python.operation.parallel import ( ParallelExecutor, parallel_handler, ) +from aws_durable_execution_sdk_python.serdes import serialize from tests.serdes_test import CustomStrSerDes @@ -734,3 +740,346 @@ def get_checkpoint_result(self, operation_id): # Verify replay was called, execute was not mock_replay.assert_called_once() mock_execute.assert_not_called() + + +@pytest.mark.parametrize( + ("item_serdes", "batch_serdes"), + [ + (Mock(), Mock()), + (None, Mock()), + (Mock(), None), + ], +) +@patch("aws_durable_execution_sdk_python.operation.child.serialize") +def test_parallel_item_serialize(mock_serialize, item_serdes, batch_serdes): + """Test parallel serializes branches with item_serdes or fallback.""" + mock_serialize.return_value = '"serialized"' + + parent_checkpoint = Mock() + parent_checkpoint.is_succeeded.return_value = False + parent_checkpoint.is_failed.return_value = False + parent_checkpoint.is_started.return_value = False + parent_checkpoint.is_existent.return_value = True + parent_checkpoint.is_replay_children.return_value = False + + child_checkpoint = Mock() + child_checkpoint.is_succeeded.return_value = False + child_checkpoint.is_failed.return_value = False + child_checkpoint.is_started.return_value = False + child_checkpoint.is_existent.return_value = True + child_checkpoint.is_replay_children.return_value = False + + def get_checkpoint(op_id): + return child_checkpoint if op_id.startswith("child-") else parent_checkpoint + + mock_state = Mock() + mock_state.durable_execution_arn = "arn:test" + mock_state.get_checkpoint_result = Mock(side_effect=get_checkpoint) + mock_state.create_checkpoint = Mock() + + context_map = {} + + def create_id(self, i): + ctx_id = id(self) + if ctx_id not in context_map: + context_map[ctx_id] = [] + context_map[ctx_id].append(i) + return ( + "parent" + if len(context_map) == 1 and len(context_map[ctx_id]) == 1 + else f"child-{i}" + ) + + with patch.object(DurableContext, "_create_step_id_for_logical_step", create_id): + context = 
DurableContext(state=mock_state) + context.parallel( + [lambda ctx: "a", lambda ctx: "b"], + config=ParallelConfig(serdes=batch_serdes, item_serdes=item_serdes), + ) + + expected = item_serdes or batch_serdes + assert mock_serialize.call_args_list[0][1]["serdes"] is expected + assert mock_serialize.call_args_list[0][1]["operation_id"] == "child-0" + assert mock_serialize.call_args_list[1][1]["serdes"] is expected + assert mock_serialize.call_args_list[1][1]["operation_id"] == "child-1" + assert mock_serialize.call_args_list[2][1]["serdes"] is batch_serdes + assert mock_serialize.call_args_list[2][1]["operation_id"] == "parent" + + +@pytest.mark.parametrize( + ("item_serdes", "batch_serdes"), + [ + (Mock(), Mock()), + (None, Mock()), + (Mock(), None), + ], +) +@patch("aws_durable_execution_sdk_python.operation.child.deserialize") +def test_parallel_item_deserialize(mock_deserialize, item_serdes, batch_serdes): + """Test parallel deserializes branches with item_serdes or fallback.""" + mock_deserialize.return_value = "deserialized" + + parent_checkpoint = Mock() + parent_checkpoint.is_succeeded.return_value = False + parent_checkpoint.is_failed.return_value = False + parent_checkpoint.is_existent.return_value = False + + child_checkpoint = Mock() + child_checkpoint.is_succeeded.return_value = True + child_checkpoint.is_failed.return_value = False + child_checkpoint.is_replay_children.return_value = False + child_checkpoint.result = '"cached"' + + def get_checkpoint(op_id): + return child_checkpoint if op_id.startswith("child-") else parent_checkpoint + + mock_state = Mock() + mock_state.durable_execution_arn = "arn:test" + mock_state.get_checkpoint_result = Mock(side_effect=get_checkpoint) + mock_state.create_checkpoint = Mock() + + context_map = {} + + def create_id(self, i): + ctx_id = id(self) + if ctx_id not in context_map: + context_map[ctx_id] = [] + context_map[ctx_id].append(i) + return ( + "parent" + if len(context_map) == 1 and len(context_map[ctx_id]) == 1 + else f"child-{i}" + ) + + with patch.object(DurableContext, "_create_step_id_for_logical_step", create_id): + context = DurableContext(state=mock_state) + context.parallel( + [lambda ctx: "a", lambda ctx: "b"], + config=ParallelConfig(serdes=batch_serdes, item_serdes=item_serdes), + ) + + expected = item_serdes or batch_serdes + assert mock_deserialize.call_args_list[0][1]["serdes"] is expected + assert mock_deserialize.call_args_list[0][1]["operation_id"] == "child-0" + assert mock_deserialize.call_args_list[1][1]["serdes"] is expected + assert mock_deserialize.call_args_list[1][1]["operation_id"] == "child-1" + + +def test_parallel_result_serialization_roundtrip(): + """Test that parallel operation BatchResult can be serialized and deserialized.""" + + def func1(ctx): + return [1, 2, 3] + + def func2(ctx): + return {"status": "complete", "count": 42} + + def func3(ctx): + return "simple string" + + callables = [func1, func2, func3] + + class MockExecutionState: + durable_execution_arn = "arn:test" + + def get_checkpoint_result(self, operation_id): + mock_result = Mock() + mock_result.is_succeeded.return_value = False + return mock_result + + execution_state = MockExecutionState() + parallel_context = Mock() + parallel_context._create_step_id_for_logical_step = Mock( # noqa SLF001 + side_effect=["1", "2", "3"] + ) + parallel_context.create_child_context = Mock(return_value=Mock()) + operation_identifier = OperationIdentifier("test_op", "parent", "test_parallel") + + # Execute parallel + result = parallel_handler( + callables, 
+ ParallelConfig(), + execution_state, + parallel_context, + operation_identifier, + ) + + # Serialize the BatchResult + serialized = json.dumps(result.to_dict()) + + # Deserialize + deserialized = BatchResult.from_dict(json.loads(serialized)) + + # Verify all data preserved + assert len(deserialized.all) == 3 + assert deserialized.all[0].result == [1, 2, 3] + assert deserialized.all[1].result == {"status": "complete", "count": 42} + assert deserialized.all[2].result == "simple string" + assert deserialized.completion_reason == result.completion_reason + assert all(item.status == BatchItemStatus.SUCCEEDED for item in deserialized.all) + + +def test_parallel_handler_serializes_batch_result(): + """Verify parallel_handler serializes BatchResult at parent level.""" + + with patch( + "aws_durable_execution_sdk_python.serdes.serialize" + ) as mock_serdes_serialize: + mock_serdes_serialize.return_value = '"serialized"' + importlib.reload(child) + + parent_checkpoint = Mock() + parent_checkpoint.is_succeeded.return_value = False + parent_checkpoint.is_failed.return_value = False + parent_checkpoint.is_existent.return_value = False + parent_checkpoint.is_replay_children.return_value = False + + child_checkpoint = Mock() + child_checkpoint.is_succeeded.return_value = False + child_checkpoint.is_failed.return_value = False + child_checkpoint.is_existent.return_value = False + child_checkpoint.is_replay_children.return_value = False + + def get_checkpoint(op_id): + return child_checkpoint if op_id.startswith("child-") else parent_checkpoint + + mock_state = Mock() + mock_state.durable_execution_arn = "arn:test" + mock_state.get_checkpoint_result = Mock(side_effect=get_checkpoint) + mock_state.create_checkpoint = Mock() + + context_map = {} + + def create_id(self, i): + ctx_id = id(self) + if ctx_id not in context_map: + context_map[ctx_id] = [] + context_map[ctx_id].append(i) + return ( + "parent" + if len(context_map) == 1 and len(context_map[ctx_id]) == 1 + else f"child-{i}" + ) + + with patch.object( + DurableContext, "_create_step_id_for_logical_step", create_id + ): + context = DurableContext(state=mock_state) + result = context.parallel([lambda ctx: "a", lambda ctx: "b"]) + + assert len(mock_serdes_serialize.call_args_list) == 3 + parent_call = mock_serdes_serialize.call_args_list[2] + assert parent_call[1]["value"] is result + + +def test_parallel_default_serdes_serializes_batch_result(): + """Verify default serdes automatically serializes BatchResult.""" + with patch( + "aws_durable_execution_sdk_python.serdes.serialize", wraps=serialize + ) as mock_serialize: + importlib.reload(child) + + parent_checkpoint = Mock() + parent_checkpoint.is_succeeded.return_value = False + parent_checkpoint.is_failed.return_value = False + parent_checkpoint.is_existent.return_value = False + parent_checkpoint.is_replay_children.return_value = False + + child_checkpoint = Mock() + child_checkpoint.is_succeeded.return_value = False + child_checkpoint.is_failed.return_value = False + child_checkpoint.is_existent.return_value = False + child_checkpoint.is_replay_children.return_value = False + + def get_checkpoint(op_id): + return child_checkpoint if op_id.startswith("child-") else parent_checkpoint + + mock_state = Mock() + mock_state.durable_execution_arn = "arn:test" + mock_state.get_checkpoint_result = Mock(side_effect=get_checkpoint) + mock_state.create_checkpoint = Mock() + + context_map = {} + + def create_id(self, i): + ctx_id = id(self) + if ctx_id not in context_map: + context_map[ctx_id] = [] + 
context_map[ctx_id].append(i) + return ( + "parent" + if len(context_map) == 1 and len(context_map[ctx_id]) == 1 + else f"child-{i}" + ) + + with patch.object( + DurableContext, "_create_step_id_for_logical_step", create_id + ): + context = DurableContext(state=mock_state) + result = context.parallel([lambda ctx: "a", lambda ctx: "b"]) + + assert isinstance(result, BatchResult) + assert len(mock_serialize.call_args_list) == 3 + parent_call = mock_serialize.call_args_list[2] + assert parent_call[1]["serdes"] is None + assert isinstance(parent_call[1]["value"], BatchResult) + assert parent_call[1]["value"] is result + + +def test_parallel_custom_serdes_serializes_batch_result(): + """Verify custom serdes is used for BatchResult serialization.""" + + custom_serdes = CustomStrSerDes() + + with patch("aws_durable_execution_sdk_python.serdes.serialize") as mock_serialize: + mock_serialize.return_value = '"serialized"' + importlib.reload(child) + + parent_checkpoint = Mock() + parent_checkpoint.is_succeeded.return_value = False + parent_checkpoint.is_failed.return_value = False + parent_checkpoint.is_existent.return_value = False + parent_checkpoint.is_replay_children.return_value = False + + child_checkpoint = Mock() + child_checkpoint.is_succeeded.return_value = False + child_checkpoint.is_failed.return_value = False + child_checkpoint.is_existent.return_value = False + child_checkpoint.is_replay_children.return_value = False + + def get_checkpoint(op_id): + return child_checkpoint if op_id.startswith("child-") else parent_checkpoint + + mock_state = Mock() + mock_state.durable_execution_arn = "arn:test" + mock_state.get_checkpoint_result = Mock(side_effect=get_checkpoint) + mock_state.create_checkpoint = Mock() + + context_map = {} + + def create_id(self, i): + ctx_id = id(self) + if ctx_id not in context_map: + context_map[ctx_id] = [] + context_map[ctx_id].append(i) + return ( + "parent" + if len(context_map) == 1 and len(context_map[ctx_id]) == 1 + else f"child-{i}" + ) + + with patch.object( + DurableContext, "_create_step_id_for_logical_step", create_id + ): + context = DurableContext(state=mock_state) + result = context.parallel( + [lambda ctx: "a", lambda ctx: "b"], + config=ParallelConfig(serdes=custom_serdes), + ) + + assert isinstance(result, BatchResult) + assert len(mock_serialize.call_args_list) == 3 + parent_call = mock_serialize.call_args_list[2] + assert parent_call[1]["serdes"] is custom_serdes + assert isinstance(parent_call[1]["value"], BatchResult) + assert parent_call[1]["value"] is result diff --git a/tests/operation/step_test.py b/tests/operation/step_test.py index 04396cd..a7e38a8 100644 --- a/tests/operation/step_test.py +++ b/tests/operation/step_test.py @@ -7,6 +7,7 @@ import pytest from aws_durable_execution_sdk_python.config import ( + Duration, StepConfig, StepSemantics, ) @@ -27,12 +28,27 @@ StepDetails, ) from aws_durable_execution_sdk_python.logger import Logger -from aws_durable_execution_sdk_python.operation.step import step_handler +from aws_durable_execution_sdk_python.operation.step import StepOperationExecutor from aws_durable_execution_sdk_python.retries import RetryDecision from aws_durable_execution_sdk_python.state import CheckpointedResult, ExecutionState from tests.serdes_test import CustomDictSerDes +# Test helper - maintains old handler signature for backward compatibility in tests +def step_handler(func, state, operation_identifier, config, context_logger): + """Test helper that wraps StepOperationExecutor with old handler signature.""" + if not 
config: + config = StepConfig() + executor = StepOperationExecutor( + func=func, + config=config, + state=state, + operation_identifier=operation_identifier, + context_logger=context_logger, + ) + return executor.process() + + def test_step_handler_already_succeeded(): """Test step_handler when operation already succeeded.""" mock_state = Mock(spec=ExecutionState) @@ -222,10 +238,19 @@ def test_step_handler_success_at_least_once(): def test_step_handler_success_at_most_once(): """Test step_handler successful execution with AT_MOST_ONCE semantics.""" mock_state = Mock(spec=ExecutionState) - mock_result = CheckpointedResult.create_not_found() - mock_state.get_checkpoint_result.return_value = mock_result mock_state.durable_execution_arn = "test_arn" + # First call: not found, second call: started (after sync checkpoint) + not_found = CheckpointedResult.create_not_found() + started_op = Operation( + operation_id="step7", + operation_type=OperationType.STEP, + status=OperationStatus.STARTED, + step_details=StepDetails(attempt=0), + ) + started = CheckpointedResult.create_from_operation(started_op) + mock_state.get_checkpoint_result.side_effect = [not_found, started] + config = StepConfig(step_semantics=StepSemantics.AT_MOST_ONCE_PER_RETRY) mock_callable = Mock(return_value="success_result") mock_logger = Mock(spec=Logger) @@ -290,7 +315,7 @@ def test_step_handler_retry_success(): mock_state.durable_execution_arn = "test_arn" mock_retry_strategy = Mock( - return_value=RetryDecision(should_retry=True, delay_seconds=5) + return_value=RetryDecision(should_retry=True, delay=Duration.from_seconds(5)) ) config = StepConfig(retry_strategy=mock_retry_strategy) mock_callable = Mock(side_effect=RuntimeError("Test error")) @@ -333,7 +358,7 @@ def test_step_handler_retry_exhausted(): mock_state.durable_execution_arn = "test_arn" mock_retry_strategy = Mock( - return_value=RetryDecision(should_retry=False, delay_seconds=0) + return_value=RetryDecision(should_retry=False, delay=Duration.from_seconds(0)) ) config = StepConfig(retry_strategy=mock_retry_strategy) mock_callable = Mock(side_effect=RuntimeError("Test error")) @@ -376,7 +401,7 @@ def test_step_handler_retry_interrupted_error(): mock_state.durable_execution_arn = "test_arn" mock_retry_strategy = Mock( - return_value=RetryDecision(should_retry=False, delay_seconds=0) + return_value=RetryDecision(should_retry=False, delay=Duration.from_seconds(0)) ) config = StepConfig(retry_strategy=mock_retry_strategy) interrupted_error = StepInterruptedError("Step interrupted") @@ -415,7 +440,7 @@ def test_step_handler_retry_with_existing_attempts(): mock_state.durable_execution_arn = "test_arn" mock_retry_strategy = Mock( - return_value=RetryDecision(should_retry=True, delay_seconds=10) + return_value=RetryDecision(should_retry=True, delay=Duration.from_seconds(10)) ) config = StepConfig(retry_strategy=mock_retry_strategy) mock_callable = Mock(side_effect=RuntimeError("Test error")) @@ -451,7 +476,7 @@ def test_step_handler_pending_without_existing_attempts(): mock_state.durable_execution_arn = "test_arn" mock_retry_strategy = Mock( - return_value=RetryDecision(should_retry=True, delay_seconds=10) + return_value=RetryDecision(should_retry=True, delay=Duration.from_seconds(10)) ) config = StepConfig(retry_strategy=mock_retry_strategy) mock_callable = Mock(side_effect=RuntimeError("Test error")) @@ -471,14 +496,25 @@ def test_step_handler_pending_without_existing_attempts(): mock_retry_strategy.assert_not_called() 
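The mocked strategies above return RetryDecision with the new Duration-based delay field, which replaces the old integer delay_seconds. As a usage sketch, a custom strategy under the new API might look like the following; the (error, attempt) signature is an assumption, since these tests only drive the strategy through Mock return values:

def capped_exponential_backoff(error, attempt):
    # Hypothetical strategy: retry up to 5 attempts, doubling the delay
    # each time and capping it at 30 seconds.
    return RetryDecision(
        should_retry=attempt < 5,
        delay=Duration.from_seconds(min(2**attempt, 30)),
    )

config = StepConfig(retry_strategy=capped_exponential_backoff)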
-@patch("aws_durable_execution_sdk_python.operation.step.retry_handler") +@patch( + "aws_durable_execution_sdk_python.operation.step.StepOperationExecutor.retry_handler" +) def test_step_handler_retry_handler_no_exception(mock_retry_handler): """Test step_handler when retry_handler doesn't raise an exception.""" mock_state = Mock(spec=ExecutionState) - mock_result = CheckpointedResult.create_not_found() - mock_state.get_checkpoint_result.return_value = mock_result mock_state.durable_execution_arn = "test_arn" + # First call: not found, second call: started (AT_LEAST_ONCE default) + not_found = CheckpointedResult.create_not_found() + started_op = Operation( + operation_id="step13", + operation_type=OperationType.STEP, + status=OperationStatus.STARTED, + step_details=StepDetails(attempt=0), + ) + started = CheckpointedResult.create_from_operation(started_op) + mock_state.get_checkpoint_result.side_effect = [not_found, started] + # Mock retry_handler to not raise an exception (which it should always do) mock_retry_handler.return_value = None @@ -558,3 +594,303 @@ def test_step_handler_custom_serdes_already_succeeded(): ) assert result == {"key": "value", "number": 42, "list": [1, 2, 3]} + + +# Tests for immediate response handling + + +def test_step_immediate_response_get_checkpoint_called_twice(): + """Test that get_checkpoint_result is called twice when checkpoint is created.""" + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + # First call: not found (checkpoint doesn't exist) + # Second call: started (checkpoint created, no immediate response) + not_found = CheckpointedResult.create_not_found() + started_op = Operation( + operation_id="step_immediate_1", + operation_type=OperationType.STEP, + status=OperationStatus.STARTED, + step_details=StepDetails(attempt=0), + ) + started = CheckpointedResult.create_from_operation(started_op) + mock_state.get_checkpoint_result.side_effect = [not_found, started] + + config = StepConfig(step_semantics=StepSemantics.AT_MOST_ONCE_PER_RETRY) + mock_callable = Mock(return_value="success_result") + mock_logger = Mock(spec=Logger) + mock_logger.with_log_info.return_value = mock_logger + + result = step_handler( + mock_callable, + mock_state, + OperationIdentifier("step_immediate_1", None, "test_step"), + config, + mock_logger, + ) + + # Verify get_checkpoint_result was called twice (before and after checkpoint creation) + assert mock_state.get_checkpoint_result.call_count == 2 + assert result == "success_result" + + +def test_step_immediate_response_create_checkpoint_sync_at_most_once(): + """Test that create_checkpoint is called with is_sync=True for AT_MOST_ONCE semantics.""" + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + # First call: not found, second call: started + not_found = CheckpointedResult.create_not_found() + started_op = Operation( + operation_id="step_immediate_2", + operation_type=OperationType.STEP, + status=OperationStatus.STARTED, + step_details=StepDetails(attempt=0), + ) + started = CheckpointedResult.create_from_operation(started_op) + mock_state.get_checkpoint_result.side_effect = [not_found, started] + + config = StepConfig(step_semantics=StepSemantics.AT_MOST_ONCE_PER_RETRY) + mock_callable = Mock(return_value="success_result") + mock_logger = Mock(spec=Logger) + mock_logger.with_log_info.return_value = mock_logger + + step_handler( + mock_callable, + mock_state, + OperationIdentifier("step_immediate_2", None, "test_step"), + config, + 
mock_logger, + ) + + # Verify START checkpoint was created with is_sync=True + start_call = mock_state.create_checkpoint.call_args_list[0] + assert start_call[1]["is_sync"] is True + + +def test_step_immediate_response_create_checkpoint_async_at_least_once(): + """Test that create_checkpoint is called with is_sync=False for AT_LEAST_ONCE semantics.""" + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + # For AT_LEAST_ONCE, only one call to get_checkpoint_result (no second check) + not_found = CheckpointedResult.create_not_found() + mock_state.get_checkpoint_result.return_value = not_found + + config = StepConfig(step_semantics=StepSemantics.AT_LEAST_ONCE_PER_RETRY) + mock_callable = Mock(return_value="success_result") + mock_logger = Mock(spec=Logger) + mock_logger.with_log_info.return_value = mock_logger + + step_handler( + mock_callable, + mock_state, + OperationIdentifier("step_immediate_3", None, "test_step"), + config, + mock_logger, + ) + + # Verify START checkpoint was created with is_sync=False + start_call = mock_state.create_checkpoint.call_args_list[0] + assert start_call[1]["is_sync"] is False + + +def test_step_immediate_response_immediate_success(): + """Test the success path with a sync checkpoint: the second check returns STARTED, so the + step function executes and its result is returned without suspending. + + Note: The current implementation calls get_checkpoint_result twice within check_result_status() + for sync checkpoints, so the mock setup must account for both calls. + """ + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + # First call: not found + # Second call: started (no immediate response, proceed to execute) + not_found = CheckpointedResult.create_not_found() + started_op = Operation( + operation_id="step_immediate_4", + operation_type=OperationType.STEP, + status=OperationStatus.STARTED, + step_details=StepDetails(attempt=0), + ) + started = CheckpointedResult.create_from_operation(started_op) + mock_state.get_checkpoint_result.side_effect = [not_found, started] + + config = StepConfig(step_semantics=StepSemantics.AT_MOST_ONCE_PER_RETRY) + mock_callable = Mock(return_value="immediate_success_result") + mock_logger = Mock(spec=Logger) + mock_logger.with_log_info.return_value = mock_logger + + result = step_handler( + mock_callable, + mock_state, + OperationIdentifier("step_immediate_4", None, "test_step"), + config, + mock_logger, + ) + + # Verify operation executed normally (no immediate response in current implementation) + assert result == "immediate_success_result" + mock_callable.assert_called_once() + # Both START and SUCCEED checkpoints should be created + assert mock_state.create_checkpoint.call_count == 2
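Taken together, the assertions in these step tests trace the same double-check pattern as the invoke tests above. A rough sketch of the control flow they imply, distilled from the assertions rather than from the executor's actual source (AT_MOST_ONCE semantics assumed; AT_LEAST_ONCE checkpoints asynchronously and skips the second check):

# Hedged pseudocode of the flow the assertions pin down:
first = state.get_checkpoint_result(op_id)
if first.is_succeeded():
    return deserialize(first.result)  # replay: cached result, no new checkpoint
state.create_checkpoint(operation_update=start_update, is_sync=True)  # sync START
second = state.get_checkpoint_result(op_id)  # may carry an immediate response
# STARTED on the second check means no immediate response: run the step function,
# then write a SUCCEED or FAIL checkpoint (two create_checkpoint calls in total).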
StepConfig(step_semantics=StepSemantics.AT_MOST_ONCE_PER_RETRY) + # Make the step function raise an error + mock_callable = Mock(side_effect=RuntimeError("Step execution error")) + mock_logger = Mock(spec=Logger) + mock_logger.with_log_info.return_value = mock_logger + + # Configure retry strategy to not retry + mock_retry_strategy = Mock( + return_value=RetryDecision(should_retry=False, delay=Duration.from_seconds(0)) + ) + config = StepConfig( + step_semantics=StepSemantics.AT_MOST_ONCE_PER_RETRY, + retry_strategy=mock_retry_strategy, + ) + + # Verify operation raises error after executing step function + with pytest.raises(CallableRuntimeError, match="Step execution error"): + step_handler( + mock_callable, + mock_state, + OperationIdentifier("step_immediate_5", None, "test_step"), + config, + mock_logger, + ) + + mock_callable.assert_called_once() + # Both START and FAIL checkpoints should be created + assert mock_state.create_checkpoint.call_count == 2 + + +def test_step_immediate_response_no_immediate_response(): + """Test no immediate response: checkpoint returns STARTED on second check, operation executes step function.""" + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + # First call: not found + # Second call: started (no immediate response, proceed to execute) + not_found = CheckpointedResult.create_not_found() + started_op = Operation( + operation_id="step_immediate_6", + operation_type=OperationType.STEP, + status=OperationStatus.STARTED, + step_details=StepDetails(attempt=0), + ) + started = CheckpointedResult.create_from_operation(started_op) + mock_state.get_checkpoint_result.side_effect = [not_found, started] + + config = StepConfig(step_semantics=StepSemantics.AT_MOST_ONCE_PER_RETRY) + mock_callable = Mock(return_value="normal_execution_result") + mock_logger = Mock(spec=Logger) + mock_logger.with_log_info.return_value = mock_logger + + result = step_handler( + mock_callable, + mock_state, + OperationIdentifier("step_immediate_6", None, "test_step"), + config, + mock_logger, + ) + + # Verify step function was executed + assert result == "normal_execution_result" + mock_callable.assert_called_once() + # Both START and SUCCEED checkpoints should be created + assert mock_state.create_checkpoint.call_count == 2 + + +def test_step_immediate_response_already_completed(): + """Test already completed: checkpoint is already SUCCEEDED on first check, no checkpoint created.""" + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + + # First call: already succeeded (replay scenario) + succeeded_op = Operation( + operation_id="step_immediate_7", + operation_type=OperationType.STEP, + status=OperationStatus.SUCCEEDED, + step_details=StepDetails(result=json.dumps("already_completed_result")), + ) + succeeded = CheckpointedResult.create_from_operation(succeeded_op) + mock_state.get_checkpoint_result.return_value = succeeded + + config = StepConfig(step_semantics=StepSemantics.AT_MOST_ONCE_PER_RETRY) + mock_callable = Mock(return_value="should_not_call") + mock_logger = Mock(spec=Logger) + + result = step_handler( + mock_callable, + mock_state, + OperationIdentifier("step_immediate_7", None, "test_step"), + config, + mock_logger, + ) + + # Verify operation returned immediately without creating checkpoint + assert result == "already_completed_result" + mock_callable.assert_not_called() + mock_state.create_checkpoint.assert_not_called() + # Only one call to get_checkpoint_result (no second check needed) + assert 
+
+
+def test_step_executes_function_when_checkpoint_not_terminal():
+    """Test backward compatibility: with AT_LEAST_ONCE semantics the executor
+    performs a single checkpoint check; when the checkpoint is not terminal,
+    the step function executes normally.
+    """
+    mock_state = Mock(spec=ExecutionState)
+    mock_state.durable_execution_arn = "test_arn"
+
+    # First call: checkpoint doesn't exist. The STARTED entry below would only
+    # be returned by a second check, which AT_LEAST_ONCE never performs.
+    not_found = CheckpointedResult.create_not_found()
+    started_op = Operation(
+        operation_id="step-1",
+        operation_type=OperationType.STEP,
+        status=OperationStatus.STARTED,
+        step_details=StepDetails(attempt=1),
+    )
+    started = CheckpointedResult.create_from_operation(started_op)
+    mock_state.get_checkpoint_result.side_effect = [not_found, started]
+
+    mock_step_function = Mock(return_value="result")
+    mock_logger = Mock(spec=Logger)
+    mock_logger.with_log_info.return_value = mock_logger
+
+    executor = StepOperationExecutor(
+        func=mock_step_function,
+        config=StepConfig(step_semantics=StepSemantics.AT_LEAST_ONCE_PER_RETRY),
+        state=mock_state,
+        operation_identifier=OperationIdentifier("step-1", None, "test_step"),
+        context_logger=mock_logger,
+    )
+    result = executor.process()
+
+    # Assert - behaves like "old way"
+    mock_step_function.assert_called_once()  # Function executed (not skipped)
+    assert result == "result"
+    assert (
+        mock_state.get_checkpoint_result.call_count == 1
+    )  # Only one check for AT_LEAST_ONCE
+    assert mock_state.create_checkpoint.call_count == 2  # START + SUCCEED checkpoints
diff --git a/tests/operation/wait_for_condition_test.py b/tests/operation/wait_for_condition_test.py
index d1e43af..676244f 100644
--- a/tests/operation/wait_for_condition_test.py
+++ b/tests/operation/wait_for_condition_test.py
@@ -6,6 +6,7 @@
 import pytest
 
+from aws_durable_execution_sdk_python.config import Duration
 from aws_durable_execution_sdk_python.exceptions import (
     CallableRuntimeError,
     InvocationError,
@@ -21,7 +22,7 @@
 )
 from aws_durable_execution_sdk_python.logger import Logger, LogInfo
 from aws_durable_execution_sdk_python.operation.wait_for_condition import (
-    wait_for_condition_handler,
+    WaitForConditionOperationExecutor,
 )
 from aws_durable_execution_sdk_python.state import CheckpointedResult, ExecutionState
 from aws_durable_execution_sdk_python.types import WaitForConditionCheckContext
@@ -32,6 +33,21 @@
 from tests.serdes_test import CustomDictSerDes
 
 
+# Test helper - maintains old handler signature for backward compatibility in tests
+def wait_for_condition_handler(
+    check, config, state, operation_identifier, context_logger
+):
+    """Test helper that wraps WaitForConditionOperationExecutor with old handler signature."""
+    executor = WaitForConditionOperationExecutor(
+        check=check,
+        config=config,
+        state=state,
+        operation_identifier=operation_identifier,
+        context_logger=context_logger,
+    )
+    return executor.process()
+
+
 def test_wait_for_condition_first_execution_condition_met():
     """Test wait_for_condition on first execution when condition is met."""
     mock_state = Mock(spec=ExecutionState)
@@ -54,7 +70,11 @@ def wait_strategy(state, attempt):
     config = WaitForConditionConfig(initial_state=5, wait_strategy=wait_strategy)
 
     result = wait_for_condition_handler(
-        check_func, config, mock_state, op_id, mock_logger
+        state=mock_state,
+        operation_identifier=op_id,
+        check=check_func,
+        config=config,
+        context_logger=mock_logger,
     )
 
     assert result == 6
@@ -78,12 +98,18 @@ def check_func(state, context):
         return state + 1
 
def wait_strategy(state, attempt): - return WaitForConditionDecision.continue_waiting(30) + return WaitForConditionDecision.continue_waiting(Duration.from_seconds(30)) config = WaitForConditionConfig(initial_state=5, wait_strategy=wait_strategy) with pytest.raises(SuspendExecution, match="will retry in 30 seconds"): - wait_for_condition_handler(check_func, config, mock_state, op_id, mock_logger) + wait_for_condition_handler( + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, + ) assert mock_state.create_checkpoint.call_count == 2 # START and RETRY @@ -113,7 +139,11 @@ def check_func(state, context): ) result = wait_for_condition_handler( - check_func, config, mock_state, op_id, mock_logger + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, ) assert result == 42 @@ -145,7 +175,11 @@ def check_func(state, context): ) result = wait_for_condition_handler( - check_func, config, mock_state, op_id, mock_logger + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, ) assert result is None @@ -178,7 +212,13 @@ def check_func(state, context): ) with pytest.raises(CallableRuntimeError): - wait_for_condition_handler(check_func, config, mock_state, op_id, mock_logger) + wait_for_condition_handler( + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, + ) def test_wait_for_condition_retry_with_state(): @@ -208,7 +248,11 @@ def check_func(state, context): ) result = wait_for_condition_handler( - check_func, config, mock_state, op_id, mock_logger + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, ) assert result == 11 # 10 (from checkpoint) + 1 @@ -242,7 +286,11 @@ def check_func(state, context): ) result = wait_for_condition_handler( - check_func, config, mock_state, op_id, mock_logger + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, ) assert result == 6 # 5 (initial) + 1 @@ -275,7 +323,11 @@ def check_func(state, context): ) result = wait_for_condition_handler( - check_func, config, mock_state, op_id, mock_logger + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, ) assert result == 6 # Falls back to initial state @@ -304,7 +356,13 @@ def check_func(state, context): ) with pytest.raises(ValueError, match="Test error"): - wait_for_condition_handler(check_func, config, mock_state, op_id, mock_logger) + wait_for_condition_handler( + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, + ) assert mock_state.create_checkpoint.call_count == 2 # START and FAIL @@ -334,7 +392,13 @@ def check_func(state, context): wait_strategy=lambda s, a: WaitForConditionDecision.stop_polling(), ) - wait_for_condition_handler(check_func, config, mock_state, op_id, mock_logger) + wait_for_condition_handler( + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, + ) assert isinstance(captured_context, WaitForConditionCheckContext) assert captured_context.logger is mock_logger @@ -357,12 +421,18 @@ def check_func(state, context): return state + 1 def wait_strategy(state, attempt): - return WaitForConditionDecision(should_continue=True, delay_seconds=None) + 
return WaitForConditionDecision(should_continue=True, delay=Duration()) config = WaitForConditionConfig(initial_state=5, wait_strategy=wait_strategy) - with pytest.raises(SuspendExecution, match="will retry in None seconds"): - wait_for_condition_handler(check_func, config, mock_state, op_id, mock_logger) + with pytest.raises(SuspendExecution, match="will retry in 0 seconds"): + wait_for_condition_handler( + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, + ) def test_wait_for_condition_no_operation_in_checkpoint(): @@ -396,7 +466,11 @@ def check_func(state, context): ) result = wait_for_condition_handler( - check_func, config, mock_state, op_id, mock_logger + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, ) assert result == 11 # Uses attempt=1 by default @@ -441,7 +515,11 @@ def check_func(state, context): ) result = wait_for_condition_handler( - check_func, config, mock_state, op_id, mock_logger + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, ) assert result == 11 # Uses attempt=1 by default @@ -464,12 +542,20 @@ def check_func(state, context): return state + 1 def wait_strategy(state, attempt): - return WaitForConditionDecision(should_continue=True, delay_seconds=60) + return WaitForConditionDecision( + should_continue=True, delay=Duration.from_minutes(1) + ) config = WaitForConditionConfig(initial_state=5, wait_strategy=wait_strategy) with pytest.raises(SuspendExecution, match="will retry in 60 seconds"): - wait_for_condition_handler(check_func, config, mock_state, op_id, mock_logger) + wait_for_condition_handler( + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, + ) def test_wait_for_condition_attempt_number_passed_to_strategy(): @@ -502,7 +588,13 @@ def wait_strategy(state, attempt): config = WaitForConditionConfig(initial_state=5, wait_strategy=wait_strategy) - wait_for_condition_handler(check_func, config, mock_state, op_id, mock_logger) + wait_for_condition_handler( + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, + ) assert captured_attempt == 3 @@ -532,7 +624,13 @@ def wait_strategy(state, attempt): config = WaitForConditionConfig(initial_state=5, wait_strategy=wait_strategy) - wait_for_condition_handler(check_func, config, mock_state, op_id, mock_logger) + wait_for_condition_handler( + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, + ) assert captured_state == 10 # 5 * 2 @@ -558,7 +656,13 @@ def check_func(state, context): wait_strategy=lambda s, a: WaitForConditionDecision.stop_polling(), ) - wait_for_condition_handler(check_func, config, mock_state, op_id, mock_logger) + wait_for_condition_handler( + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, + ) # Verify logger.with_log_info was called mock_logger.with_log_info.assert_called_once() @@ -583,12 +687,20 @@ def check_func(state, context): return state + 1 def wait_strategy(state, attempt): - return WaitForConditionDecision(should_continue=True, delay_seconds=0) + return WaitForConditionDecision( + should_continue=True, delay=Duration.from_seconds(0) + ) config = WaitForConditionConfig(initial_state=5, wait_strategy=wait_strategy) with 
pytest.raises(SuspendExecution, match="will retry in 0 seconds"): - wait_for_condition_handler(check_func, config, mock_state, op_id, mock_logger) + wait_for_condition_handler( + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, + ) def test_wait_for_condition_custom_serdes_first_execution_condition_met(): @@ -614,7 +726,13 @@ def wait_strategy(state, attempt): initial_state=5, wait_strategy=wait_strategy, serdes=CustomDictSerDes() ) - wait_for_condition_handler(check_func, config, mock_state, op_id, mock_logger) + wait_for_condition_handler( + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, + ) expected_checkpoointed_result = ( '{"key": "VALUE", "number": "84", "list": [1, 2, 3]}' ) @@ -651,7 +769,11 @@ def check_func(state, context): ) result = wait_for_condition_handler( - check_func, config, mock_state, op_id, mock_logger + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, ) assert result == {"key": "value", "number": 42, "list": [1, 2, 3]} @@ -692,7 +814,13 @@ def check_func(state, context): with pytest.raises( SuspendExecution, match="wait_for_condition test_wait will retry at timestamp" ): - wait_for_condition_handler(check_func, config, mock_state, op_id, mock_logger) + wait_for_condition_handler( + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, + ) def test_wait_for_condition_pending_without_next_attempt(): @@ -728,4 +856,346 @@ def check_func(state, context): SuspendExecution, match="No timestamp provided. Suspending without retry timestamp.", ): - wait_for_condition_handler(check_func, config, mock_state, op_id, mock_logger) + wait_for_condition_handler( + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, + ) + + +# Immediate Response Handling Tests + + +def test_wait_for_condition_checkpoint_called_once_with_is_sync_false(): + """Test that get_checkpoint_result is called once when checkpoint is created (is_sync=False).""" + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "arn:aws:test" + mock_state.get_checkpoint_result.return_value = ( + CheckpointedResult.create_not_found() + ) + + mock_logger = Mock(spec=Logger) + mock_logger.with_log_info.return_value = mock_logger + + op_id = OperationIdentifier("op1", None, "test_wait") + + def check_func(state, context): + return state + 1 + + config = WaitForConditionConfig( + initial_state=5, + wait_strategy=lambda s, a: WaitForConditionDecision.stop_polling(), + ) + + wait_for_condition_handler( + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, + ) + + # Verify get_checkpoint_result called only once (no second check for async checkpoint) + assert mock_state.get_checkpoint_result.call_count == 1 + + # Verify create_checkpoint called with is_sync=False + assert mock_state.create_checkpoint.call_count == 2 # START and SUCCESS + start_call = mock_state.create_checkpoint.call_args_list[0] + assert start_call[1]["is_sync"] is False + + +def test_wait_for_condition_immediate_success_without_executing_check(): + """Test immediate success: checkpoint returns SUCCEEDED on first check, returns result without executing check.""" + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + operation = 
Operation( + operation_id="op1", + operation_type=OperationType.STEP, + status=OperationStatus.SUCCEEDED, + step_details=StepDetails(result=json.dumps(42)), + ) + mock_result = CheckpointedResult.create_from_operation(operation) + mock_state.get_checkpoint_result.return_value = mock_result + + mock_logger = Mock(spec=Logger) + op_id = OperationIdentifier("op1", None, "test_wait") + + # Check function should NOT be called + def check_func(state, context): + msg = "Check function should not be called for immediate success" + raise AssertionError(msg) + + config = WaitForConditionConfig( + initial_state=5, + wait_strategy=lambda s, a: WaitForConditionDecision.stop_polling(), + ) + + result = wait_for_condition_handler( + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, + ) + + # Verify result returned without executing check function + assert result == 42 + # Verify no new checkpoints created + assert mock_state.create_checkpoint.call_count == 0 + + +def test_wait_for_condition_immediate_failure_without_executing_check(): + """Test immediate failure: checkpoint returns FAILED on first check, raises error without executing check.""" + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + operation = Operation( + operation_id="op1", + operation_type=OperationType.STEP, + status=OperationStatus.FAILED, + step_details=StepDetails( + error=ErrorObject("Test error", "TestError", None, None) + ), + ) + mock_result = CheckpointedResult.create_from_operation(operation) + mock_state.get_checkpoint_result.return_value = mock_result + + mock_logger = Mock(spec=Logger) + op_id = OperationIdentifier("op1", None, "test_wait") + + # Check function should NOT be called + def check_func(state, context): + msg = "Check function should not be called for immediate failure" + raise AssertionError(msg) + + config = WaitForConditionConfig( + initial_state=5, + wait_strategy=lambda s, a: WaitForConditionDecision.stop_polling(), + ) + + # Verify error raised without executing check function + with pytest.raises(CallableRuntimeError): + wait_for_condition_handler( + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, + ) + + # Verify no new checkpoints created + assert mock_state.create_checkpoint.call_count == 0 + + +def test_wait_for_condition_pending_suspends_without_executing_check(): + """Test pending handling: checkpoint returns PENDING on first check, suspends without executing check.""" + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "arn:aws:test" + operation = Operation( + operation_id="op1", + operation_type=OperationType.STEP, + status=OperationStatus.PENDING, + step_details=StepDetails( + result=json.dumps(10), + next_attempt_timestamp=datetime.datetime.fromtimestamp( + 1764547200, tz=datetime.UTC + ), + ), + ) + mock_result = CheckpointedResult.create_from_operation(operation) + mock_state.get_checkpoint_result.return_value = mock_result + + mock_logger = Mock(spec=Logger) + mock_logger.with_log_info.return_value = mock_logger + + op_id = OperationIdentifier("op1", None, "test_wait") + + # Check function should NOT be called + def check_func(state, context): + msg = "Check function should not be called for pending status" + raise AssertionError(msg) + + config = WaitForConditionConfig( + initial_state=5, + wait_strategy=lambda s, a: WaitForConditionDecision.stop_polling(), + ) + + # Verify suspend occurs without 
executing check function + with pytest.raises( + SuspendExecution, match="wait_for_condition test_wait will retry at timestamp" + ): + wait_for_condition_handler( + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, + ) + + # Verify no new checkpoints created + assert mock_state.create_checkpoint.call_count == 0 + + +def test_wait_for_condition_no_checkpoint_executes_check_function(): + """Test no immediate response: when checkpoint doesn't exist, operation executes check function.""" + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "arn:aws:test" + mock_state.get_checkpoint_result.return_value = ( + CheckpointedResult.create_not_found() + ) + + mock_logger = Mock(spec=Logger) + mock_logger.with_log_info.return_value = mock_logger + + op_id = OperationIdentifier("op1", None, "test_wait") + + check_called = False + + def check_func(state, context): + nonlocal check_called + check_called = True + return state + 1 + + config = WaitForConditionConfig( + initial_state=5, + wait_strategy=lambda s, a: WaitForConditionDecision.stop_polling(), + ) + + result = wait_for_condition_handler( + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, + ) + + # Verify check function was executed + assert check_called is True + assert result == 6 + + # Verify checkpoints created (START and SUCCESS) + assert mock_state.create_checkpoint.call_count == 2 + + +def test_wait_for_condition_already_completed_no_checkpoint_created(): + """Test already completed: when checkpoint is SUCCEEDED on first check, no checkpoint created.""" + mock_state = Mock(spec=ExecutionState) + mock_state.durable_execution_arn = "test_arn" + operation = Operation( + operation_id="op1", + operation_type=OperationType.STEP, + status=OperationStatus.SUCCEEDED, + step_details=StepDetails(result=json.dumps(42)), + ) + mock_result = CheckpointedResult.create_from_operation(operation) + mock_state.get_checkpoint_result.return_value = mock_result + + mock_logger = Mock(spec=Logger) + op_id = OperationIdentifier("op1", None, "test_wait") + + def check_func(state, context): + return state + 1 + + config = WaitForConditionConfig( + initial_state=5, + wait_strategy=lambda s, a: WaitForConditionDecision.stop_polling(), + ) + + result = wait_for_condition_handler( + state=mock_state, + operation_identifier=op_id, + check=check_func, + config=config, + context_logger=mock_logger, + ) + + # Verify result returned + assert result == 42 + + # Verify NO checkpoints created (already completed) + assert mock_state.create_checkpoint.call_count == 0 + + +def test_wait_for_condition_executes_check_when_checkpoint_not_terminal(): + """Test backward compatibility: when checkpoint is not terminal (STARTED), + the wait_for_condition operation executes the check function normally. + + Note: wait_for_condition uses async checkpoints (is_sync=False), so there's + only one check, not two. 
+    """
+    mock_state = Mock(spec=ExecutionState)
+    mock_state.durable_execution_arn = "test_arn"
+
+    # Single call: checkpoint doesn't exist (async checkpoint, no second check)
+    mock_state.get_checkpoint_result.return_value = (
+        CheckpointedResult.create_not_found()
+    )
+
+    mock_check_function = Mock(return_value="final_state")
+    mock_logger = Mock(spec=Logger)
+    mock_logger.with_log_info.return_value = mock_logger
+
+    def mock_wait_strategy(state, attempt):
+        return WaitForConditionDecision(
+            should_continue=False, delay=Duration.from_seconds(0)
+        )
+
+    executor = WaitForConditionOperationExecutor(
+        check=mock_check_function,
+        config=WaitForConditionConfig(
+            initial_state="initial",
+            wait_strategy=mock_wait_strategy,
+        ),
+        state=mock_state,
+        operation_identifier=OperationIdentifier("wfc-1", None, "test_wfc"),
+        context_logger=mock_logger,
+    )
+    result = executor.process()
+
+    # Assert - behaves like "old way"
+    mock_check_function.assert_called_once()  # Check function executed
+    assert result == "final_state"
+    assert mock_state.get_checkpoint_result.call_count == 1  # Single check (async)
+    assert mock_state.create_checkpoint.call_count == 2  # START + SUCCESS checkpoints
+
+
+def test_wait_for_condition_executes_check_when_checkpoint_not_terminal_none_delay():
+    """Variant of the previous test with delay=None in the wait strategy's
+    decision; the check function still executes normally.
+
+    Note: wait_for_condition uses async checkpoints (is_sync=False), so there's
+    only one check, not two.
+    """
+    mock_state = Mock(spec=ExecutionState)
+    mock_state.durable_execution_arn = "test_arn"
+
+    # Single call: checkpoint doesn't exist (async checkpoint, no second check)
+    mock_state.get_checkpoint_result.return_value = (
+        CheckpointedResult.create_not_found()
+    )
+
+    mock_check_function = Mock(return_value="final_state")
+    mock_logger = Mock(spec=Logger)
+    mock_logger.with_log_info.return_value = mock_logger
+
+    def mock_wait_strategy(state, attempt):
+        return WaitForConditionDecision(should_continue=False, delay=None)
+
+    executor = WaitForConditionOperationExecutor(
+        check=mock_check_function,
+        config=WaitForConditionConfig(
+            initial_state="initial",
+            wait_strategy=mock_wait_strategy,
+        ),
+        state=mock_state,
+        operation_identifier=OperationIdentifier("wfc-1", None, "test_wfc"),
+        context_logger=mock_logger,
+    )
+    result = executor.process()
+
+    # Assert - behaves like "old way"
+    mock_check_function.assert_called_once()  # Check function executed
+    assert result == "final_state"
+    assert mock_state.get_checkpoint_result.call_count == 1  # Single check (async)
+    assert mock_state.create_checkpoint.call_count == 2  # START + SUCCESS checkpoints
diff --git a/tests/operation/wait_test.py b/tests/operation/wait_test.py
index 17b9de9..ca3083e 100644
--- a/tests/operation/wait_test.py
+++ b/tests/operation/wait_test.py
@@ -7,16 +7,29 @@
 from aws_durable_execution_sdk_python.exceptions import SuspendExecution
 from aws_durable_execution_sdk_python.identifier import OperationIdentifier
 from aws_durable_execution_sdk_python.lambda_service import (
+    Operation,
     OperationAction,
+    OperationStatus,
     OperationSubType,
     OperationType,
     OperationUpdate,
     WaitOptions,
 )
-from aws_durable_execution_sdk_python.operation.wait import wait_handler
+from aws_durable_execution_sdk_python.operation.wait import WaitOperationExecutor
 from aws_durable_execution_sdk_python.state import CheckpointedResult, ExecutionState
 
 
+# Test helper function - maintains old handler signature for backward 
compatibility +def wait_handler(seconds: int, state, operation_identifier) -> None: + """Test helper that wraps WaitOperationExecutor with old handler signature.""" + executor = WaitOperationExecutor( + seconds=seconds, + state=state, + operation_identifier=operation_identifier, + ) + return executor.process() + + def test_wait_handler_already_completed(): """Test wait_handler when operation is already completed.""" mock_state = Mock(spec=ExecutionState) @@ -37,10 +50,18 @@ def test_wait_handler_already_completed(): def test_wait_handler_not_completed(): """Test wait_handler when operation is not completed.""" mock_state = Mock(spec=ExecutionState) - mock_result = Mock(spec=CheckpointedResult) - mock_result.is_succeeded.return_value = False - mock_result.is_existent.return_value = False - mock_state.get_checkpoint_result.return_value = mock_result + + # First call: checkpoint doesn't exist + not_found_result = Mock(spec=CheckpointedResult) + not_found_result.is_succeeded.return_value = False + not_found_result.is_existent.return_value = False + + # Second call: checkpoint exists but not completed (no immediate response) + started_result = Mock(spec=CheckpointedResult) + started_result.is_succeeded.return_value = False + started_result.is_existent.return_value = True + + mock_state.get_checkpoint_result.side_effect = [not_found_result, started_result] with pytest.raises(SuspendExecution, match="Wait for 30 seconds"): wait_handler( @@ -49,7 +70,8 @@ def test_wait_handler_not_completed(): operation_identifier=OperationIdentifier("wait2", None), ) - mock_state.get_checkpoint_result.assert_called_once_with("wait2") + # Should be called twice: once before checkpoint, once after to check for immediate response + assert mock_state.get_checkpoint_result.call_count == 2 expected_operation = OperationUpdate( operation_id="wait2", @@ -60,25 +82,36 @@ def test_wait_handler_not_completed(): wait_options=WaitOptions(wait_seconds=30), ) mock_state.create_checkpoint.assert_called_once_with( - operation_update=expected_operation + operation_update=expected_operation, is_sync=True ) def test_wait_handler_with_none_name(): """Test wait_handler with None name.""" mock_state = Mock(spec=ExecutionState) - mock_result = Mock(spec=CheckpointedResult) - mock_result.is_succeeded.return_value = False - mock_result.is_existent.return_value = False - mock_state.get_checkpoint_result.return_value = mock_result + + # First call: checkpoint doesn't exist + not_found_result = Mock(spec=CheckpointedResult) + not_found_result.is_succeeded.return_value = False + not_found_result.is_existent.return_value = False + + # Second call: checkpoint exists but not completed (no immediate response) + started_result = Mock(spec=CheckpointedResult) + started_result.is_succeeded.return_value = False + started_result.is_existent.return_value = True + + mock_state.get_checkpoint_result.side_effect = [not_found_result, started_result] with pytest.raises(SuspendExecution, match="Wait for 5 seconds"): wait_handler( - seconds=5, state=mock_state, operation_identifier=OperationIdentifier("wait3", None), + seconds=5, ) + # Should be called twice: once before checkpoint, once after to check for immediate response + assert mock_state.get_checkpoint_result.call_count == 2 + expected_operation = OperationUpdate( operation_id="wait3", parent_id=None, @@ -88,7 +121,7 @@ def test_wait_handler_with_none_name(): wait_options=WaitOptions(wait_seconds=5), ) mock_state.create_checkpoint.assert_called_once_with( - operation_update=expected_operation + 
operation_update=expected_operation, is_sync=True ) @@ -102,10 +135,285 @@ def test_wait_handler_with_existent(): with pytest.raises(SuspendExecution, match="Wait for 5 seconds"): wait_handler( - seconds=5, state=mock_state, operation_identifier=OperationIdentifier("wait4", None), + seconds=5, ) mock_state.get_checkpoint_result.assert_called_once_with("wait4") mock_state.create_checkpoint.assert_not_called() + + +# Immediate response handling tests + + +def test_wait_status_evaluation_after_checkpoint(): + """Test that status is evaluated twice: before and after checkpoint creation. + + This verifies the immediate response pattern: + 1. Check status (checkpoint doesn't exist) + 2. Create checkpoint with is_sync=True + 3. Check status again (catches immediate response) + """ + # Arrange + mock_state = Mock(spec=ExecutionState) + + # First call: checkpoint doesn't exist + not_found_result = Mock(spec=CheckpointedResult) + not_found_result.is_succeeded.return_value = False + not_found_result.is_existent.return_value = False + + # Second call: checkpoint exists but not completed (no immediate response) + started_result = Mock(spec=CheckpointedResult) + started_result.is_succeeded.return_value = False + started_result.is_existent.return_value = True + + mock_state.get_checkpoint_result.side_effect = [not_found_result, started_result] + + executor = WaitOperationExecutor( + seconds=30, + state=mock_state, + operation_identifier=OperationIdentifier("wait_eval", None, "test_wait"), + ) + + # Act + with pytest.raises(SuspendExecution): + executor.process() + + # Assert - verify status checked twice + assert mock_state.get_checkpoint_result.call_count == 2 + mock_state.get_checkpoint_result.assert_any_call("wait_eval") + + # Verify checkpoint created with is_sync=True + expected_operation = OperationUpdate( + operation_id="wait_eval", + parent_id=None, + name="test_wait", + operation_type=OperationType.WAIT, + action=OperationAction.START, + sub_type=OperationSubType.WAIT, + wait_options=WaitOptions(wait_seconds=30), + ) + mock_state.create_checkpoint.assert_called_once_with( + operation_update=expected_operation, is_sync=True + ) + + +def test_wait_immediate_success_handling(): + """Test that immediate SUCCEEDED response returns without suspend. + + When the checkpoint returns SUCCEEDED on the second status check, + the operation should return immediately without suspending. + """ + # Arrange + mock_state = Mock(spec=ExecutionState) + + # First call: checkpoint doesn't exist + not_found_result = Mock(spec=CheckpointedResult) + not_found_result.is_succeeded.return_value = False + not_found_result.is_existent.return_value = False + + # Second call: checkpoint succeeded immediately + succeeded_result = Mock(spec=CheckpointedResult) + succeeded_result.is_succeeded.return_value = True + + mock_state.get_checkpoint_result.side_effect = [not_found_result, succeeded_result] + + executor = WaitOperationExecutor( + seconds=5, + state=mock_state, + operation_identifier=OperationIdentifier( + "wait_immediate", None, "immediate_wait" + ), + ) + + # Act + result = executor.process() + + # Assert - verify immediate return without suspend + assert result is None # Wait returns None + + # Verify checkpoint was created + assert mock_state.create_checkpoint.call_count == 1 + + # Verify status checked twice + assert mock_state.get_checkpoint_result.call_count == 2 + + +def test_wait_no_immediate_response_suspends(): + """Test that wait suspends when no immediate response received. 
+ + When the checkpoint returns STARTED (not completed) on the second check, + the operation should suspend to wait for timer completion. + """ + # Arrange + mock_state = Mock(spec=ExecutionState) + + # First call: checkpoint doesn't exist + not_found_result = Mock(spec=CheckpointedResult) + not_found_result.is_succeeded.return_value = False + not_found_result.is_existent.return_value = False + + # Second call: checkpoint exists but not completed + started_result = Mock(spec=CheckpointedResult) + started_result.is_succeeded.return_value = False + started_result.is_existent.return_value = True + + mock_state.get_checkpoint_result.side_effect = [not_found_result, started_result] + + executor = WaitOperationExecutor( + seconds=60, + state=mock_state, + operation_identifier=OperationIdentifier("wait_suspend", None), + ) + + # Act & Assert - verify suspend occurs + with pytest.raises(SuspendExecution) as exc_info: + executor.process() + + # Verify suspend message + assert "Wait for 60 seconds" in str(exc_info.value) + + # Verify checkpoint was created + assert mock_state.create_checkpoint.call_count == 1 + + # Verify status checked twice + assert mock_state.get_checkpoint_result.call_count == 2 + + +def test_wait_already_completed_no_checkpoint(): + """Test that already completed wait doesn't create checkpoint. + + When replaying and the wait is already completed, it should return + immediately without creating a new checkpoint. + """ + # Arrange + mock_state = Mock(spec=ExecutionState) + + # Checkpoint already exists and succeeded + succeeded_result = Mock(spec=CheckpointedResult) + succeeded_result.is_succeeded.return_value = True + + mock_state.get_checkpoint_result.return_value = succeeded_result + + executor = WaitOperationExecutor( + seconds=10, + state=mock_state, + operation_identifier=OperationIdentifier("wait_replay", None, "completed_wait"), + ) + + # Act + result = executor.process() + + # Assert - verify immediate return without checkpoint + assert result is None + + # Verify no checkpoint created + mock_state.create_checkpoint.assert_not_called() + + # Verify status checked only once + mock_state.get_checkpoint_result.assert_called_once_with("wait_replay") + + +def test_wait_with_various_durations(): + """Test wait operations with different durations handle immediate response correctly.""" + for seconds in [1, 30, 300, 3600]: + # Arrange + mock_state = Mock(spec=ExecutionState) + + # First call: checkpoint doesn't exist + not_found_result = Mock(spec=CheckpointedResult) + not_found_result.is_succeeded.return_value = False + not_found_result.is_existent.return_value = False + + # Second call: immediate success + succeeded_result = Mock(spec=CheckpointedResult) + succeeded_result.is_succeeded.return_value = True + + mock_state.get_checkpoint_result.side_effect = [ + not_found_result, + succeeded_result, + ] + + executor = WaitOperationExecutor( + seconds=seconds, + state=mock_state, + operation_identifier=OperationIdentifier(f"wait_duration_{seconds}", None), + ) + + # Act + result = executor.process() + + # Assert + assert result is None + assert mock_state.get_checkpoint_result.call_count == 2 + + # Verify correct wait duration in checkpoint + call_args = mock_state.create_checkpoint.call_args + assert call_args[1]["operation_update"].wait_options.wait_seconds == seconds + + +def test_wait_suspends_when_second_check_returns_started(): + """Test backward compatibility: when the second checkpoint check returns + STARTED (not terminal), the wait operation suspends normally. 
+    """
+    mock_state = Mock(spec=ExecutionState)
+    mock_state.durable_execution_arn = "test_arn"
+
+    # First call: checkpoint doesn't exist
+    # Second call: checkpoint returns STARTED (no immediate response)
+    mock_state.get_checkpoint_result.side_effect = [
+        CheckpointedResult.create_not_found(),
+        CheckpointedResult.create_from_operation(
+            Operation(
+                operation_id="wait-1",
+                operation_type=OperationType.WAIT,
+                status=OperationStatus.STARTED,
+            )
+        ),
+    ]
+
+    executor = WaitOperationExecutor(
+        seconds=5,
+        state=mock_state,
+        operation_identifier=OperationIdentifier("wait-1", None, "test_wait"),
+    )
+
+    with pytest.raises(SuspendExecution):
+        executor.process()
+
+    # Assert - behaves like "old way"
+    assert mock_state.get_checkpoint_result.call_count == 2  # Double-check happened
+    mock_state.create_checkpoint.assert_called_once()  # START checkpoint created
diff --git a/tests/retries_test.py b/tests/retries_test.py
index 8209376..1b58134 100644
--- a/tests/retries_test.py
+++ b/tests/retries_test.py
@@ -5,6 +5,7 @@
 import pytest
 
+from aws_durable_execution_sdk_python.config import Duration
 from aws_durable_execution_sdk_python.retries import (
     JitterStrategy,
     RetryDecision,
@@ -13,400 +14,563 @@
     create_retry_strategy,
 )
 
+# region Jitter Strategy Tests
 
-class TestJitterStrategy:
-    """Test jitter strategy implementations."""
-
-    def test_none_jitter_returns_zero(self):
-        """Test NONE jitter always returns 0."""
-        strategy = JitterStrategy.NONE
-        assert strategy.compute_jitter(10) == 0
-        assert strategy.compute_jitter(100) == 0
-
-    @patch("random.random")
-    def test_full_jitter_range(self, mock_random):
-        """Test FULL jitter returns value between 0 and delay."""
-        mock_random.return_value = 0.5
-        strategy = JitterStrategy.FULL
-        delay = 10
-        result = strategy.compute_jitter(delay)
-        assert result == 5.0  # 0.5 * 10
-
-    @patch("random.random")
-    def test_half_jitter_range(self, mock_random):
-        """Test HALF jitter returns value between 0.5 and 1.0 (multiplier)."""
-        mock_random.return_value = 0.5
-        strategy = JitterStrategy.HALF
-        result = strategy.compute_jitter(10)
-        assert result == 7.5  # 10 * (0.5 + 0.5*0.5)
-
-    @patch("random.random")
-    def test_half_jitter_boundary_values(self, mock_random):
-        """Test HALF jitter boundary values."""
-        strategy = JitterStrategy.HALF
-
-        # Minimum value (random = 0)
-        mock_random.return_value = 0.0
-        result = 
strategy.compute_jitter(100) - assert result == 50 - - # Maximum value (random = 1) - mock_random.return_value = 1.0 - result = strategy.compute_jitter(100) - assert result == 100 - - def test_invalid_jitter_strategy(self): - """Test behavior with invalid jitter strategy.""" - # Create an invalid enum value by bypassing normal construction - invalid_strategy = "INVALID" - - # This should raise an exception or return None - with pytest.raises((ValueError, AttributeError)): - JitterStrategy(invalid_strategy).compute_jitter(10) - - -class TestRetryDecision: - """Test RetryDecision factory methods.""" - - def test_retry_factory(self): - """Test retry factory method.""" - decision = RetryDecision.retry(30) - assert decision.should_retry is True - assert decision.delay_seconds == 30 - - def test_no_retry_factory(self): - """Test no_retry factory method.""" - decision = RetryDecision.no_retry() - assert decision.should_retry is False - assert decision.delay_seconds == 0 - - -class TestRetryStrategyConfig: - """Test RetryStrategyConfig defaults and behavior.""" - - def test_default_config(self): - """Test default configuration values.""" - config = RetryStrategyConfig() - assert config.max_attempts == 3 - assert config.initial_delay_seconds == 5 - assert config.max_delay_seconds == 300 - assert config.backoff_rate == 2.0 - assert config.jitter_strategy == JitterStrategy.FULL - assert len(config.retryable_errors) == 1 - assert config.retryable_error_types == [] - - -class TestCreateRetryStrategy: - """Test retry strategy creation and behavior.""" - - def test_max_attempts_exceeded(self): - """Test strategy returns no_retry when max attempts exceeded.""" - config = RetryStrategyConfig(max_attempts=2) - strategy = create_retry_strategy(config) - error = Exception("test error") - decision = strategy(error, 2) - assert decision.should_retry is False +def test_none_jitter_returns_delay(): + """Test NONE jitter returns the original delay unchanged.""" + strategy = JitterStrategy.NONE + assert strategy.apply_jitter(10) == 10 + assert strategy.apply_jitter(100) == 100 - def test_retryable_error_message_string(self): - """Test retry based on error message string match.""" - config = RetryStrategyConfig(retryable_errors=["timeout"]) - strategy = create_retry_strategy(config) - error = Exception("connection timeout") - decision = strategy(error, 1) - assert decision.should_retry is True +@patch("random.random") +def test_full_jitter_range(mock_random): + """Test FULL jitter returns value between 0 and delay.""" + mock_random.return_value = 0.5 + strategy = JitterStrategy.FULL + delay = 10 + result = strategy.apply_jitter(delay) + assert result == 5.0 # 0.5 * 10 - def test_retryable_error_message_regex(self): - """Test retry based on error message regex match.""" - config = RetryStrategyConfig(retryable_errors=[re.compile(r"timeout|error")]) - strategy = create_retry_strategy(config) - error = Exception("network timeout occurred") - decision = strategy(error, 1) - assert decision.should_retry is True +@patch("random.random") +def test_half_jitter_range(mock_random): + """Test HALF jitter returns value between delay/2 and delay.""" + mock_random.return_value = 0.5 + strategy = JitterStrategy.HALF + result = strategy.apply_jitter(10) + assert result == 7.5 # 10/2 + 0.5 * (10/2) = 5 + 2.5 - def test_retryable_error_type(self): - """Test retry based on error type.""" - config = RetryStrategyConfig(retryable_error_types=[ValueError]) - strategy = create_retry_strategy(config) - error = ValueError("invalid value") - 
decision = strategy(error, 1) - assert decision.should_retry is True +@patch("random.random") +def test_half_jitter_boundary_values(mock_random): + """Test HALF jitter boundary values.""" + strategy = JitterStrategy.HALF - def test_non_retryable_error(self): - """Test no retry for non-retryable error.""" - config = RetryStrategyConfig(retryable_errors=["timeout"]) - strategy = create_retry_strategy(config) + # Minimum value (random = 0): delay/2 + 0 = delay/2 + mock_random.return_value = 0.0 + result = strategy.apply_jitter(100) + assert result == 50 - error = Exception("permission denied") - decision = strategy(error, 1) - assert decision.should_retry is False + # Maximum value (random = 1): delay/2 + delay/2 = delay + mock_random.return_value = 1.0 + result = strategy.apply_jitter(100) + assert result == 100 - @patch("random.random") - def test_exponential_backoff_calculation(self, mock_random): - """Test exponential backoff delay calculation.""" - mock_random.return_value = 0.5 - config = RetryStrategyConfig( - initial_delay_seconds=2, - backoff_rate=2.0, - jitter_strategy=JitterStrategy.FULL, - ) - strategy = create_retry_strategy(config) - error = Exception("test error") +def test_invalid_jitter_strategy(): + """Test behavior with invalid jitter strategy.""" + # Create an invalid enum value by bypassing normal construction + invalid_strategy = "INVALID" - # First attempt: 2 * (2^0) = 2, jitter adds 1, total = 3 - decision = strategy(error, 1) - assert decision.delay_seconds == 3 + # This should raise an exception or return None + with pytest.raises((ValueError, AttributeError)): + JitterStrategy(invalid_strategy).apply_jitter(10) - # Second attempt: 2 * (2^1) = 4, jitter adds 2, total = 6 - decision = strategy(error, 2) - assert decision.delay_seconds == 6 - def test_max_delay_cap(self): - """Test delay is capped at max_delay_seconds.""" - config = RetryStrategyConfig( - initial_delay_seconds=100, - max_delay_seconds=50, - backoff_rate=2.0, - jitter_strategy=JitterStrategy.NONE, - ) - strategy = create_retry_strategy(config) +# endregion + + +# region Retry Decision Tests + + +def test_retry_factory(): + """Test retry factory method.""" + decision = RetryDecision.retry(Duration.from_seconds(30)) + assert decision.should_retry is True + assert decision.delay_seconds == 30 + + +def test_no_retry_factory(): + """Test no_retry factory method.""" + decision = RetryDecision.no_retry() + assert decision.should_retry is False + assert decision.delay_seconds == 0 + + +# endregion + + +# region Retry Strategy Config Tests + + +def test_default_config(): + """Test default configuration values.""" + config = RetryStrategyConfig() + assert config.max_attempts == 3 + assert config.initial_delay_seconds == 5 + assert config.max_delay_seconds == 300 + assert config.backoff_rate == 2.0 + assert config.jitter_strategy == JitterStrategy.FULL + assert config.retryable_errors is None + assert config.retryable_error_types is None + + +# endregion + + +# region Create Retry Strategy Tests + + +def test_max_attempts_exceeded(): + """Test strategy returns no_retry when max attempts exceeded.""" + config = RetryStrategyConfig(max_attempts=2) + strategy = create_retry_strategy(config) + + error = Exception("test error") + decision = strategy(error, 2) + assert decision.should_retry is False + + +def test_retryable_error_message_string(): + """Test retry based on error message string match.""" + config = RetryStrategyConfig(retryable_errors=["timeout"]) + strategy = create_retry_strategy(config) + + error = 
Exception("connection timeout") + decision = strategy(error, 1) + assert decision.should_retry is True - error = Exception("test error") - decision = strategy(error, 2) # Would be 200 without cap - assert decision.delay_seconds == 50 - def test_minimum_delay_one_second(self): - """Test delay is at least 1 second.""" +def test_retryable_error_message_regex(): + """Test retry based on error message regex match.""" + config = RetryStrategyConfig(retryable_errors=[re.compile(r"timeout|error")]) + strategy = create_retry_strategy(config) + + error = Exception("network timeout occurred") + decision = strategy(error, 1) + assert decision.should_retry is True + + +def test_retryable_error_type(): + """Test retry based on error type.""" + config = RetryStrategyConfig(retryable_error_types=[ValueError]) + strategy = create_retry_strategy(config) + + error = ValueError("invalid value") + decision = strategy(error, 1) + assert decision.should_retry is True + + +def test_non_retryable_error(): + """Test no retry for non-retryable error.""" + config = RetryStrategyConfig(retryable_errors=["timeout"]) + strategy = create_retry_strategy(config) + + error = Exception("permission denied") + decision = strategy(error, 1) + assert decision.should_retry is False + + +@patch("random.random") +def test_exponential_backoff_calculation(mock_random): + """Test exponential backoff delay calculation with jitter.""" + mock_random.return_value = 0.5 + config = RetryStrategyConfig( + initial_delay=Duration.from_seconds(2), + backoff_rate=2.0, + jitter_strategy=JitterStrategy.FULL, + ) + strategy = create_retry_strategy(config) + + error = Exception("test error") + + # First attempt: base = 2 * (2^0) = 2, full jitter = 0.5 * 2 = 1 + decision = strategy(error, 1) + assert decision.delay_seconds == 1 + + # Second attempt: base = 2 * (2^1) = 4, full jitter = 0.5 * 4 = 2 + decision = strategy(error, 2) + assert decision.delay_seconds == 2 + + +def test_max_delay_cap(): + """Test delay is capped at max_delay_seconds.""" + config = RetryStrategyConfig( + initial_delay=Duration.from_seconds(100), + max_delay=Duration.from_seconds(50), + backoff_rate=2.0, + jitter_strategy=JitterStrategy.NONE, + ) + strategy = create_retry_strategy(config) + + error = Exception("test error") + decision = strategy(error, 2) # Would be 200 without cap + assert decision.delay_seconds == 50 + + +def test_minimum_delay_one_second(): + """Test delay is at least 1 second.""" + config = RetryStrategyConfig( + initial_delay=Duration.from_seconds(0), jitter_strategy=JitterStrategy.NONE + ) + strategy = create_retry_strategy(config) + + error = Exception("test error") + decision = strategy(error, 1) + assert decision.delay_seconds == 1 + + +def test_delay_ceiling_applied(): + """Test delay is rounded up using math.ceil.""" + with patch("random.random", return_value=0.3): config = RetryStrategyConfig( - initial_delay_seconds=0, jitter_strategy=JitterStrategy.NONE + initial_delay=Duration.from_seconds(3), + jitter_strategy=JitterStrategy.FULL, ) strategy = create_retry_strategy(config) error = Exception("test error") decision = strategy(error, 1) + # base = 3, full jitter = 0.3 * 3 = 0.9, ceil(0.9) = 1 assert decision.delay_seconds == 1 - def test_delay_ceiling_applied(self): - """Test delay is rounded up using math.ceil.""" - with patch("random.random", return_value=0.3): - config = RetryStrategyConfig( - initial_delay_seconds=3, jitter_strategy=JitterStrategy.FULL - ) - strategy = create_retry_strategy(config) - error = Exception("test error") - decision = 
strategy(error, 1) - # 3 + (0.3 * 3) = 3.9, ceil(3.9) = 4 - assert decision.delay_seconds == 4 +# endregion -class TestRetryPresets: - """Test predefined retry presets.""" +# region Retry Presets Tests - def test_none_preset(self): - """Test none preset allows no retries.""" - strategy = RetryPresets.none() - error = Exception("test error") - decision = strategy(error, 1) - assert decision.should_retry is False +def test_none_preset(): + """Test none preset allows no retries.""" + strategy = RetryPresets.none() + error = Exception("test error") - def test_default_preset_config(self): - """Test default preset configuration.""" - strategy = RetryPresets.default() - error = Exception("test error") + decision = strategy(error, 1) + assert decision.should_retry is False - # Should retry within max attempts - decision = strategy(error, 1) - assert decision.should_retry is True - # Should not retry after max attempts - decision = strategy(error, 6) - assert decision.should_retry is False +def test_default_preset_config(): + """Test default preset configuration.""" + strategy = RetryPresets.default() + error = Exception("test error") - def test_transient_preset_config(self): - """Test transient preset configuration.""" - strategy = RetryPresets.transient() - error = Exception("test error") + # Should retry within max attempts + decision = strategy(error, 1) + assert decision.should_retry is True - # Should retry within max attempts - decision = strategy(error, 1) - assert decision.should_retry is True + # Should not retry after max attempts + decision = strategy(error, 6) + assert decision.should_retry is False - # Should not retry after max attempts - decision = strategy(error, 3) - assert decision.should_retry is False - def test_resource_availability_preset(self): - """Test resource availability preset allows longer retries.""" - strategy = RetryPresets.resource_availability() - error = Exception("test error") +def test_transient_preset_config(): + """Test transient preset configuration.""" + strategy = RetryPresets.transient() + error = Exception("test error") - # Should retry within max attempts - decision = strategy(error, 1) - assert decision.should_retry is True + # Should retry within max attempts + decision = strategy(error, 1) + assert decision.should_retry is True - # Should not retry after max attempts - decision = strategy(error, 5) - assert decision.should_retry is False + # Should not retry after max attempts + decision = strategy(error, 3) + assert decision.should_retry is False - def test_critical_preset_config(self): - """Test critical preset allows many retries.""" - strategy = RetryPresets.critical() - error = Exception("test error") - # Should retry within max attempts - decision = strategy(error, 5) - assert decision.should_retry is True +def test_resource_availability_preset(): + """Test resource availability preset allows longer retries.""" + strategy = RetryPresets.resource_availability() + error = Exception("test error") - # Should not retry after max attempts - decision = strategy(error, 10) - assert decision.should_retry is False + # Should retry within max attempts + decision = strategy(error, 1) + assert decision.should_retry is True - @patch("random.random") - def test_critical_preset_no_jitter(self, mock_random): - """Test critical preset uses no jitter.""" - mock_random.return_value = 0.5 # Should be ignored - strategy = RetryPresets.critical() - error = Exception("test error") + # Should not retry after max attempts + decision = strategy(error, 5) + assert 
decision.should_retry is False - decision = strategy(error, 1) - # With no jitter: 1 * (1.5^0) = 1 - assert decision.delay_seconds == 1 +def test_critical_preset_config(): + """Test critical preset allows many retries.""" + strategy = RetryPresets.critical() + error = Exception("test error") -class TestJitterIntegration: - """Test jitter integration with retry strategies.""" + # Should retry within max attempts + decision = strategy(error, 5) + assert decision.should_retry is True - @patch("random.random") - def test_full_jitter_integration(self, mock_random): - """Test full jitter integration in retry strategy.""" - mock_random.return_value = 0.8 - config = RetryStrategyConfig( - initial_delay_seconds=10, jitter_strategy=JitterStrategy.FULL - ) - strategy = create_retry_strategy(config) + # Should not retry after max attempts + decision = strategy(error, 10) + assert decision.should_retry is False - error = Exception("test error") - decision = strategy(error, 1) - # 10 + (0.8 * 10) = 18 - assert decision.delay_seconds == 18 - @patch("random.random") - def test_half_jitter_integration(self, mock_random): - """Test half jitter integration in retry strategy.""" - mock_random.return_value = 0.6 - config = RetryStrategyConfig( - initial_delay_seconds=10, jitter_strategy=JitterStrategy.HALF - ) - strategy = create_retry_strategy(config) +@patch("random.random") +def test_critical_preset_no_jitter(mock_random): + """Test critical preset uses no jitter.""" + mock_random.return_value = 0.5 # Should be ignored + strategy = RetryPresets.critical() + error = Exception("test error") - error = Exception("test error") - decision = strategy(error, 1) - # 10 + 10*(0.6 * 0.5 + 0.5) = 18 - assert decision.delay_seconds == 18 + decision = strategy(error, 1) + # With no jitter: 1 * (1.5^0) = 1 + assert decision.delay_seconds == 1 - @patch("random.random") - def test_half_jitter_integration_corrected(self, mock_random): - """Test half jitter with corrected understanding of implementation.""" - mock_random.return_value = 0.0 # Minimum jitter - config = RetryStrategyConfig( - initial_delay_seconds=10, jitter_strategy=JitterStrategy.HALF - ) - strategy = create_retry_strategy(config) - error = Exception("test error") - decision = strategy(error, 1) - # 10 + 10 * 0.5 = 15 - assert decision.delay_seconds == 15 +# endregion - def test_none_jitter_integration(self): - """Test no jitter integration in retry strategy.""" - config = RetryStrategyConfig( - initial_delay_seconds=10, jitter_strategy=JitterStrategy.NONE - ) - strategy = create_retry_strategy(config) - error = Exception("test error") - decision = strategy(error, 1) - assert decision.delay_seconds == 10 +# region Jitter Integration Tests -class TestEdgeCases: - """Test edge cases and error conditions.""" +@patch("random.random") +def test_full_jitter_integration(mock_random): + """Test full jitter integration in retry strategy.""" + mock_random.return_value = 0.8 + config = RetryStrategyConfig( + initial_delay=Duration.from_seconds(10), jitter_strategy=JitterStrategy.FULL + ) + strategy = create_retry_strategy(config) - def test_none_config(self): - """Test behavior when config is None.""" - strategy = create_retry_strategy(None) - error = Exception("test error") - decision = strategy(error, 1) - assert decision.should_retry is True - assert decision.delay_seconds >= 1 + error = Exception("test error") + decision = strategy(error, 1) + # base = 10, full jitter = 0.8 * 10 = 8 + assert decision.delay_seconds == 8 - def test_zero_backoff_rate(self): - """Test 
-    def test_zero_backoff_rate(self):
-        """Test behavior with zero backoff rate."""
-        config = RetryStrategyConfig(
-            initial_delay_seconds=5, backoff_rate=0, jitter_strategy=JitterStrategy.NONE
-        )
-        strategy = create_retry_strategy(config)
-        error = Exception("test error")
-        decision = strategy(error, 1)
-        # 5 * (0^0) = 5 * 1 = 5
-        assert decision.delay_seconds == 5
+@patch("random.random")
+def test_half_jitter_integration(mock_random):
+    """Test half jitter integration in retry strategy."""
+    mock_random.return_value = 0.6
+    config = RetryStrategyConfig(
+        initial_delay=Duration.from_seconds(10), jitter_strategy=JitterStrategy.HALF
+    )
+    strategy = create_retry_strategy(config)

-    def test_fractional_backoff_rate(self):
-        """Test behavior with fractional backoff rate."""
-        config = RetryStrategyConfig(
-            initial_delay_seconds=8,
-            backoff_rate=0.5,
-            jitter_strategy=JitterStrategy.NONE,
-        )
-        strategy = create_retry_strategy(config)
+    error = Exception("test error")
+    decision = strategy(error, 1)
+    # base = 10, half jitter = 10/2 + 0.6 * (10/2) = 5 + 3 = 8
+    assert decision.delay_seconds == 8

-        error = Exception("test error")
-        decision = strategy(error, 2)
-        # 8 * (0.5^1) = 4
-        assert decision.delay_seconds == 4

-    def test_empty_retryable_errors_list(self):
-        """Test behavior with empty retryable errors list."""
-        config = RetryStrategyConfig(retryable_errors=[])
-        strategy = create_retry_strategy(config)
+@patch("random.random")
+def test_half_jitter_integration_corrected(mock_random):
+    """Test half jitter with minimum random value."""
+    mock_random.return_value = 0.0  # Minimum jitter
+    config = RetryStrategyConfig(
+        initial_delay=Duration.from_seconds(10), jitter_strategy=JitterStrategy.HALF
+    )
+    strategy = create_retry_strategy(config)

-        error = Exception("test error")
-        decision = strategy(error, 1)
-        assert decision.should_retry is False
+    error = Exception("test error")
+    decision = strategy(error, 1)
+    # base = 10, half jitter = 10/2 + 0.0 * (10/2) = 5
+    assert decision.delay_seconds == 5

-    def test_multiple_error_patterns(self):
-        """Test multiple error patterns matching."""
-        config = RetryStrategyConfig(
-            retryable_errors=["timeout", re.compile(r"network.*error")]
-        )
-        strategy = create_retry_strategy(config)

-        # Test string match
-        error1 = Exception("connection timeout")
-        decision1 = strategy(error1, 1)
-        assert decision1.should_retry is True
+def test_none_jitter_integration():
+    """Test no jitter integration in retry strategy."""
+    config = RetryStrategyConfig(
+        initial_delay=Duration.from_seconds(10), jitter_strategy=JitterStrategy.NONE
+    )
+    strategy = create_retry_strategy(config)

-        # Test regex match
-        error2 = Exception("network connection error")
-        decision2 = strategy(error2, 1)
-        assert decision2.should_retry is True
+    error = Exception("test error")
+    decision = strategy(error, 1)
+    assert decision.delay_seconds == 10

-    def test_mixed_error_types_and_patterns(self):
-        """Test combination of error types and patterns."""
-        config = RetryStrategyConfig(
-            retryable_errors=["timeout"], retryable_error_types=[ValueError]
-        )
-        strategy = create_retry_strategy(config)

-        # Should retry on ValueError even without message match
-        error = ValueError("some value error")
-        decision = strategy(error, 1)
-        assert decision.should_retry is True
+# endregion
+
+
+# region Default Behavior Tests
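The tests in this region fix the filter semantics: no filters means every error is retried; otherwise message patterns and exception types are combined with OR, and an explicitly empty list matches nothing. A sketch of that rule as an assumed helper (not SDK source):

import re

def is_retryable(error: Exception, patterns=None, types=None) -> bool:
    if patterns is None and types is None:
        return True  # no filters configured: retry any error
    message = str(error)
    message_hit = any(
        p.search(message) if isinstance(p, re.Pattern) else p in message
        for p in (patterns or [])
    )
    type_hit = isinstance(error, tuple(types)) if types else False
    return message_hit or type_hit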
+def test_no_filters_retries_all_errors():
+    """Test that when neither filter is specified, all errors are retried."""
+    config = RetryStrategyConfig()
+    strategy = create_retry_strategy(config)
+
+    # Should retry any error
+    error1 = Exception("any error message")
+    decision1 = strategy(error1, 1)
+    assert decision1.should_retry is True
+
+    error2 = ValueError("different error type")
+    decision2 = strategy(error2, 1)
+    assert decision2.should_retry is True
+
+
+def test_only_retryable_errors_specified():
+    """Test that when only retryable_errors is specified, only matching messages are retried."""
+    config = RetryStrategyConfig(retryable_errors=["timeout"])
+    strategy = create_retry_strategy(config)
+
+    # Should retry matching error
+    error1 = Exception("connection timeout")
+    decision1 = strategy(error1, 1)
+    assert decision1.should_retry is True
+
+    # Should NOT retry non-matching error
+    error2 = Exception("permission denied")
+    decision2 = strategy(error2, 1)
+    assert decision2.should_retry is False
+
+
+def test_only_retryable_error_types_specified():
+    """Test that when only retryable_error_types is specified, only matching types are retried."""
+    config = RetryStrategyConfig(retryable_error_types=[ValueError, TypeError])
+    strategy = create_retry_strategy(config)
+
+    # Should retry matching type
+    error1 = ValueError("invalid value")
+    decision1 = strategy(error1, 1)
+    assert decision1.should_retry is True
+
+    error2 = TypeError("type error")
+    decision2 = strategy(error2, 1)
+    assert decision2.should_retry is True
+
+    # Should NOT retry non-matching type (even though message might match default pattern)
+    error3 = Exception("some error")
+    decision3 = strategy(error3, 1)
+    assert decision3.should_retry is False
+
+
+def test_both_filters_specified_or_logic():
+    """Test that when both filters are specified, errors matching either are retried (OR logic)."""
+    config = RetryStrategyConfig(
+        retryable_errors=["timeout"], retryable_error_types=[ValueError]
+    )
+    strategy = create_retry_strategy(config)
+
+    # Should retry on message match
+    error1 = Exception("connection timeout")
+    decision1 = strategy(error1, 1)
+    assert decision1.should_retry is True
+
+    # Should retry on type match
+    error2 = ValueError("some value error")
+    decision2 = strategy(error2, 1)
+    assert decision2.should_retry is True
+
+    # Should NOT retry when neither matches
+    error3 = RuntimeError("runtime error")
+    decision3 = strategy(error3, 1)
+    assert decision3.should_retry is False
+
+
+def test_empty_retryable_errors_with_types():
+    """Test that empty retryable_errors list with types specified only retries matching types."""
+    config = RetryStrategyConfig(
+        retryable_errors=[], retryable_error_types=[ValueError]
+    )
+    strategy = create_retry_strategy(config)
+
+    # Should retry matching type
+    error1 = ValueError("value error")
+    decision1 = strategy(error1, 1)
+    assert decision1.should_retry is True
+
+    # Should NOT retry non-matching type
+    error2 = Exception("some error")
+    decision2 = strategy(error2, 1)
+    assert decision2.should_retry is False
+
+
+def test_empty_retryable_error_types_with_errors():
+    """Test that empty retryable_error_types list with errors specified only retries matching messages."""
+    config = RetryStrategyConfig(retryable_errors=["timeout"], retryable_error_types=[])
+    strategy = create_retry_strategy(config)
+
+    # Should retry matching message
+    error1 = Exception("connection timeout")
+    decision1 = strategy(error1, 1)
+    assert decision1.should_retry is True
+
+    # Should NOT retry non-matching message
+    error2 = Exception("permission denied")
+    decision2 = strategy(error2, 1)
+    assert decision2.should_retry is False
+
+
+# endregion
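Before the edge-case region below, the base delay formula those tests assert is worth making explicit, assuming delay = initial * backoff_rate ** (attempt - 1) before jitter:

# Matches test_zero_backoff_rate: Python defines 0 ** 0 == 1, so the first
# attempt keeps the initial delay even with backoff_rate=0.
assert 5 * 0 ** (1 - 1) == 5

# Matches test_fractional_backoff_rate: a rate below 1 shrinks the delay,
# giving 8 * 0.5 ** (2 - 1) == 4 on the second attempt.
assert 8 * 0.5 ** (2 - 1) == 4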
+
+
+# region Edge Cases Tests
+
+
+def test_none_config():
+    """Test behavior when config is None."""
+    strategy = create_retry_strategy(None)
+    error = Exception("test error")
+    decision = strategy(error, 1)
+    assert decision.should_retry is True
+    assert decision.delay_seconds >= 1
+
+
+def test_zero_backoff_rate():
+    """Test behavior with zero backoff rate."""
+    config = RetryStrategyConfig(
+        initial_delay=Duration.from_seconds(5),
+        backoff_rate=0,
+        jitter_strategy=JitterStrategy.NONE,
+    )
+    strategy = create_retry_strategy(config)
+
+    error = Exception("test error")
+    decision = strategy(error, 1)
+    # 5 * (0^0) = 5 * 1 = 5
+    assert decision.delay_seconds == 5
+
+
+def test_fractional_backoff_rate():
+    """Test behavior with fractional backoff rate."""
+    config = RetryStrategyConfig(
+        initial_delay=Duration.from_seconds(8),
+        backoff_rate=0.5,
+        jitter_strategy=JitterStrategy.NONE,
+    )
+    strategy = create_retry_strategy(config)
+
+    error = Exception("test error")
+    decision = strategy(error, 2)
+    # 8 * (0.5^1) = 4
+    assert decision.delay_seconds == 4
+
+
+def test_empty_retryable_errors_list():
+    """Test behavior with empty retryable errors list."""
+    config = RetryStrategyConfig(retryable_errors=[])
+    strategy = create_retry_strategy(config)
+
+    error = Exception("test error")
+    decision = strategy(error, 1)
+    assert decision.should_retry is False
+
+
+def test_multiple_error_patterns():
+    """Test multiple error patterns matching."""
+    config = RetryStrategyConfig(
+        retryable_errors=["timeout", re.compile(r"network.*error")]
+    )
+    strategy = create_retry_strategy(config)
+
+    # Test string match
+    error1 = Exception("connection timeout")
+    decision1 = strategy(error1, 1)
+    assert decision1.should_retry is True
+
+    # Test regex match
+    error2 = Exception("network connection error")
+    decision2 = strategy(error2, 1)
+    assert decision2.should_retry is True
+
+
+def test_mixed_error_types_and_patterns():
+    """Test combination of error types and patterns."""
+    config = RetryStrategyConfig(
+        retryable_errors=["timeout"], retryable_error_types=[ValueError]
+    )
+    strategy = create_retry_strategy(config)
+
+    # Should retry on ValueError even without message match
+    error = ValueError("some value error")
+    decision = strategy(error, 1)
+    assert decision.should_retry is True
+
+
+# endregion
diff --git a/tests/serdes_test.py b/tests/serdes_test.py
index 91baf2c..d511918 100644
--- a/tests/serdes_test.py
+++ b/tests/serdes_test.py
@@ -8,11 +8,18 @@

 import pytest

+from aws_durable_execution_sdk_python.concurrency.models import (
+    BatchItem,
+    BatchItemStatus,
+    BatchResult,
+    CompletionReason,
+)
 from aws_durable_execution_sdk_python.exceptions import (
     DurableExecutionsError,
     ExecutionError,
     SerDesError,
 )
+from aws_durable_execution_sdk_python.lambda_service import ErrorObject
 from aws_durable_execution_sdk_python.serdes import (
     BytesCodec,
     ContainerCodec,
@@ -21,6 +28,7 @@
     EncodedValue,
     ExtendedTypeSerDes,
     JsonSerDes,
+    PassThroughSerDes,
     PrimitiveCodec,
     SerDes,
     SerDesContext,
@@ -730,6 +738,18 @@ def test_extended_serdes_errors():
 # endregion


+def test_pass_through_serdes():
+    serdes = PassThroughSerDes()
+
+    data = '"name": "test", "value": 123'
+    serialized = serialize(serdes, data, "test-op", "test-arn")
+    assert isinstance(serialized, str)
+    assert serialized == '"name": "test", "value": 123'
+    # Round-trip through deserialize; a pass-through serdes returns the string unchanged
+    deserialized = deserialize(serdes, serialized, "test-op", "test-arn")
+    assert deserialized == data
+
+
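A hedged usage note: PassThroughSerDes appears aimed at payloads that are already strings, such as pre-rendered JSON, which it hands through without an envelope. A sketch using the same module-level serialize/deserialize helpers this test file uses:

serdes = PassThroughSerDes()
payload = '{"already": "serialized"}'  # hypothetical pre-encoded payload
round_tripped = deserialize(
    serdes,
    serialize(serdes, payload, "op-1", "arn:example"),
    "op-1",
    "arn:example",
)
# Per test_pass_through_serdes, the serialized form is the input string itself.
assert round_tripped == payload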
"""Test with reasonably large data.""" @@ -894,3 +914,82 @@ def test_all_t_v_nested_dicts(): # endregion + + +# to_dict() support tests +def test_default_serdes_supports_to_dict_objects(): + """Test that default serdes automatically handles BatchResult serialization/deserialization.""" + + result = BatchResult( + all=[BatchItem(0, BatchItemStatus.SUCCEEDED, result="test")], + completion_reason=CompletionReason.ALL_COMPLETED, + ) + + # Default serdes should automatically handle BatchResult + serialized = serialize( + serdes=None, + value=result, + operation_id="test_op", + durable_execution_arn="arn:test", + ) + + # Deserialize returns BatchResult (not dict) + deserialized = deserialize( + serdes=None, + data=serialized, + operation_id="test_op", + durable_execution_arn="arn:test", + ) + + assert isinstance(deserialized, BatchResult) + assert deserialized.completion_reason == CompletionReason.ALL_COMPLETED + assert len(deserialized.all) == 1 + assert deserialized.all[0].result == "test" + + +def test_to_dict_output_is_serializable(): + """Test that to_dict() output is serializable by default serdes.""" + + result = BatchResult( + all=[ + BatchItem(0, BatchItemStatus.SUCCEEDED, result={"key": "value"}), + BatchItem( + 1, + BatchItemStatus.FAILED, + error=ErrorObject( + message="error", type="TestError", data=None, stack_trace=[] + ), + ), + ], + completion_reason=CompletionReason.ALL_COMPLETED, + ) + + # Convert to dict + result_dict = result.to_dict() + + # Dict should be serializable + serialized = serialize( + serdes=None, + value=result_dict, + operation_id="test_op", + durable_execution_arn="arn:test", + ) + + # Deserialize + deserialized_dict = deserialize( + serdes=None, + data=serialized, + operation_id="test_op", + durable_execution_arn="arn:test", + ) + + # Verify structure preserved + assert deserialized_dict["completionReason"] == "ALL_COMPLETED" + assert len(deserialized_dict["all"]) == 2 + assert deserialized_dict["all"][0]["result"] == {"key": "value"} + assert deserialized_dict["all"][1]["error"]["ErrorType"] == "TestError" + + # Can reconstruct BatchResult + reconstructed = BatchResult.from_dict(deserialized_dict) + assert len(reconstructed.all) == 2 + assert reconstructed.completion_reason == CompletionReason.ALL_COMPLETED diff --git a/tests/state_test.py b/tests/state_test.py index b4e9d9f..d997abf 100644 --- a/tests/state_test.py +++ b/tests/state_test.py @@ -16,6 +16,7 @@ from aws_durable_execution_sdk_python.exceptions import ( BackgroundThreadError, CallableRuntimeError, + OrphanedChildException, ) from aws_durable_execution_sdk_python.identifier import OperationIdentifier from aws_durable_execution_sdk_python.lambda_service import ( @@ -39,6 +40,7 @@ CheckpointedResult, ExecutionState, QueuedOperation, + ReplayStatus, ) from aws_durable_execution_sdk_python.threading import CompletionEvent @@ -1090,20 +1092,18 @@ def test_rejection_of_operations_from_completed_parents(): ) state.create_checkpoint(parent_complete, is_sync=False) - # Get initial queue size - initial_queue_size = state._checkpoint_queue.qsize() - - # Try to checkpoint child operation (should be rejected) + # Try to checkpoint child operation (should raise OrphanedChildException) child_checkpoint = OperationUpdate( operation_id="child_1", operation_type=OperationType.STEP, action=OperationAction.SUCCEED, parent_id="parent_1", ) - state.create_checkpoint(child_checkpoint, is_sync=False) + with pytest.raises(OrphanedChildException) as exc_info: + state.create_checkpoint(child_checkpoint, is_sync=False) - # 
diff --git a/tests/state_test.py b/tests/state_test.py
index b4e9d9f..d997abf 100644
--- a/tests/state_test.py
+++ b/tests/state_test.py
@@ -16,6 +16,7 @@ from aws_durable_execution_sdk_python.exceptions import (
     BackgroundThreadError,
     CallableRuntimeError,
+    OrphanedChildException,
 )
 from aws_durable_execution_sdk_python.identifier import OperationIdentifier
 from aws_durable_execution_sdk_python.lambda_service import (
@@ -39,6 +40,7 @@
     CheckpointedResult,
     ExecutionState,
     QueuedOperation,
+    ReplayStatus,
 )
 from aws_durable_execution_sdk_python.threading import CompletionEvent
@@ -1090,20 +1092,18 @@ def test_rejection_of_operations_from_completed_parents():
     )
     state.create_checkpoint(parent_complete, is_sync=False)

-    # Get initial queue size
-    initial_queue_size = state._checkpoint_queue.qsize()
-
-    # Try to checkpoint child operation (should be rejected)
+    # Try to checkpoint child operation (should raise OrphanedChildException)
     child_checkpoint = OperationUpdate(
         operation_id="child_1",
         operation_type=OperationType.STEP,
         action=OperationAction.SUCCEED,
         parent_id="parent_1",
     )
-    state.create_checkpoint(child_checkpoint, is_sync=False)
+    with pytest.raises(OrphanedChildException) as exc_info:
+        state.create_checkpoint(child_checkpoint, is_sync=False)

-    # Verify operation was rejected (queue size unchanged)
-    assert state._checkpoint_queue.qsize() == initial_queue_size
+    # Verify exception contains operation_id
+    assert exc_info.value.operation_id == "child_1"


 def test_nested_parallel_operations_deep_hierarchy():
@@ -1473,20 +1473,18 @@ def process_sync_checkpoint():
     state.create_checkpoint(parent_complete, is_sync=True)
     processor.join(timeout=1.0)

-    # Get queue size before attempting to checkpoint orphaned child
-    initial_queue_size = state._checkpoint_queue.qsize()
-
-    # Try to checkpoint child (should be rejected)
+    # Try to checkpoint child (should raise OrphanedChildException)
     child_checkpoint = OperationUpdate(
         operation_id="child_1",
         operation_type=OperationType.STEP,
         action=OperationAction.SUCCEED,
         parent_id="parent_1",
     )
-    state.create_checkpoint(child_checkpoint, is_sync=True)
+    with pytest.raises(OrphanedChildException) as exc_info:
+        state.create_checkpoint(child_checkpoint, is_sync=True)

-    # Verify operation was rejected (queue size unchanged)
-    assert state._checkpoint_queue.qsize() == initial_queue_size
+    # Verify exception contains operation_id
+    assert exc_info.value.operation_id == "child_1"


 def test_mark_orphans_handles_cycles():
@@ -3242,3 +3240,28 @@ def test_create_checkpoint_sync_always_synchronous():
     finally:
         state.stop_checkpointing()
         executor.shutdown(wait=True)
+
+
+def test_state_replay_mode():
+    operation1 = Operation(
+        operation_id="op1",
+        operation_type=OperationType.STEP,
+        status=OperationStatus.SUCCEEDED,
+    )
+    operation2 = Operation(
+        operation_id="op2",
+        operation_type=OperationType.STEP,
+        status=OperationStatus.SUCCEEDED,
+    )
+    execution_state = ExecutionState(
+        durable_execution_arn="arn:aws:test",
+        initial_checkpoint_token="test_token",  # noqa: S106
+        operations={"op1": operation1, "op2": operation2},
+        service_client=Mock(),
+        replay_status=ReplayStatus.REPLAY,
+    )
+    assert execution_state.is_replaying() is True
+    execution_state.track_replay(operation_id="op1")
+    assert execution_state.is_replaying() is True
+    execution_state.track_replay(operation_id="op2")
+    assert execution_state.is_replaying() is False
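test_state_replay_mode implies a simple counting contract: the state starts in REPLAY and keeps reporting is_replaying() until every recorded operation has been replayed once. A sketch of that bookkeeping (assumed logic, not the SDK's actual implementation):

class ReplayTracker:
    """Hypothetical stand-in for ExecutionState's replay bookkeeping."""

    def __init__(self, recorded_operation_ids: set[str]):
        self._pending = set(recorded_operation_ids)

    def track_replay(self, operation_id: str) -> None:
        # Mark one recorded operation as replayed, as the test does for op1/op2.
        self._pending.discard(operation_id)

    def is_replaying(self) -> bool:
        # True until every recorded operation has been seen again.
        return bool(self._pending)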
diff --git a/tests/waits_test.py b/tests/waits_test.py
index 09b7fe5..06267d8 100644
--- a/tests/waits_test.py
+++ b/tests/waits_test.py
@@ -2,7 +2,7 @@

 from unittest.mock import patch

-from aws_durable_execution_sdk_python.config import JitterStrategy
+from aws_durable_execution_sdk_python.config import Duration, JitterStrategy
 from aws_durable_execution_sdk_python.serdes import JsonSerDes
 from aws_durable_execution_sdk_python.waits import (
     WaitDecision,
@@ -18,7 +18,7 @@ class TestWaitDecision:

     def test_wait_factory(self):
         """Test wait factory method."""
-        decision = WaitDecision.wait(30)
+        decision = WaitDecision.wait(Duration.from_seconds(30))
         assert decision.should_wait is True
         assert decision.delay_seconds == 30
@@ -34,7 +34,7 @@ class TestWaitForConditionDecision:

     def test_continue_waiting_factory(self):
         """Test continue_waiting factory method."""
-        decision = WaitForConditionDecision.continue_waiting(45)
+        decision = WaitForConditionDecision.continue_waiting(Duration.from_seconds(45))
         assert decision.should_continue is True
         assert decision.delay_seconds == 45
@@ -42,7 +42,7 @@ def test_stop_polling_factory(self):
         """Test stop_polling factory method."""
         decision = WaitForConditionDecision.stop_polling()
         assert decision.should_continue is False
-        assert decision.delay_seconds == -1
+        assert decision.delay_seconds == 0


 class TestWaitStrategyConfig:
@@ -97,7 +97,7 @@ def test_exponential_backoff_calculation(self, mock_random):
         mock_random.return_value = 0.5
         config = WaitStrategyConfig(
             should_continue_polling=lambda x: True,
-            initial_delay_seconds=2,
+            initial_delay=Duration.from_seconds(2),
             backoff_rate=2.0,
             jitter_strategy=JitterStrategy.FULL,
         )
@@ -105,20 +105,20 @@ def test_exponential_backoff_calculation(self, mock_random):

         result = "pending"

-        # First attempt: 2 * (2^0) = 2, jitter adds 1, total = 3
+        # First attempt: 2 * (2^0) = 2, FULL jitter with 0.5 = 0.5 * 2 = 1
         decision = strategy(result, 1)
-        assert decision.delay_seconds == 3
+        assert decision.delay_seconds == 1

-        # Second attempt: 2 * (2^1) = 4, jitter adds 2, total = 6
+        # Second attempt: 2 * (2^1) = 4, FULL jitter with 0.5 = 0.5 * 4 = 2
         decision = strategy(result, 2)
-        assert decision.delay_seconds == 6
+        assert decision.delay_seconds == 2

     def test_max_delay_cap(self):
         """Test delay is capped at max_delay_seconds."""
         config = WaitStrategyConfig(
             should_continue_polling=lambda x: True,
-            initial_delay_seconds=100,
-            max_delay_seconds=50,
+            initial_delay=Duration.from_seconds(100),
+            max_delay=Duration.from_seconds(50),
             backoff_rate=2.0,
             jitter_strategy=JitterStrategy.NONE,
         )
@@ -132,7 +132,7 @@ def test_minimum_delay_one_second(self):
         """Test delay is at least 1 second."""
         config = WaitStrategyConfig(
             should_continue_polling=lambda x: True,
-            initial_delay_seconds=0,
+            initial_delay=Duration.from_seconds(0),
             jitter_strategy=JitterStrategy.NONE,
         )
         strategy = create_wait_strategy(config)
@@ -147,15 +147,15 @@ def test_full_jitter_integration(self, mock_random):
         mock_random.return_value = 0.8
         config = WaitStrategyConfig(
             should_continue_polling=lambda x: True,
-            initial_delay_seconds=10,
+            initial_delay=Duration.from_seconds(10),
             jitter_strategy=JitterStrategy.FULL,
         )
         strategy = create_wait_strategy(config)

         result = "pending"
         decision = strategy(result, 1)
-        # 10 + (0.8 * 10) = 18
-        assert decision.delay_seconds == 18
+        # FULL jitter: 0.8 * 10 = 8
+        assert decision.delay_seconds == 8

     @patch("random.random")
     def test_half_jitter_integration(self, mock_random):
@@ -163,21 +163,21 @@ def test_half_jitter_integration(self, mock_random):
         mock_random.return_value = 0.0  # Minimum jitter
         config = WaitStrategyConfig(
             should_continue_polling=lambda x: True,
-            initial_delay_seconds=10,
+            initial_delay=Duration.from_seconds(10),
             jitter_strategy=JitterStrategy.HALF,
         )
         strategy = create_wait_strategy(config)

         result = "pending"
         decision = strategy(result, 1)
-        # base: 10, jitter: 10 * (0.5 + 0.0 * 0.5) = 5, total: 10 + 5 = 15
-        assert decision.delay_seconds == 15
+        # HALF jitter: 10/2 + 0.0 * (10/2) = 5
+        assert decision.delay_seconds == 5

     def test_none_jitter_integration(self):
         """Test no jitter integration in wait strategy."""
         config = WaitStrategyConfig(
             should_continue_polling=lambda x: True,
-            initial_delay_seconds=10,
+            initial_delay=Duration.from_seconds(10),
             jitter_strategy=JitterStrategy.NONE,
         )
         strategy = create_wait_strategy(config)
@@ -244,7 +244,7 @@ def test_zero_backoff_rate(self):
         """Test behavior with zero backoff rate."""
         config = WaitStrategyConfig(
             should_continue_polling=lambda x: True,
-            initial_delay_seconds=5,
+            initial_delay=Duration.from_seconds(5),
             backoff_rate=0,
             jitter_strategy=JitterStrategy.NONE,
         )
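These hunks migrate the wait API from bare integer seconds to Duration. A hedged usage sketch of the polling callback shape, modeled on the test_config_creation hunk below (the state dict and threshold are made up for illustration):

from aws_durable_execution_sdk_python.config import Duration
from aws_durable_execution_sdk_python.waits import WaitForConditionDecision

def wait_strategy(state, attempt):
    if state.get("count", 0) < 3:  # hypothetical condition
        # Keep polling, checking again in 10 seconds.
        return WaitForConditionDecision.continue_waiting(Duration.from_seconds(10))
    # Done: note stop_polling() now reports delay_seconds == 0, not -1.
    return WaitForConditionDecision.stop_polling()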
@@ -259,7 +259,7 @@ def test_fractional_backoff_rate(self):
         """Test behavior with fractional backoff rate."""
         config = WaitStrategyConfig(
             should_continue_polling=lambda x: True,
-            initial_delay_seconds=8,
+            initial_delay=Duration.from_seconds(8),
             backoff_rate=0.5,
             jitter_strategy=JitterStrategy.NONE,
         )
@@ -274,8 +274,8 @@ def test_large_backoff_rate(self):
         """Test behavior with large backoff rate hits max delay."""
         config = WaitStrategyConfig(
             should_continue_polling=lambda x: True,
-            initial_delay_seconds=10,
-            max_delay_seconds=100,
+            initial_delay=Duration.from_seconds(10),
+            max_delay=Duration.from_seconds(100),
             backoff_rate=10.0,
             jitter_strategy=JitterStrategy.NONE,
         )
@@ -307,7 +307,7 @@ def test_negative_delay_clamped_to_one(self):
         """Test negative delay is clamped to 1."""
         config = WaitStrategyConfig(
             should_continue_polling=lambda x: True,
-            initial_delay_seconds=0,
+            initial_delay=Duration.from_seconds(0),
             backoff_rate=0,
             jitter_strategy=JitterStrategy.NONE,
         )
@@ -323,15 +323,15 @@ def test_rounding_behavior(self, mock_random):
         mock_random.return_value = 0.3
         config = WaitStrategyConfig(
             should_continue_polling=lambda x: True,
-            initial_delay_seconds=3,
+            initial_delay=Duration.from_seconds(3),
             jitter_strategy=JitterStrategy.FULL,
         )
         strategy = create_wait_strategy(config)

         result = "pending"
         decision = strategy(result, 1)
-        # 3 + (0.3 * 3) = 3.9, round(3.9) = 4
-        assert decision.delay_seconds == 4
+        # FULL jitter: 0.3 * 3 = 0.9, ceil(0.9) = 1
+        assert decision.delay_seconds == 1


 class TestWaitForConditionConfig:
@@ -341,7 +341,7 @@ def test_config_creation(self):
         """Test creating WaitForConditionConfig."""

         def wait_strategy(state, attempt):
-            return WaitForConditionDecision.continue_waiting(10)
+            return WaitForConditionDecision.continue_waiting(Duration.from_seconds(10))

         config = WaitForConditionConfig(
             wait_strategy=wait_strategy, initial_state={"count": 0}