diff --git a/.circleci/config.yml b/.circleci/config.yml index 57495e70..b55277ca 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -19,8 +19,8 @@ commands: CHANGED_FILES=$(git diff --name-only origin/main...HEAD) # Check if any relevant files changed - echo "$CHANGED_FILES" | grep -q -E "^(src/|tests/|tests_aws/|.circleci/)" || { - echo "No changes in src/, tests/, tests_aws/, or .circleci directories. Skipping tests." + echo "$CHANGED_FILES" | grep -q -E "^(src/|tests/|tests_autowrapt/|tests_aws/|.circleci/|pyproject.toml)" || { + echo "No changes in src/, tests/, tests_autowrapt/, tests_aws/, .circleci directories or pyproject.toml file. Skipping tests." circleci step halt } @@ -66,7 +66,7 @@ commands: name: Run Tests With Coverage Report environment: CASSANDRA_TEST: "<<parameters.cassandra>>" - GEVENT_STARLETTE_TEST: "<<parameters.gevent>>" + GEVENT_TEST: "<<parameters.gevent>>" KAFKA_TEST: "<<parameters.kafka>>" command: | . venv/bin/activate @@ -90,29 +90,34 @@ commands: steps: - attach_workspace: at: . - - run: - name: Install Java - command: | - sudo apt-get update - sudo apt-get install openjdk-11-jdk - run: name: Run SonarQube to report the coverage command: | + python -m venv venv . venv/bin/activate + + pip install --upgrade pip coverage coverage combine ./coverage_results coverage xml -i - wget -O /tmp/sonar-scanner-cli.zip https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-4.8.1.3023.zip - unzip -d /tmp /tmp/sonar-scanner-cli.zip - if [[ -n "${CIRCLE_PR_NUMBER}" ]]; then - /tmp/sonar-scanner-4.8.1.3023/bin/sonar-scanner \ - -Dsonar.host.url=${SONARQUBE_URL} \ - -Dsonar.login="${SONARQUBE_LOGIN}" \ - -Dsonar.pullrequest.key="${CIRCLE_PR_NUMBER}" \ + + PR_NUMBER=$(echo ${CIRCLE_PULL_REQUEST} | sed 's/.*\///') + SONAR_TOKEN=${SONAR_TOKEN} + + pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple/ pysonar-scanner + export SONAR_SCANNER_OPTS="-server" + + if [[ -n "${PR_NUMBER}" ]]; then + pysonar-scanner \ + -Dsonar.organization=instana \ + -Dsonar.projectKey=instana_python-sensor \ + -Dsonar.host.url="${SONARQUBE_URL}" \ + -Dsonar.pullrequest.key="${PR_NUMBER}" \ -Dsonar.pullrequest.branch="${CIRCLE_BRANCH}" else - /tmp/sonar-scanner-4.8.1.3023/bin/sonar-scanner \ - -Dsonar.host.url=${SONARQUBE_URL} \ - -Dsonar.login="${SONARQUBE_LOGIN}" \ + pysonar-scanner \ + -Dsonar.organization=instana \ + -Dsonar.projectKey=instana_python-sensor \ + -Dsonar.host.url="${SONARQUBE_URL}" \ -Dsonar.branch.name="${CIRCLE_BRANCH}" fi - store_artifacts: @@ -130,7 +135,7 @@ jobs: type: string docker: - image: public.ecr.aws/docker/library/python:<<parameters.py-version>> - - image: public.ecr.aws/docker/library/postgres:16.2-bookworm + - image: public.ecr.aws/docker/library/postgres:16.10-trixie environment: POSTGRES_USER: root POSTGRES_PASSWORD: passw0rd POSTGRES_DB: instana_test_db @@ -156,38 +161,6 @@ jobs: - store-pytest-results - store-coverage-report - python314: - docker: - - image: ghcr.io/pvital/pvital-py3.14.0:latest - - image: public.ecr.aws/docker/library/postgres:16.2-bookworm - environment: - POSTGRES_USER: root - POSTGRES_PASSWORD: passw0rd - POSTGRES_DB: instana_test_db - - image: public.ecr.aws/docker/library/mariadb:11.3.2 - environment: - MYSQL_ROOT_PASSWORD: passw0rd - MYSQL_DATABASE: instana_test_db - - image: public.ecr.aws/docker/library/redis:7.2.4-bookworm - - image: public.ecr.aws/docker/library/rabbitmq:3.13.0 - - image: public.ecr.aws/docker/library/mongo:7.0.6 - - image: quay.io/thekevjames/gcloud-pubsub-emulator:latest - environment: - PUBSUB_EMULATOR_HOST: 0.0.0.0:8681 - PUBSUB_PROJECT1: 
test-project,test-topic - working_directory: ~/repo - steps: - - checkout - - check-if-tests-needed - - run: | - cp -a /root/base/venv ./venv - . venv/bin/activate - pip install 'wheel==0.45.1' - pip install -r requirements.txt - - run-tests-with-coverage-report - - store-pytest-results - - store-coverage-report - py39cassandra: docker: - image: public.ecr.aws/docker/library/python:3.9 @@ -208,7 +181,7 @@ jobs: - store-pytest-results - store-coverage-report - py39gevent_starlette: + py39gevent: docker: - image: public.ecr.aws/docker/library/python:3.9 working_directory: ~/repo @@ -219,10 +192,8 @@ jobs: - pip-install-tests-deps: requirements: "tests/requirements-gevent-starlette.txt" - run-tests-with-coverage-report: - # TODO: uncomment once gevent instrumentation is done - # gevent: "true" - # tests: "tests/frameworks/test_gevent.py tests/frameworks/test_starlette.py" - tests: "tests/frameworks/test_starlette.py" + gevent: "true" + tests: "tests/frameworks/test_gevent.py" - store-pytest-results - store-coverage-report @@ -241,18 +212,37 @@ jobs: - store-pytest-results - store-coverage-report - py312kafka: + py313kafka: docker: - - image: public.ecr.aws/docker/library/python:3.12 - - image: public.ecr.aws/bitnami/kafka:3.9.0 + - image: public.ecr.aws/docker/library/python:3.13 + - image: public.ecr.aws/ubuntu/zookeeper:3.1-22.04_edge + environment: + TZ: UTC + - image: public.ecr.aws/ubuntu/kafka:3.1-22.04_edge environment: - KAFKA_CFG_NODE_ID: 0 - KAFKA_CFG_PROCESS_ROLES: controller,broker - KAFKA_CFG_LISTENERS: PLAINTEXT://:9092,CONTROLLER://:9093,EXTERNAL://:9094 - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT,PLAINTEXT:PLAINTEXT - KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: 0@localhost:9093 - KAFKA_CFG_CONTROLLER_LISTENER_NAMES: CONTROLLER - KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://localhost:9092,EXTERNAL://localhost:9094 + TZ: UTC + ZOOKEEPER_HOST: localhost + ZOOKEEPER_PORT: 2181 + command: + - /opt/kafka/config/server.properties + - --override + - listeners=INTERNAL://0.0.0.0:9093,EXTERNAL://0.0.0.0:9094 + - --override + - advertised.listeners=INTERNAL://localhost:9093,EXTERNAL://localhost:9094 + - --override + - listener.security.protocol.map=INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT + - --override + - inter.broker.listener.name=INTERNAL + - --override + - broker.id=1 + - --override + - offsets.topic.replication.factor=1 + - --override + - transaction.state.log.replication.factor=1 + - --override + - transaction.state.log.min.isr=1 + - --override + - auto.create.topics.enable=true working_directory: ~/repo steps: - checkout @@ -288,39 +278,34 @@ jobs: final_job: docker: - - image: public.ecr.aws/docker/library/python:3.9 + - image: public.ecr.aws/docker/library/python:3.13 working_directory: ~/repo steps: - checkout - check-if-tests-needed - - pip-install-deps - - pip-install-tests-deps - - store-pytest-results - # - run_sonarqube + - run_sonarqube workflows: tests: + max_auto_reruns: 2 jobs: - python3x: matrix: parameters: - py-version: ["3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] - - python314 + py-version: ["3.9", "3.10", "3.11", "3.12", "3.13", "3.14"] - py39cassandra - - py39gevent_starlette + - py39gevent - py312aws - - py312kafka + - py313kafka - autowrapt: matrix: parameters: - py-version: ["3.11", "3.12", "3.13"] + py-version: ["3.11", "3.12", "3.13", "3.14"] - final_job: requires: - python3x - # Uncomment the following when giving real support to 3.14 - # - python314 - py39cassandra - - py39gevent_starlette + - py39gevent - py312aws - - 
py312kafka + - py313kafka - autowrapt diff --git a/.github/scripts/announce_pr_on_slack.py b/.github/scripts/announce_pr_on_slack.py new file mode 100644 index 00000000..8745988b --- /dev/null +++ b/.github/scripts/announce_pr_on_slack.py @@ -0,0 +1,151 @@ +#!/usr/bin/env python3 +""" +GitHub Actions script to send Slack notifications for new pull requests. +""" + +import os +import sys +from typing import Tuple + +import httpx + + +def send_slack_message( + slack_team: str, slack_service: str, slack_token: str, message: str +) -> bool: + """Send a message to Slack channel.""" + + url = ( + f"https://hooks.slack.com/services/T{slack_team}/B{slack_service}/{slack_token}" + ) + + headers = { + "Content-Type": "application/json", + } + + data = {"text": message} + + ret = False + with httpx.Client() as client: + response = client.post(url, headers=headers, json=data) + response.raise_for_status() + + result = response.text + if "ok" in result: + print("✅ Slack message sent successfully") + ret = True + else: + print(f"❌ Slack API error: {result}") + ret = False + + return ret + + +def ensure_environment_variables_are_present() -> ( + Tuple[str, str, str, str, str, str, str, str] +): + """ + Ensures that all necessary environment variables are present for the application to run. + + This function checks for the presence of required environment variables related to Slack bot token, + Pull Request (PR) details, and repository name. It also validates that the Slack channel is set. + + Raises: + SystemExit: If any of the required environment variables are missing. + + Returns: + A tuple containing the values of the following environment variables: + - SLACK_TOKEN: The token for the Slack bot. + - SLACK_TEAM: The ID of the Slack team. + - SLACK_SERVICE: The ID of the Slack service. + - PR_NUMBER: The number of the Pull Request. + - PR_TITLE: The title of the Pull Request. + - PR_URL: The URL of the Pull Request. + - PR_AUTHOR: The author of the Pull Request. + - REPO_NAME: The name of the repository. 
+ """ + # Get environment variables + slack_token = os.getenv("SLACK_TOKEN") + slack_team = os.getenv("SLACK_TEAM") + slack_service = os.getenv("SLACK_SERVICE") + pr_number = os.getenv("PR_NUMBER") + pr_title = os.getenv("PR_TITLE") + pr_url = os.getenv("PR_URL") + pr_author = os.getenv("PR_AUTHOR") + repo_name = os.getenv("REPO_NAME") + + # Validate required environment variables + if not slack_token: + print("❌ SLACK_TOKEN environment variable is required") + sys.exit(1) + + if not slack_team: + print("❌ SLACK_TEAM environment variable is required") + sys.exit(1) + + if not slack_service: + print("❌ SLACK_SERVICE environment variable is required") + sys.exit(1) + + if not all([pr_number, pr_title, pr_url, pr_author, repo_name]): + print( + "❌ Missing required PR information (PR_NUMBER, PR_TITLE, PR_URL, PR_AUTHOR, REPO_NAME)" + ) + sys.exit(1) + + # Since we're validating these variables, we can assert they're not None + assert pr_number is not None + assert pr_title is not None + assert pr_url is not None + assert pr_author is not None + assert repo_name is not None + + return ( + slack_token, + slack_team, + slack_service, + pr_number, + pr_title, + pr_url, + pr_author, + repo_name, + ) + + +def main() -> None: + """Main function to process PR and send Slack notification.""" + + ( + slack_token, + slack_team, + slack_service, + pr_number, + pr_title, + pr_url, + pr_author, + repo_name, + ) = ensure_environment_variables_are_present() + + print(f"Processing PR #{pr_number}") + + # Create Slack message + message = ( + f":mega: Oyez! Oyez! Oyez!\n" + f"Hello Team. Please, review the opened PR #{pr_number} in {repo_name}\n" + f"*{pr_title}* by @{pr_author}\n" + f":pull-request-opened: {pr_url}" + ) + + # Send to Slack + success = send_slack_message(slack_team, slack_service, slack_token, message) + + if not success: + sys.exit(1) + + print("✅ Process completed successfully") + + +if __name__ == "__main__": + main() + +# Made with Bob diff --git a/.github/scripts/announce_release_on_slack.py b/.github/scripts/announce_release_on_slack.py new file mode 100755 index 00000000..5d97cb5a --- /dev/null +++ b/.github/scripts/announce_release_on_slack.py @@ -0,0 +1,91 @@ +#!/usr/bin/env python3 + +import logging +import os +import sys + +import httpx +from github import Github + + +def ensure_environment_variables_are_present() -> None: + required_env_vars = ( + "GITHUB_RELEASE_TAG", + "GITHUB_TOKEN", + "SLACK_TOKEN", + "SLACK_SERVICE", + "SLACK_TEAM", + ) + + for env_var in required_env_vars: + if env_var not in os.environ: + logging.fatal(f"❌ A required environment variable is missing: {env_var}") + sys.exit(1) + + +def get_gh_release_info_text_with_token(release_tag: str, access_token: str) -> str: + gh = Github(access_token) + repo_name = "instana/python-sensor" + repo = gh.get_repo(repo_name) + release = repo.get_release(release_tag) + + logging.info("GH Release fetched successfully %s", release) + + msg = ( + f":mega: Oyez! Oyez! 
Oyez!\n" + f"The Instana Python Tracer {release_tag} has been released.\n" + f":package: https://pypi.org/project/instana/ \n" + f":github: {release.html_url} \n" + f"**Release Notes:**\n" + f"{release.body}\n" + ) + + logging.info(msg) + return msg + + +def post_on_slack_channel( + slack_team: str, slack_service: str, slack_token: str, message_text: str +) -> None: + """Send a message to Slack channel.""" + + url = ( + f"https://hooks.slack.com/services/T{slack_team}/B{slack_service}/{slack_token}" + ) + + headers = { + "Content-Type": "application/json", + } + body = {"text": message_text} + + with httpx.Client() as client: + response = client.post(url, headers=headers, json=body) + response.raise_for_status() + + result = response.text + if "ok" in result: + print("✅ Slack message sent successfully") + else: + print(f"❌ Slack API error: {result}") + + +def main() -> None: + # Setting this globally to DEBUG will also debug PyGithub, + # which will produce even more log output + logging.basicConfig(level=logging.INFO) + ensure_environment_variables_are_present() + + msg = get_gh_release_info_text_with_token( + os.environ["GITHUB_RELEASE_TAG"], os.environ["GITHUB_TOKEN"] + ) + + post_on_slack_channel( + os.environ["SLACK_TEAM"], + os.environ["SLACK_SERVICE"], + os.environ["SLACK_TOKEN"], + msg, + ) + + +if __name__ == "__main__": + main() diff --git a/.github/workflows/opened-pr-notification-on-slack.yml b/.github/workflows/opened-pr-notification-on-slack.yml new file mode 100644 index 00000000..11b392db --- /dev/null +++ b/.github/workflows/opened-pr-notification-on-slack.yml @@ -0,0 +1,42 @@ +name: PR Slack Notification + +permissions: + contents: read + pull-requests: read + +on: + pull_request: + types: [opened, reopened, ready_for_review] + +jobs: + notify-slack: + runs-on: ubuntu-latest + + if: ${{ !github.event.pull_request.draft }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 # Fetch all history to access commit messages + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.13' + + - name: Install dependencies + run: | + pip install httpx + + - name: Send Slack notification + env: + SLACK_TOKEN: ${{ secrets.RUPY_PR_ANNOUNCEMENT_TOKEN }} + SLACK_SERVICE: ${{ secrets.RUPY_PR_ANNOUNCEMENT_CHANNEL_ID }} + SLACK_TEAM: ${{ secrets.RUPY_TOWN_CRIER_SERVICE_ID }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PR_NUMBER: ${{ github.event.number }} + PR_TITLE: ${{ github.event.pull_request.title }} + PR_URL: ${{ github.event.pull_request.html_url }} + PR_AUTHOR: ${{ github.event.pull_request.user.login }} + REPO_NAME: ${{ github.repository }} + run: python .github/scripts/announce_pr_on_slack.py diff --git a/.github/workflows/pkg_release.yml b/.github/workflows/pkg_release.yml index 7e8a91e7..bd37c54c 100644 --- a/.github/workflows/pkg_release.yml +++ b/.github/workflows/pkg_release.yml @@ -11,13 +11,13 @@ name: Release new version on: push: tags: - - v3.* + - 'v3.*' + - '!v3.*post*' jobs: build: name: Build package runs-on: ubuntu-latest - if: ${{ startsWith(github.ref_name, 'v3') }} steps: - uses: actions/checkout@v4 - name: Set up Python @@ -80,3 +80,40 @@ jobs: path: dist/ - name: Publish to PyPI uses: pypa/gh-action-pypi-publish@release/v1 + + notify-slack: + name: Notify on Slack + needs: + - github-release + - publish-to-pypi + runs-on: ubuntu-latest + permissions: + contents: read + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 # Fetch all history to 
access commit messages + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.13' + + - name: Install dependencies + run: | + pip install httpx PyGithub + + # Send notification using the safely set environment variables + - name: Send Slack notification + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GITHUB_RELEASE_TAG: ${{ github.ref_name }} + SLACK_TOKEN: ${{ secrets.RUPY_TRACER_RELEASES_TOKEN }} + SLACK_SERVICE: ${{ secrets.RUPY_TRACER_RELEASES_CHANNEL_ID }} + SLACK_TEAM: ${{ secrets.RUPY_TOWN_CRIER_SERVICE_ID }} + run: | + echo "New release published ${GITHUB_RELEASE_TAG}" + python .github/scripts/announce_release_on_slack.py + \ No newline at end of file diff --git a/.github/workflows/py3140_build.yml b/.github/workflows/py3140_build.yml deleted file mode 100644 index 1cff4a73..00000000 --- a/.github/workflows/py3140_build.yml +++ /dev/null @@ -1,58 +0,0 @@ -# This workflow builds a container image on top of the Python 3.14.0 RC images -# with all dependencies already compiled and installed to be used in the tests -# CI pipelines. - -name: Build Instana python-sensor-test-py3.14.0 -on: - workflow_dispatch: # Manual trigger. - schedule: - - cron: '1 0 * * 1,3' # Every Monday and Wednesday at midnight and one. -env: - IMAGE_NAME: python-sensor-test-py3.14.0 - IMAGE_TAG: latest - CONTAINER_FILE: ./Dockerfile-py3140 - IMAGE_REGISTRY: ghcr.io/${{ github.repository_owner }} - REGISTRY_USER: ${{ github.actor }} - REGISTRY_PASSWORD: ${{ github.token }} -jobs: - build-and-push: - name: Build container image. - runs-on: ubuntu-latest - permissions: - contents: read - packages: write - steps: - - uses: actions/checkout@v4 - - - name: Build image - id: build_image - uses: redhat-actions/buildah-build@v2 - with: - image: ${{ env.IMAGE_NAME }} - tags: ${{ env.IMAGE_TAG }} - containerfiles: ${{ env.CONTAINER_FILE }} - - - name: Echo Outputs - run: | - echo "Image: ${{ steps.build_image.outputs.image }}" - echo "Tags: ${{ steps.build_image.outputs.tags }}" - echo "Tagged Image: ${{ steps.build_image.outputs.image-with-tag }}" - - - name: Check images created - run: buildah images | grep '${{ env.IMAGE_NAME }}' - - # Push the image to GHCR (Image Registry) - - name: Push To GHCR - uses: redhat-actions/push-to-registry@v2 - id: push-to-ghcr - with: - image: ${{ steps.build_image.outputs.image }} - tags: ${{ steps.build_image.outputs.tags }} - registry: ${{ env.IMAGE_REGISTRY }} - username: ${{ env.REGISTRY_USER }} - password: ${{ env.REGISTRY_PASSWORD }} - extra-args: | - --disable-content-trust - - - name: Print image URL - run: echo "Image pushed to ${{ steps.push-to-ghcr.outputs.registry-paths }}" diff --git a/.github/workflows/release-notification-on-slack.yml b/.github/workflows/release-notification-on-slack.yml deleted file mode 100644 index 186466e1..00000000 --- a/.github/workflows/release-notification-on-slack.yml +++ /dev/null @@ -1,34 +0,0 @@ -name: Slack Post -on: - workflow_dispatch: # Manual trigger - inputs: - github_ref: - description: 'Manually provided value for GITHUB_RELEASE_TAG of a release' - required: true - type: string - - # https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#release - release: - types: [published] -jobs: - build: - name: Slack Post - runs-on: ubuntu-latest - steps: - - name: 'Checkout the needed file only ./bin/announce_release_on_slack.py' - uses: actions/checkout@v3 - - run: | - if [[ ${{ github.event_name == 'workflow_dispatch' }} == true ]]; then - export GITHUB_RELEASE_TAG=${{ 
inputs.github_ref }} - else # release event - export GITHUB_RELEASE_TAG=$(basename ${GITHUB_REF}) - fi - echo "New release published ${GITHUB_RELEASE_TAG}" - pip3 install PyGithub - echo $PWD - ls -lah - ./bin/announce_release_on_slack.py - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} - SLACK_CHANNEL_ID_RELEASES: ${{ secrets.SLACK_CHANNEL_ID_RELEASES }} diff --git a/.tekton/.currency/currency-scheduled-eventlistener.yaml b/.tekton/.currency/currency-scheduled-eventlistener.yaml index 8bf6e3ed..b410dc94 100644 --- a/.tekton/.currency/currency-scheduled-eventlistener.yaml +++ b/.tekton/.currency/currency-scheduled-eventlistener.yaml @@ -41,15 +41,14 @@ kind: CronJob metadata: name: python-currency-cronjob spec: - schedule: "35 0 * * Mon-Fri" + schedule: "35 1 * * Mon-Fri" jobTemplate: spec: template: spec: containers: - name: http-request-to-el-svc - # quay.io/curl/curl:8.11.0 - image: quay.io/curl/curl@sha256:b90c4281fe1a4c6cc2b6a665c531d448bba078d75ffa98187e7d7e530fca5209 + image: quay.io/curl/curl:latest imagePullPolicy: IfNotPresent args: ["curl", "-X", "POST", "--data", "{}", "el-python-currency-cron-listener.default.svc.cluster.local:8080"] restartPolicy: OnFailure diff --git a/.tekton/.currency/currency-tasks.yaml b/.tekton/.currency/currency-tasks.yaml index 46a41a35..7f5ead15 100644 --- a/.tekton/.currency/currency-tasks.yaml +++ b/.tekton/.currency/currency-tasks.yaml @@ -11,8 +11,7 @@ spec: mountPath: /workspace steps: - name: clone-repo - # public.ecr.aws/docker/library/alpine:3.20.3 - image: public.ecr.aws/docker/library/alpine@sha256:029a752048e32e843bd6defe3841186fb8d19a28dae8ec287f433bb9d6d1ad85 + image: public.ecr.aws/docker/library/alpine:latest script: | #!/bin/sh echo "Installing git" @@ -33,14 +32,14 @@ spec: mountPath: /workspace steps: - name: generate-currency-report - # public.ecr.aws/docker/library/python:3.12.10-bookworm - image: public.ecr.aws/docker/library/python@sha256:4ea730e54e2a87b716ffc58a426bd627baa182a3d4d5696d05c1bca2dde775aa + image: public.ecr.aws/docker/library/python:3.12-trixie script: | #!/usr/bin/env bash cd /workspace/python-sensor/.tekton/.currency python -m venv /tmp/venv source /tmp/venv/bin/activate + pip install --upgrade pip pip install -r resources/requirements.txt python scripts/generate_report.py @@ -63,8 +62,7 @@ spec: mountPath: /workspace steps: - name: upload-currency-report - # public.ecr.aws/docker/library/alpine:3.20.3 - image: public.ecr.aws/docker/library/alpine@sha256:029a752048e32e843bd6defe3841186fb8d19a28dae8ec287f433bb9d6d1ad85 + image: public.ecr.aws/docker/library/alpine:latest env: - name: GH_ENTERPRISE_TOKEN valueFrom: diff --git a/.tekton/.currency/docs/report.md b/.tekton/.currency/docs/report.md index c3a8fa22..a739efe1 100644 --- a/.tekton/.currency/docs/report.md +++ b/.tekton/.currency/docs/report.md @@ -3,31 +3,33 @@ | Package name | Support Policy | Beta version | Last Supported Version | Latest version | Up-to-date | Release date | Latest Version Published At | Days behind | Cloud Native | |:---------------------|:-----------------|:---------------|:-------------------------|:-----------------|:-------------|:---------------|:------------------------------|:--------------|:---------------| | ASGI | 45-days | No | 3.0 | 3.0 | Yes | 2019-03-04 | 2019-03-04 | 0 day/s | No | -| Celery | 45-days | No | 5.5.3 | 5.5.3 | Yes | 2025-06-01 | 2025-06-01 | 0 day/s | No | -| Django | 45-days | No | 5.2.3 | 5.2.3 | Yes | 2025-06-10 | 2025-06-10 | 0 day/s | No | -| FastAPI 
| 45-days | No | 0.115.12 | 0.115.12 | Yes | 2025-03-23 | 2025-03-23 | 0 day/s | No | -| Flask | 45-days | No | 3.1.1 | 3.1.1 | Yes | 2025-05-13 | 2025-05-13 | 0 day/s | No | +| WSGI | 0-day | Yes | 1.0.1 | 1.0.1 | Yes | 2010-09-26 | 2010-09-26 | 0 day/s | No | +| Django | 45-days | No | 5.2.7 | 5.2.7 | Yes | 2025-10-01 | 2025-10-01 | 0 day/s | No | +| FastAPI | 45-days | No | 0.118.0 | 0.118.0 | Yes | 2025-09-29 | 2025-09-29 | 0 day/s | No | +| Flask | 45-days | No | 3.1.2 | 3.1.2 | Yes | 2025-08-19 | 2025-08-19 | 0 day/s | No | | Pyramid | 45-days | No | 2.0.2 | 2.0.2 | Yes | 2023-08-25 | 2023-08-25 | 0 day/s | No | | Sanic | On demand | No | 25.3.0 | 25.3.0 | Yes | 2025-03-31 | 2025-03-31 | 0 day/s | No | -| Starlette | 45-days | No | 0.47.0 | 0.47.0 | Yes | 2025-05-29 | 2025-05-29 | 0 day/s | No | -| Tornado | 45-days | No | 6.5.1 | 6.5.1 | Yes | 2025-05-22 | 2025-05-22 | 0 day/s | No | -| Webapp2 | On demand | No | 2.5.2 | 2.5.2 | Yes | 2012-09-28 | 2012-09-28 | 0 day/s | No | -| WSGI | 0-day | Yes | 1.0.1 | 1.0.1 | Yes | 2010-09-26 | 2010-09-26 | 0 day/s | No | -| Aiohttp | 45-days | No | 3.12.13 | 3.12.13 | Yes | 2025-06-14 | 2025-06-14 | 0 day/s | No | -| Asynqp | Deprecated | No | 0.6 | 0.6 | Yes | 2019-01-20 | 2019-01-20 | 0 day/s | No | -| Boto3 | 45-days | No | 1.38.36 | 1.38.36 | Yes | 2025-06-12 | 2025-06-12 | 0 day/s | Yes | -| Google-cloud-pubsub | 45-days | No | 2.30.0 | 2.30.0 | Yes | 2025-06-09 | 2025-06-09 | 0 day/s | Yes | -| Google-cloud-storage | 45-days | No | 3.1.0 | 3.1.0 | Yes | 2025-02-28 | 2025-02-28 | 0 day/s | Yes | -| Grpcio | 45-days | No | 1.73.0 | 1.73.0 | Yes | 2025-06-09 | 2025-06-09 | 0 day/s | Yes | +| Starlette | 45-days | No | 0.48.0 | 0.48.0 | Yes | 2025-09-13 | 2025-09-13 | 0 day/s | No | +| Tornado | 45-days | No | 6.5.2 | 6.5.2 | Yes | 2025-08-08 | 2025-08-08 | 0 day/s | No | +| Aiohttp | 45-days | No | 3.13.0 | 3.13.0 | Yes | 2025-10-06 | 2025-10-06 | 0 day/s | No | +| Httpx | 45-days | No | 0.28.1 | 0.28.1 | Yes | 2024-12-06 | 2024-12-06 | 0 day/s | No | +| Requests | 45-days | No | 2.32.5 | 2.32.5 | Yes | 2025-08-18 | 2025-08-18 | 0 day/s | No | +| Urllib3 | 45-days | No | 2.5.0 | 2.5.0 | Yes | 2025-06-18 | 2025-06-18 | 0 day/s | No | +| Grpcio | 45-days | No | 1.75.1 | 1.75.1 | Yes | 2025-09-26 | 2025-09-26 | 0 day/s | Yes | +| Cassandra-driver | 45-days | No | 3.29.2 | 3.29.2 | Yes | 2024-09-10 | 2024-09-10 | 0 day/s | No | | Mysqlclient | 45-days | No | 2.2.7 | 2.2.7 | Yes | 2025-01-10 | 2025-01-10 | 0 day/s | Yes | -| Pika | 45-days | No | 1.3.2 | 1.3.2 | Yes | 2023-05-05 | 2023-05-05 | 0 day/s | No | -| PyMySQL | 45-days | No | 1.1.1 | 1.1.1 | Yes | 2024-05-21 | 2024-05-21 | 0 day/s | Yes | -| Pymongo | 45-days | No | 4.13.1 | 4.13.1 | Yes | 2025-06-11 | 2025-06-11 | 0 day/s | Yes | +| PyMySQL | 45-days | No | 1.1.2 | 1.1.2 | Yes | 2025-08-24 | 2025-08-24 | 0 day/s | Yes | +| Pymongo | 45-days | No | 4.15.3 | 4.15.3 | Yes | 2025-10-07 | 2025-10-07 | 0 day/s | Yes | | Psycopg2 | 45-days | No | 2.9.10 | 2.9.10 | Yes | 2024-10-16 | 2024-10-16 | 0 day/s | No | -| Redis | 45-days | No | 6.2.0 | 6.2.0 | Yes | 2025-05-28 | 2025-05-28 | 0 day/s | Yes | -| Requests | 45-days | No | 2.32.4 | 2.32.4 | Yes | 2025-06-09 | 2025-06-09 | 0 day/s | Yes | -| SQLAlchemy | 45-days | No | 2.0.41 | 2.0.41 | Yes | 2025-05-14 | 2025-05-14 | 0 day/s | Yes | -| Urllib3 | 45-days | No | 2.4.0 | 2.4.0 | Yes | 2025-04-10 | 2025-04-10 | 0 day/s | No | -| Spyne | 45-days | No | 2.14.0 | 2.14.0 | Yes | 2022-02-03 | 2022-02-03 | 0 day/s | No | -| Aio-pika | 45-days | No 
| 9.5.5 | 9.5.5 | Yes | 2025-02-26 | 2025-02-26 | 0 day/s | No | +| Redis | 45-days | No | 6.4.0 | 6.4.0 | Yes | 2025-08-07 | 2025-08-07 | 0 day/s | Yes | +| SQLAlchemy | 45-days | No | 2.0.43 | 2.0.43 | Yes | 2025-08-11 | 2025-08-11 | 0 day/s | Yes | | Aioamqp | 45-days | No | 0.15.0 | 0.15.0 | Yes | 2022-04-05 | 2022-04-05 | 0 day/s | No | +| Aio-pika | 45-days | No | 9.5.7 | 9.5.7 | Yes | 2025-08-05 | 2025-08-05 | 0 day/s | No | +| Confluent-kafka | 45-days | No | 2.11.1 | 2.11.1 | Yes | 2025-08-18 | 2025-08-18 | 0 day/s | No | +| Kafka-python-ng | 45-days | No | 2.2.3 | 2.2.3 | Yes | 2024-10-02 | 2024-10-02 | 0 day/s | No | +| Pika | 45-days | No | 1.3.2 | 1.3.2 | Yes | 2023-05-05 | 2023-05-05 | 0 day/s | No | +| Boto3 | 45-days | No | 1.40.47 | 1.40.47 | Yes | 2025-10-07 | 2025-10-07 | 0 day/s | Yes | +| Google-cloud-pubsub | 45-days | No | 2.31.1 | 2.31.1 | Yes | 2025-07-28 | 2025-07-28 | 0 day/s | Yes | +| Google-cloud-storage | 45-days | No | 3.4.0 | 3.4.0 | Yes | 2025-09-15 | 2025-09-15 | 0 day/s | Yes | +| Gevent | On demand | No | 25.9.1 | 25.9.1 | Yes | 2025-09-17 | 2025-09-17 | 0 day/s | No | +| Celery | 45-days | No | 5.5.3 | 5.5.3 | Yes | 2025-06-01 | 2025-06-01 | 0 day/s | No | diff --git a/.tekton/.currency/resources/table.json b/.tekton/.currency/resources/table.json index 4de524a9..238526f8 100644 --- a/.tekton/.currency/resources/table.json +++ b/.tekton/.currency/resources/table.json @@ -8,9 +8,10 @@ "Cloud Native": "No" }, { - "Package name": "Celery", - "Support Policy": "45-days", - "Beta version": "No", + "Package name": "WSGI", + "Support Policy": "0-day", + "Beta version": "Yes", + "Last Supported Version": "1.0.1", "Cloud Native": "No" }, { @@ -56,124 +57,133 @@ "Cloud Native": "No" }, { - "Package name": "Webapp2", - "Support Policy": "On demand", + "Package name": "Aiohttp", + "Support Policy": "45-days", "Beta version": "No", - "Last Supported Version": "2.5.2", "Cloud Native": "No" }, { - "Package name": "WSGI", - "Support Policy": "0-day", - "Beta version": "Yes", - "Last Supported Version": "1.0.1", + "Package name": "Httpx", + "Support Policy": "45-days", + "Beta version": "No", "Cloud Native": "No" }, { - "Package name": "Aiohttp", + "Package name": "Requests", "Support Policy": "45-days", "Beta version": "No", "Cloud Native": "No" }, { - "Package name": "Asynqp", - "Support Policy": "Deprecated", + "Package name": "Urllib3", + "Support Policy": "45-days", "Beta version": "No", - "Last Supported Version": "0.6", "Cloud Native": "No" }, { - "Package name": "Boto3", + "Package name": "Grpcio", "Support Policy": "45-days", "Beta version": "No", "Cloud Native": "Yes" }, { - "Package name": "Google-cloud-pubsub", + "Package name": "Cassandra-driver", "Support Policy": "45-days", "Beta version": "No", - "Cloud Native": "Yes" + "Cloud Native": "No" }, { - "Package name": "Google-cloud-storage", + "Package name": "Mysqlclient", "Support Policy": "45-days", "Beta version": "No", "Cloud Native": "Yes" }, { - "Package name": "Grpcio", + "Package name": "PyMySQL", "Support Policy": "45-days", "Beta version": "No", "Cloud Native": "Yes" }, { - "Package name": "Mysqlclient", + "Package name": "Pymongo", "Support Policy": "45-days", "Beta version": "No", "Cloud Native": "Yes" }, { - "Package name": "Pika", + "Package name": "Psycopg2", "Support Policy": "45-days", "Beta version": "No", "Cloud Native": "No" }, { - "Package name": "PyMySQL", + "Package name": "Redis", "Support Policy": "45-days", "Beta version": "No", "Cloud Native": "Yes" }, { - "Package name": 
"Pymongo", + "Package name": "SQLAlchemy", "Support Policy": "45-days", "Beta version": "No", "Cloud Native": "Yes" }, { - "Package name": "Psycopg2", + "Package name": "Aioamqp", "Support Policy": "45-days", "Beta version": "No", "Cloud Native": "No" }, { - "Package name": "Redis", + "Package name": "Aio-pika", "Support Policy": "45-days", "Beta version": "No", - "Cloud Native": "Yes" + "Cloud Native": "No" }, { - "Package name": "Requests", + "Package name": "Confluent-kafka", "Support Policy": "45-days", "Beta version": "No", - "Cloud Native": "Yes" + "Cloud Native": "No" }, { - "Package name": "SQLAlchemy", + "Package name": "Kafka-python-ng", "Support Policy": "45-days", "Beta version": "No", - "Cloud Native": "Yes" + "Cloud Native": "No" }, { - "Package name": "Urllib3", + "Package name": "Pika", "Support Policy": "45-days", "Beta version": "No", "Cloud Native": "No" }, { - "Package name": "Spyne", + "Package name": "Boto3", + "Support Policy": "45-days", + "Beta version": "No", + "Cloud Native": "Yes" + }, + { + "Package name": "Google-cloud-pubsub", "Support Policy": "45-days", "Beta version": "No", - "Cloud Native": "No" + "Cloud Native": "Yes" }, { - "Package name": "Aio-pika", + "Package name": "Google-cloud-storage", "Support Policy": "45-days", "Beta version": "No", + "Cloud Native": "Yes" + }, + { + "Package name": "Gevent", + "Support Policy": "On demand", + "Beta version": "No", "Cloud Native": "No" }, { - "Package name": "Aioamqp", + "Package name": "Celery", "Support Policy": "45-days", "Beta version": "No", "Cloud Native": "No" diff --git a/.tekton/.currency/scripts/generate_report.py b/.tekton/.currency/scripts/generate_report.py index 0d4ca056..9ae08d11 100644 --- a/.tekton/.currency/scripts/generate_report.py +++ b/.tekton/.currency/scripts/generate_report.py @@ -31,7 +31,7 @@ def get_upstream_version(dependency, last_supported_version): last_supported_version_release_date = "Not found" if dependency in SPEC_MAP: # webscrape info from official website - version_pattern = "(\d+\.\d+\.?\d*)" + version_pattern = r"(\d+\.\d+\.?\d*)" latest_version_release_date = "" url = SPEC_MAP[dependency] @@ -181,17 +181,17 @@ def process_taskrun_logs( f"Retrieving container logs from the successful taskrun pod {pod_name} of taskrun {taskrun_name}.." 
) if task_name == "python-tracer-unittest-gevent-starlette-task": - match = re.search("Successfully installed .* (starlette-[^\s]+)", logs) - tekton_ci_output += f"{match[1]}\n" - elif task_name == "python-tracer-unittest-googlecloud-task": - match = re.search( - "Successfully installed .* (google-cloud-storage-[^\s]+)", logs - ) + match = re.search(r"Successfully installed .*(gevent-[^\s]+) .* (starlette-[^\s]+)", logs) + tekton_ci_output += f"{match[1]}\n{match[2]}\n" + elif task_name == "python-tracer-unittest-kafka-task": + match = re.search(r"Successfully installed .*(confluent-kafka-[^\s]+) .* (kafka-python-ng-[^\s]+)", logs) + tekton_ci_output += f"{match[1]}\n{match[2]}\n" + elif task_name == "python-tracer-unittest-cassandra-task": + match = re.search(r"Successfully installed .*(cassandra-driver-[^\s]+)", logs) tekton_ci_output += f"{match[1]}\n" elif task_name == "python-tracer-unittest-default-task": - for line in logs.splitlines(): - if "Successfully installed" in line: - tekton_ci_output += line + lines = re.findall(r"^Successfully installed .*", logs, re.M) + tekton_ci_output += "\n".join(lines) break else: print( @@ -202,41 +202,37 @@ def process_taskrun_logs( def get_tekton_ci_output(): """Get the latest successful scheduled tekton pipeline output""" - # config.load_kube_config() - config.load_incluster_config() + try: + config.load_incluster_config() + print("Using in-cluster Kubernetes configuration...") + except config.config_exception.ConfigException: + # Fall back to local config if running locally and not inside cluster + config.load_kube_config() + print("Using local Kubernetes configuration...") namespace = "default" core_v1_client = client.CoreV1Api() - task_name = "python-tracer-unittest-gevent-starlette-task" taskrun_filter = lambda tr: tr["status"]["conditions"][0]["type"] == "Succeeded" # noqa: E731 - starlette_taskruns = get_taskruns(namespace, task_name, taskrun_filter) - - tekton_ci_output = process_taskrun_logs( - starlette_taskruns, core_v1_client, namespace, task_name, "" - ) - task_name = "python-tracer-unittest-googlecloud-task" - taskrun_filter = ( # noqa: E731 - lambda tr: tr["metadata"]["name"].endswith("unittest-googlecloud-0") - and tr["status"]["conditions"][0]["type"] == "Succeeded" - ) - googlecloud_taskruns = get_taskruns(namespace, task_name, taskrun_filter) - - tekton_ci_output = process_taskrun_logs( - googlecloud_taskruns, core_v1_client, namespace, task_name, tekton_ci_output - ) - - task_name = "python-tracer-unittest-default-task" - taskrun_filter = ( # noqa: E731 - lambda tr: tr["metadata"]["name"].endswith("unittest-default-3") - and tr["status"]["conditions"][0]["type"] == "Succeeded" - ) - default_taskruns = get_taskruns(namespace, task_name, taskrun_filter) - - tekton_ci_output = process_taskrun_logs( - default_taskruns, core_v1_client, namespace, task_name, tekton_ci_output - ) + tasks = [ + "python-tracer-unittest-gevent-starlette-task", + "python-tracer-unittest-kafka-task", + "python-tracer-unittest-cassandra-task", + "python-tracer-unittest-default-task" + ] + + tekton_ci_output = "" + + for task_name in tasks: + try: + taskruns = get_taskruns(namespace, task_name, taskrun_filter) + + tekton_ci_output = process_taskrun_logs( + taskruns, core_v1_client, namespace, task_name, tekton_ci_output + ) + except Exception as exc: + print(f"Error processing task {task_name}: {str(exc)}") return tekton_ci_output diff --git a/.tekton/github-pr-pipeline.yaml.part b/.tekton/github-pr-pipeline.yaml.part index 5e442b7b..db2319ab 100644 --- 
a/.tekton/github-pr-pipeline.yaml.part +++ b/.tekton/github-pr-pipeline.yaml.part @@ -8,6 +8,24 @@ spec: type: string - name: git-commit-sha type: string + - name: py-39-imageDigest + type: string + default: public.ecr.aws/docker/library/python:3.9-trixie + - name: py-310-imageDigest + type: string + default: public.ecr.aws/docker/library/python:3.10-trixie + - name: py-311-imageDigest + type: string + default: public.ecr.aws/docker/library/python:3.11-trixie + - name: py-312-imageDigest + type: string + default: public.ecr.aws/docker/library/python:3.12-trixie + - name: py-313-imageDigest + type: string + default: public.ecr.aws/docker/library/python:3.13-trixie + - name: py-314-imageDigest + type: string + default: public.ecr.aws/docker/library/python:3.14-trixie workspaces: - name: python-tracer-ci-pipeline-pvc tasks: @@ -28,6 +46,9 @@ spec: - unittest-default - unittest-cassandra - unittest-gevent-starlette + - unittest-aws + - unittest-kafka +# - unittest-python-next taskRef: kind: Task name: github-set-status diff --git a/.tekton/github-set-status-task.yaml b/.tekton/github-set-status-task.yaml index 631d234b..f7ea7b4a 100644 --- a/.tekton/github-set-status-task.yaml +++ b/.tekton/github-set-status-task.yaml @@ -14,8 +14,7 @@ spec: secretName: githubtoken steps: - name: set-status - # quay.io/curl/curl:8.11.0 - image: quay.io/curl/curl@sha256:b90c4281fe1a4c6cc2b6a665c531d448bba078d75ffa98187e7d7e530fca5209 + image: quay.io/curl/curl:latest env: - name: SHA value: $(params.SHA) diff --git a/.tekton/pipeline.yaml b/.tekton/pipeline.yaml index d76916c2..a74ef6be 100644 --- a/.tekton/pipeline.yaml +++ b/.tekton/pipeline.yaml @@ -6,10 +6,20 @@ spec: params: - name: revision type: string + - name: py-312-imageDigest + type: string + default: public.ecr.aws/docker/library/python:3.12-trixie + - name: py-313-imageDigest + type: string + default: public.ecr.aws/docker/library/python:3.13-trixie + - name: py-314-imageDigest + type: string + default: public.ecr.aws/docker/library/python:3.14-trixie workspaces: - name: python-tracer-ci-pipeline-pvc tasks: - name: clone + displayName: "clone $(params.revision)" params: - name: revision value: $(params.revision) @@ -19,27 +29,15 @@ spec: - name: task-pvc workspace: python-tracer-ci-pipeline-pvc - name: unittest-default - displayName: "Platforms and Browsers: $(params.platform) and $(params.browser)" + displayName: "Python $(params.imageDigest)" runAfter: - clone matrix: params: - name: imageDigest value: - # public.ecr.aws/docker/library/python:3.8.20-bookworm - - "sha256:7aa279fb41dad2962d3c915aa6f6615134baa412ab5aafa9d4384dcaaa0af15d" - # public.ecr.aws/docker/library/python:3.9.22-bookworm - - "sha256:a847112640804ed2d03bb774d46bb1619bd37862fb2b7e48eebe425a168c153b" - # public.ecr.aws/docker/library/python:3.10.17-bookworm - - "sha256:e2c7fb05741c735679b26eda7dd34575151079f8c615875fbefe401972b14d85" - # public.ecr.aws/docker/library/python:3.11.12-bookworm - - "sha256:a3e280261e448b95d49423532ccd6e5329c39d171c10df1457891ff7c5e2301b" - # public.ecr.aws/docker/library/python:3.12.10-bookworm - - "sha256:4ea730e54e2a87b716ffc58a426bd627baa182a3d4d5696d05c1bca2dde775aa" - # public.ecr.aws/docker/library/python:3.13.3-bookworm - - "sha256:07bf1bd38e191e3ed18b5f3eb0006d5ab260cb8c967f49d3bf947e5c2e44d8a9" - # public.ecr.aws/docker/library/python:3.14.0b2-bookworm - - "sha256:4f8ae0a7847680b269d8ef51528053b2cfc9242377f349cbc3a36eacf579903f" + - $(params.py-313-imageDigest) + - $(params.py-314-imageDigest) taskRef: name: 
python-tracer-unittest-default-task workspaces: @@ -48,12 +46,9 @@ spec: - name: unittest-cassandra runAfter: - clone - matrix: - params: - - name: imageDigest - value: - # public.ecr.aws/docker/library/python:3.9.22-bookworm - - "sha256:a847112640804ed2d03bb774d46bb1619bd37862fb2b7e48eebe425a168c153b" + params: + - name: imageDigest + value: $(params.py-312-imageDigest) taskRef: name: python-tracer-unittest-cassandra-task workspaces: @@ -62,40 +57,20 @@ spec: - name: unittest-gevent-starlette runAfter: - clone - matrix: - params: - - name: imageDigest - value: - # public.ecr.aws/docker/library/python:3.9.22-bookworm - - "sha256:a847112640804ed2d03bb774d46bb1619bd37862fb2b7e48eebe425a168c153b" + params: + - name: imageDigest + value: $(params.py-313-imageDigest) taskRef: name: python-tracer-unittest-gevent-starlette-task workspaces: - name: task-pvc workspace: python-tracer-ci-pipeline-pvc - - name: unittest-aws - runAfter: - - clone - matrix: - params: - - name: imageDigest - value: - # public.ecr.aws/docker/library/python:3.12.10-bookworm - - "sha256:4ea730e54e2a87b716ffc58a426bd627baa182a3d4d5696d05c1bca2dde775aa" - taskRef: - name: python-tracer-unittest-aws-task - workspaces: - - name: task-pvc - workspace: python-tracer-ci-pipeline-pvc - name: unittest-kafka runAfter: - clone - matrix: - params: - - name: imageDigest - value: - # public.ecr.aws/docker/library/python:3.12.10-bookworm - - "sha256:4ea730e54e2a87b716ffc58a426bd627baa182a3d4d5696d05c1bca2dde775aa" + params: + - name: imageDigest + value: $(params.py-313-imageDigest) taskRef: name: python-tracer-unittest-kafka-task workspaces: diff --git a/.tekton/python-tracer-prepuller.yaml b/.tekton/python-tracer-prepuller.yaml index db1ab34c..3d711dab 100644 --- a/.tekton/python-tracer-prepuller.yaml +++ b/.tekton/python-tracer-prepuller.yaml @@ -14,68 +14,49 @@ spec: # Configure an init container for each image you want to pull initContainers: - name: prepuller-git - # public.ecr.aws/docker/library/alpine:3.20.3 - image: public.ecr.aws/docker/library/alpine@sha256:029a752048e32e843bd6defe3841186fb8d19a28dae8ec287f433bb9d6d1ad85 + image: public.ecr.aws/docker/library/alpine:latest command: ["sh", "-c", "'true'"] - name: prepuller-google-cloud-pubsub - # quay.io/thekevjames/gcloud-pubsub-emulator:501.0.0 - image: quay.io/thekevjames/gcloud-pubsub-emulator@sha256:9bad1f28e6a3d6cd5f462c654c736faa4cf49732d9422ddb427ad30f3037c0ff + image: quay.io/thekevjames/gcloud-pubsub-emulator:501.0.0 command: ["sh", "-c", "'true'"] - name: prepuller-cassandra - # public.ecr.aws/docker/library/cassandra:3.11.16-jammy - image: public.ecr.aws/docker/library/cassandra@sha256:b175d99b80f8108594d00c705288fdb3186b9fc07b30b4c292c3592cddb5f0b5 + image: public.ecr.aws/docker/library/cassandra:3.11.16-jammy command: ["sh", "-c", "'true'"] - name: prepuller-rabbitmq - # public.ecr.aws/docker/library/rabbitmq:3.13.0 - image: public.ecr.aws/docker/library/rabbitmq@sha256:39de1a4fc6c72d12bd5dfa23e8576536fd1c0cc8418344cd5a51addfc9a1145d + image: public.ecr.aws/docker/library/rabbitmq:3.13.0 command: ["sh", "-c", "'true'"] - name: prepuller-redis - # public.ecr.aws/docker/library/redis:7.2.4-bookworm - image: public.ecr.aws/docker/library/redis@sha256:9341b6548cc35b64a6de0085555264336e2f570e17ecff20190bf62222f2bd64 + image: public.ecr.aws/docker/library/redis:7.2.4-bookworm command: ["sh", "-c", "'true'"] - name: prepuller-mongo - # public.ecr.aws/docker/library/mongo:7.0.6 - image: 
public.ecr.aws/docker/library/mongo@sha256:3a023748ee30e915dd51642f1ef430c73c4e54937060054ca84c70417f510cc5 + image: public.ecr.aws/docker/library/mongo:7.0.6 command: ["sh", "-c", "'true'"] - name: prepuller-mariadb - # public.ecr.aws/docker/library/mariadb:11.3.2 - image: public.ecr.aws/docker/library/mariadb@sha256:a4a81ab6d190db84b67f286fd0511cdea619a24b63790b3db4fb69d263a5cd37 + image: public.ecr.aws/docker/library/mariadb:11.3.2 command: ["sh", "-c", "'true'"] - name: prepuller-postgres - # public.ecr.aws/docker/library/postgres:16.2-bookworm - image: public.ecr.aws/docker/library/postgres@sha256:07572430dbcd821f9f978899c3ab3a727f5029be9298a41662e1b5404d5b73e0 + image: public.ecr.aws/docker/library/postgres:16.10-trixie command: ["sh", "-c", "'true'"] - name: prepuller-kafka - # public.ecr.aws/bitnami/kafka:3.9.0 - image: public.ecr.aws/docker/library/kafka@sha256:d2890d68f96b36da3c8413fa94294f018b2f95d87cf108cbf71eab510572d9be - command: ["sh", "-c", "'true'"] - - name: prepuller-38 - # public.ecr.aws/docker/library/python:3.8.20-bookworm - image: public.ecr.aws/docker/library/python@ + image: public.ecr.aws/bitnami/kafka:3.9.0 command: ["sh", "-c", "'true'"] - name: prepuller-39 - # public.ecr.aws/docker/library/python:3.9.22-bookworm - image: public.ecr.aws/docker/library/python@sha256:a847112640804ed2d03bb774d46bb1619bd37862fb2b7e48eebe425a168c153b + image: public.ecr.aws/docker/library/python:3.9-trixie command: ["sh", "-c", "'true'"] - name: prepuller-310 - # public.ecr.aws/docker/library/python:3.10.17-bookworm - image: public.ecr.aws/docker/library/python@sha256:e2c7fb05741c735679b26eda7dd34575151079f8c615875fbefe401972b14d85 + image: public.ecr.aws/docker/library/python:3.10-trixie command: ["sh", "-c", "'true'"] - name: prepuller-311 - # public.ecr.aws/docker/library/python:3.11.12-bookworm - image: public.ecr.aws/docker/library/python@sha256:a3e280261e448b95d49423532ccd6e5329c39d171c10df1457891ff7c5e2301b + image: public.ecr.aws/docker/library/python:3.11-trixie command: ["sh", "-c", "'true'"] - name: prepuller-312 - # public.ecr.aws/docker/library/python:3.12.10-bookworm - image: public.ecr.aws/docker/library/python@sha256:4ea730e54e2a87b716ffc58a426bd627baa182a3d4d5696d05c1bca2dde775aa + image: public.ecr.aws/docker/library/python:3.12-trixie command: ["sh", "-c", "'true'"] - name: prepuller-313 - # public.ecr.aws/docker/library/python:3.13.3-bookworm - image: public.ecr.aws/docker/library/python@sha256:07bf1bd38e191e3ed18b5f3eb0006d5ab260cb8c967f49d3bf947e5c2e44d8a9 + image: public.ecr.aws/docker/library/python:3.13-trixie command: ["sh", "-c", "'true'"] - name: prepuller-314 - # public.ecr.aws/docker/library/python:3.14.0b2-bookworm - image: public.ecr.aws/docker/library/python@sha256:4f8ae0a7847680b269d8ef51528053b2cfc9242377f349cbc3a36eacf579903f + image: public.ecr.aws/docker/library/python:3.14-trixie command: ["sh", "-c", "'true'"] # Use the pause container to ensure the Pod goes into a `Running` phase diff --git a/.tekton/scheduled-eventlistener.yaml b/.tekton/scheduled-eventlistener.yaml index 9352fc45..f9b8e2a6 100644 --- a/.tekton/scheduled-eventlistener.yaml +++ b/.tekton/scheduled-eventlistener.yaml @@ -25,7 +25,7 @@ spec: - name: git-commit-sha value: $(tt.params.git-commit-sha) pipelineRef: - name: github-pr-python-tracer-ci-pipeline + name: python-tracer-ci-pipeline workspaces: - name: python-tracer-ci-pipeline-pvc volumeClaimTemplate: @@ -61,8 +61,7 @@ spec: spec: containers: - name: git - # public.ecr.aws/docker/library/alpine:3.20.3 - image: 
public.ecr.aws/docker/library/alpine@sha256:029a752048e32e843bd6defe3841186fb8d19a28dae8ec287f433bb9d6d1ad85 + image: public.ecr.aws/docker/library/alpine:latest script: | #!/bin/sh echo "Installing git" diff --git a/.tekton/task.yaml b/.tekton/task.yaml index b68593bf..f6b21a05 100644 --- a/.tekton/task.yaml +++ b/.tekton/task.yaml @@ -12,8 +12,7 @@ spec: mountPath: /workspace steps: - name: clone - # public.ecr.aws/docker/library/alpine:3.20.3 - image: public.ecr.aws/docker/library/alpine@sha256:029a752048e32e843bd6defe3841186fb8d19a28dae8ec287f433bb9d6d1ad85 + image: public.ecr.aws/docker/library/alpine:latest script: | #!/bin/sh echo "Installing git" @@ -29,8 +28,7 @@ metadata: spec: sidecars: - name: cassandra - # public.ecr.aws/docker/library/cassandra:3.11.16-jammy - image: public.ecr.aws/docker/library/cassandra@sha256:b175d99b80f8108594d00c705288fdb3186b9fc07b30b4c292c3592cddb5f0b5 + image: public.ecr.aws/docker/library/cassandra:3.11.16-jammy env: - name: MAX_HEAP_SIZE value: 2048m @@ -51,7 +49,7 @@ spec: mountPath: /workspace steps: - name: unittest - image: public.ecr.aws/docker/library/python@$(params.imageDigest) + image: $(params.imageDigest) env: - name: TEST_CONFIGURATION value: cassandra @@ -72,7 +70,7 @@ spec: mountPath: /workspace steps: - name: unittest - image: public.ecr.aws/docker/library/python@$(params.imageDigest) + image: $(params.imageDigest) env: - name: TEST_CONFIGURATION value: gevent_starlette @@ -87,8 +85,7 @@ metadata: spec: sidecars: - name: google-cloud-pubsub - # quay.io/thekevjames/gcloud-pubsub-emulator - image: quay.io/thekevjames/gcloud-pubsub-emulator@sha256:9bad1f28e6a3d6cd5f462c654c736faa4cf49732d9422ddb427ad30f3037c0ff + image: quay.io/thekevjames/gcloud-pubsub-emulator:latest env: - name: PUBSUB_EMULATOR_HOST value: 0.0.0.0:8681 @@ -98,19 +95,16 @@ spec: - containerPort: 8681 hostPort: 8681 - name: mariadb - # public.ecr.aws/docker/library/mariadb:11.3.2 - image: public.ecr.aws/docker/library/mariadb@sha256:a4a81ab6d190db84b67f286fd0511cdea619a24b63790b3db4fb69d263a5cd37 + image: public.ecr.aws/docker/library/mariadb:11.3.2 env: - name: MYSQL_ROOT_PASSWORD # or MARIADB_ROOT_PASSWORD value: passw0rd - name: MYSQL_DATABASE # or MARIADB_DATABASE value: instana_test_db - name: mongo - # public.ecr.aws/docker/library/mongo:7.0.6 - image: public.ecr.aws/docker/library/mongo@sha256:3a023748ee30e915dd51642f1ef430c73c4e54937060054ca84c70417f510cc5 + image: public.ecr.aws/docker/library/mongo:7.0.6 - name: postgres - # public.ecr.aws/docker/library/postgres:16.2-bookworm - image: public.ecr.aws/docker/library/postgres@sha256:07572430dbcd821f9f978899c3ab3a727f5029be9298a41662e1b5404d5b73e0 + image: public.ecr.aws/docker/library/postgres:16.10-trixie env: - name: POSTGRES_USER value: root @@ -126,11 +120,9 @@ spec: - pg_isready --host 127.0.0.1 --port 5432 --dbname=${POSTGRES_DB} timeoutSeconds: 10 - name: redis - # public.ecr.aws/docker/library/redis:7.2.4-bookworm - image: public.ecr.aws/docker/library/redis@sha256:9341b6548cc35b64a6de0085555264336e2f570e17ecff20190bf62222f2bd64 + image: public.ecr.aws/docker/library/redis:7.2.4-bookworm - name: rabbitmq - # public.ecr.aws/docker/library/rabbitmq:3.13.0 - image: public.ecr.aws/docker/library/rabbitmq@sha256:39de1a4fc6c72d12bd5dfa23e8576536fd1c0cc8418344cd5a51addfc9a1145d + image: public.ecr.aws/docker/library/rabbitmq:3.13.0 params: - name: imageDigest type: string @@ -139,7 +131,7 @@ spec: mountPath: /workspace steps: - name: unittest - image: public.ecr.aws/docker/library/python@$(params.imageDigest) 
+ image: $(params.imageDigest) env: - name: TEST_CONFIGURATION value: default @@ -160,7 +152,7 @@ spec: mountPath: /workspace steps: - name: unittest - image: public.ecr.aws/docker/library/python@$(params.imageDigest) + image: $(params.imageDigest) env: - name: TEST_CONFIGURATION value: aws @@ -174,24 +166,46 @@ metadata: name: python-tracer-unittest-kafka-task spec: sidecars: + - name: zookeeper + image: public.ecr.aws/ubuntu/zookeeper:3.1-22.04_edge + ports: + - containerPort: 9093 + env: + - name: TZ + value: "UTC" - name: kafka - # public.ecr.aws/bitnami/kafka:3.9.0 - image: public.ecr.aws/bitnami/kafka@sha256:d2890d68f96b36da3c8413fa94294f018b2f95d87cf108cbf71eab510572d9be + image: public.ecr.aws/ubuntu/kafka:3.1-22.04_edge env: - - name: KAFKA_CFG_NODE_ID - value: "0" - - name: KAFKA_CFG_PROCESS_ROLES - value: "controller,broker" - - name: KAFKA_CFG_LISTENERS - value: "PLAINTEXT://:9092,CONTROLLER://:9093,EXTERNAL://:9094" - - name: KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP - value: "CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT,PLAINTEXT:PLAINTEXT" - - name: KAFKA_CFG_CONTROLLER_QUORUM_VOTERS - value: "0@kafka:9093" - - name: KAFKA_CFG_CONTROLLER_LISTENER_NAMES - value: "CONTROLLER" - - name: KAFKA_CFG_ADVERTISED_LISTENERS - value: "PLAINTEXT://kafka:9092,EXTERNAL://localhost:9094" + - name: TZ + value: "UTC" + - name: ZOOKEEPER_HOST + value: localhost + - name: ZOOKEEPER_PORT + value: "2181" + ports: + - containerPort: 9093 + - containerPort: 9094 + command: + - /opt/kafka/bin/kafka-server-start.sh + - /opt/kafka/config/server.properties + - --override + - listeners=INTERNAL://0.0.0.0:9093,EXTERNAL://0.0.0.0:9094 + - --override + - advertised.listeners=INTERNAL://localhost:9093,EXTERNAL://localhost:9094 + - --override + - listener.security.protocol.map=INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT + - --override + - inter.broker.listener.name=INTERNAL + - --override + - broker.id=1 + - --override + - offsets.topic.replication.factor=1 + - --override + - transaction.state.log.replication.factor=1 + - --override + - transaction.state.log.min.isr=1 + - --override + - auto.create.topics.enable=true params: - name: imageDigest type: string @@ -200,10 +214,71 @@ spec: mountPath: /workspace steps: - name: unittest - image: public.ecr.aws/docker/library/python@$(params.imageDigest) + image: $(params.imageDigest) env: - name: TEST_CONFIGURATION value: kafka workingDir: /workspace/python-sensor/ command: - /workspace/python-sensor/.tekton/run_unittests.sh +--- +apiVersion: tekton.dev/v1 +kind: Task +metadata: + name: python-tracer-unittest-python-next-task +spec: + sidecars: + - name: google-cloud-pubsub + image: quay.io/thekevjames/gcloud-pubsub-emulator:latest + env: + - name: PUBSUB_EMULATOR_HOST + value: 0.0.0.0:8681 + - name: PUBSUB_PROJECT1 + value: test-project,test-topic + ports: + - containerPort: 8681 + hostPort: 8681 + - name: mariadb + image: public.ecr.aws/docker/library/mariadb:11.3.2 + env: + - name: MYSQL_ROOT_PASSWORD # or MARIADB_ROOT_PASSWORD + value: passw0rd + - name: MYSQL_DATABASE # or MARIADB_DATABASE + value: instana_test_db + - name: mongo + image: public.ecr.aws/docker/library/mongo:7.0.6 + - name: postgres + image: public.ecr.aws/docker/library/postgres:16.10-trixie + env: + - name: POSTGRES_USER + value: root + - name: POSTGRES_PASSWORD + value: passw0rd + - name: POSTGRES_DB + value: instana_test_db + readinessProbe: + exec: + command: + - sh + - -c + - pg_isready --host 127.0.0.1 --port 5432 --dbname=${POSTGRES_DB} + timeoutSeconds: 10 + - name: redis + image: 
public.ecr.aws/docker/library/redis:7.2.4-bookworm + - name: rabbitmq + image: public.ecr.aws/docker/library/rabbitmq:3.13.0 + params: + - name: py-version + type: string + workspaces: + - name: task-pvc + mountPath: /workspace + steps: + - name: unittest + image: public.ecr.aws/docker/library/python:$(params.py-version) + env: + - name: TEST_CONFIGURATION + value: default + workingDir: /workspace/python-sensor/ + command: + - /workspace/python-sensor/.tekton/run_unittests.sh diff --git a/Dockerfile b/Dockerfile index a193d6d1..ba04c9c6 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Development Container -FROM public.ecr.aws/docker/library/python:3.12-slim-bookworm +FROM public.ecr.aws/docker/library/python:3.14-slim RUN apt-get -y -qq update && \ apt-get -y -qq upgrade && \ diff --git a/Dockerfile-py3140 b/Dockerfile-py3140 deleted file mode 100644 index a8aa2331..00000000 --- a/Dockerfile-py3140 +++ /dev/null @@ -1,21 +0,0 @@ -FROM public.ecr.aws/docker/library/python:3.14.0b2 - -RUN apt-get update \ - && apt-get install -y --no-install-recommends \ - build-essential python3-dev \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -ENV WORKDIR_=/root/base - -WORKDIR $WORKDIR_ -COPY ./tests/requirements-minimal.txt . -COPY ./tests/requirements-pre314.txt . - -ENV VIRTUAL_ENV="$WORKDIR_/venv" -RUN python -m venv $VIRTUAL_ENV - -ENV PATH="$VIRTUAL_ENV/bin:$PATH" - -RUN python -m pip install --upgrade pip \ - && python -m pip install -r requirements-pre314.txt diff --git a/README.md b/README.md index 514530c7..20e08ff5 100644 --- a/README.md +++ b/README.md @@ -15,9 +15,13 @@ Any feedback is welcome. Happy Python visibility. ## Installation -Instana remotely instruments your Python web servers automatically via [Instana AutoTrace™️]. To configure which Python processes this applies to, see the [configuration page]. +You can use automatic or manual installation, as described in the following sections: -## Manual Installation +### Automatic installation + +Instana remotely instruments your Python applications automatically via the [Instana AutoTrace webhook] in Kubernetes and Red Hat OpenShift clusters. However, if you prefer to install the package manually, see [Manual Installation](#manual-installation) below. + +### Manual Installation If you wish to instrument your applications manually, you can install the package with the following into the `virtualenv`, `pipenv`, or container (hosted on [PyPI]): pip install instana or to alternatively update an existing installation: pip install -U instana -### Activating Without Code Changes +#### Activating Without Code Changes The Instana package can then be activated _without any code changes required_ by setting the following environment variable for your Python application: AUTOWRAPT_BOOTSTRAP=instana This will cause the Instana Python package to instrument your Python application automatically. Once it finds the Instana host agent, it will report Python metrics and distributed traces. -### Activating via Import +#### Activating With Code Changes Alternatively, if you prefer the manual method, import the `instana` package inside of your Python application: import instana Want to instrument other languages? 
See our [Node.js], [Go], [Ruby] instrumentation. [Instana]: https://www.instana.com/ "IBM Instana Observability" -[Instana AutoTrace™️]: https://www.ibm.com/docs/en/instana-observability/current?topic=kubernetes-instana-autotrace-webhook "Instana AutoTrace" +[Instana AutoTrace webhook]: https://www.ibm.com/docs/en/instana-observability/current?topic=kubernetes-instana-autotrace-webhook "Instana AutoTrace webhook" [configuration page]: https://www.ibm.com/docs/en/instana-observability/current?topic=package-python-configuration-configuring-instana#general "Instana Python package configuration" [PyPI]: https://pypi.python.org/pypi/instana "Instana package at PyPI" [installation document]: https://www.ibm.com/docs/en/instana-observability/current?topic=technologies-monitoring-python-instana-python-package#installation-methods "Instana Python package installation methods" -[documentation portal]: https://www.ibm.com/docs/en/instana-observability/current?topic=technologies-monitoring-python-instana-python-package "Instana Python package documentation" +[documentation portal]: https://ibm.biz/monitoring-python "Monitoring Python - IBM documentation" [Node.js]: https://github.com/instana/nodejs "Instana Node.JS Tracer" [Go]: https://github.com/instana/golang-sensor "Instana Go Tracer" [Ruby]: https://github.com/instana/ruby-sensor "Instana Ruby Tracer" diff --git a/bin/announce_release_on_slack.py b/bin/announce_release_on_slack.py deleted file mode 100755 index 2c6625dd..00000000 --- a/bin/announce_release_on_slack.py +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/env python3 - -import json -import logging -import os -import requests -import sys - -from github import Github - - -def ensure_environment_variables_are_present(): - required_env_vars = ('GITHUB_RELEASE_TAG', 'GITHUB_TOKEN', - 'SLACK_BOT_TOKEN', 'SLACK_CHANNEL_ID_RELEASES') - - for v in required_env_vars: - if not os.environ.get(v): - logging.fatal("A required environment variable is missing: %s", v) - sys.exit(1) - - -def get_gh_release_info_text_with_token(release_tag, access_token): - g = Github(access_token) - repo_name = "instana/python-sensor" - repo = g.get_repo(repo_name) - release = repo.get_release(release_tag) - - logging.info("GH Release fetched successfully %s", release) - - msg = ( - f":mega: :package: A new version is released in {repo_name}\n" - f"Name: {release.title}\n" - f"Tag: {release.tag_name}\n" - f"Created at: {release.created_at}\n" - f"Published at: {release.published_at}\n" - f"{release.body}\n") - - logging.info(msg) - return msg - - -def post_on_slack_channel(slack_token, slack_channel_id, message_text): - api_url = "https://slack.com/api/chat.postMessage" - - headers = {"Authorization": f"Bearer {slack_token}", - "Content-Type": "application/json"} - body = {"channel": slack_channel_id, "text": message_text} - - response = requests.post(api_url, headers=headers, data=json.dumps(body)) - response_data = json.loads(response.text) - - if response_data["ok"]: - logging.info("Message sent successfully!") - else: - logging.fatal("Error sending message: %s", response_data['error']) - - -def main(): - # Setting this globally to DEBUG will also debug PyGithub, - # which will produce even more log output - logging.basicConfig(level=logging.INFO) - ensure_environment_variables_are_present() - - msg = get_gh_release_info_text_with_token(os.environ['GITHUB_RELEASE_TAG'], - os.environ['GITHUB_TOKEN']) - - post_on_slack_channel(os.environ['SLACK_BOT_TOKEN'], - os.environ['SLACK_CHANNEL_ID_RELEASES'], - msg) - - -if 
__name__ == "__main__": - main() diff --git a/bin/aws-lambda/build_and_publish_lambda_layer.py b/bin/aws-lambda/build_and_publish_lambda_layer.py index 22fd7ad6..0c8dad2d 100755 --- a/bin/aws-lambda/build_and_publish_lambda_layer.py +++ b/bin/aws-lambda/build_and_publish_lambda_layer.py @@ -107,7 +107,7 @@ ] if dev_mode: - target_regions = ["us-west-1"] + target_regions = ["us-east-1"] LAYER_NAME = "instana-py-dev" else: target_regions = [ @@ -149,6 +149,7 @@ LAYER_NAME = "instana-python" published = dict() +version = 0 for region in target_regions: print(f"===> Uploading layer to AWS {region} ") @@ -170,12 +171,14 @@ "--zip-file", aws_zip_filename, "--compatible-runtimes", - "python3.8", "python3.9", "python3.10", "python3.11", "python3.12", "python3.13", + "--compatible-architectures", + "x86_64", + "arm64", "--region", region, "--profile", @@ -217,5 +220,8 @@ print("===> Published list:") +print(f"AWS Lambda Layer v{version}") +print("| AWS Region | ARN |") +print("| :-- | :-- |") for key in published.keys(): - print(f"{key}\t{published[key]}") + print(f"| {key} | {published[key]} |") diff --git a/docker-compose.yml b/docker-compose.yml index 45393b76..299806a5 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -61,16 +61,39 @@ services: - "8681:8681" - "8682:8682" + # Sidecar container for Kafka + zookeeper: + image: public.ecr.aws/ubuntu/zookeeper:3.1-22.04_edge + ports: ["2181:2181"] + environment: [ "TZ=UTC" ] + kafka: - image: public.ecr.aws/bitnami/kafka:latest - ports: - - '9092:9092' - - '9094:9094' + image: public.ecr.aws/ubuntu/kafka:3.1-22.04_edge + depends_on: [zookeeper] + ports: + - "9094:9094" + - "9093:9093" environment: - - KAFKA_CFG_NODE_ID=0 - - KAFKA_CFG_PROCESS_ROLES=controller,broker - - KAFKA_CFG_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093,EXTERNAL://:9094 - - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT,PLAINTEXT:PLAINTEXT - - KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@kafka:9093 - - KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER - - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092,EXTERNAL://localhost:9094 + - TZ=UTC + - ZOOKEEPER_HOST=zookeeper + - ZOOKEEPER_PORT=2181 + command: + - /opt/kafka/config/server.properties + - --override + - listeners=INTERNAL://0.0.0.0:9093,EXTERNAL://0.0.0.0:9094 + - --override + - advertised.listeners=INTERNAL://kafka:9093,EXTERNAL://127.0.0.1:9094 + - --override + - listener.security.protocol.map=INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT + - --override + - inter.broker.listener.name=INTERNAL + - --override + - broker.id=1 + - --override + - offsets.topic.replication.factor=1 + - --override + - transaction.state.log.replication.factor=1 + - --override + - transaction.state.log.min.isr=1 + - --override + - auto.create.topics.enable=true diff --git a/pyproject.toml b/pyproject.toml index 19ca2507..c934a36c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,7 +9,7 @@ dynamic = [ ] description = "Python Distributed Tracing & Metrics Sensor for Instana." 
readme = "README.md" -requires-python = ">=3.8" +requires-python = ">=3.9" license = "MIT" keywords = [ "performance", @@ -31,12 +31,12 @@ classifiers = [ "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python", - "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", "Programming Language :: Python :: Implementation :: CPython", "Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware", "Topic :: System :: Monitoring", @@ -47,13 +47,11 @@ dependencies = [ "autowrapt>=1.0", "fysom>=2.1.2", "requests>=2.6.0", - "six>=1.12.0", "urllib3>=1.26.5", "opentelemetry-api>=1.27.0", "opentelemetry-semantic-conventions>=0.48b0", "typing_extensions>=4.12.2", "pyyaml>=6.0.2", - "setuptools>=69.0.0; python_version >= \"3.12\"", "psutil>=5.9.0; sys_platform == \"win32\"", ] @@ -70,7 +68,7 @@ dev = [ ] [project.urls] -Documentation = "https://www.ibm.com/docs/en/instana-observability/current?topic=technologies-monitoring-python-instana-python-package" +Documentation = "https://ibm.biz/monitoring-python" Issues = "https://github.com/instana/python-sensor/issues" Source = "https://github.com/instana/python-sensor" @@ -81,6 +79,8 @@ path = "src/instana/version.py" include = [ "/src", "/tests", + "/tests_autowrapt", + "/tests_aws", ] [tool.hatch.build.targets.wheel] diff --git a/sonar-project.properties b/sonar-project.properties index 56b3d211..b373d6be 100644 --- a/sonar-project.properties +++ b/sonar-project.properties @@ -1,11 +1,10 @@ -sonar.projectKey=Python-Tracer -sonar.projectName=Python Tracer +sonar.projectKey=instana_python-sensor +sonar.organization=instana +sonar.projectName=python-sensor sonar.sourceEncoding=utf-8 sonar.sources=src/instana/ -sonar.tests=tests/ sonar.python.coverage.reportPaths=coverage.xml sonar.python.version=3 sonar.links.homepage=https://github.com/instana/python-sensor/ sonar.links.ci=https://circleci.com/gh/instana/python-sensor sonar.links.issue=https://github.com/instana/python-sensor/issues -sonar.links.scm=https://github.com/instana/python-sensor/ diff --git a/src/instana/__init__.py b/src/instana/__init__.py index 7a9ec0b1..f9511537 100644 --- a/src/instana/__init__.py +++ b/src/instana/__init__.py @@ -13,12 +13,14 @@ import importlib import os import sys +from importlib import util as importlib_util from typing import Tuple from instana.collector.helpers.runtime import ( is_autowrapt_instrumented, is_webhook_instrumented, ) +from instana.util.config import is_truthy from instana.version import VERSION __author__ = "Instana Inc." 
@@ -70,7 +72,7 @@ def load(_: object) -> None: def apply_gevent_monkey_patch() -> None: from gevent import monkey - if os.environ.get("INSTANA_GEVENT_MONKEY_OPTIONS"): + if provided_options := os.environ.get("INSTANA_GEVENT_MONKEY_OPTIONS"): def short_key(k: str) -> str: return k[3:] if k.startswith("no-") else k @@ -81,12 +83,8 @@ def key_to_bool(k: str) -> bool: import inspect all_accepted_patch_all_args = inspect.getfullargspec(monkey.patch_all)[0] - provided_options = ( - os.environ.get("INSTANA_GEVENT_MONKEY_OPTIONS") - .replace(" ", "") - .replace("--", "") - .split(",") - ) + provided_options = provided_options.replace(" ", "").replace("--", "").split(",") + provided_options = [ k for k in provided_options if short_key(k) in all_accepted_patch_all_args ] @@ -115,9 +113,7 @@ def get_aws_lambda_handler() -> Tuple[str, str]: handler_function = "lambda_handler" try: - handler = os.environ.get("LAMBDA_HANDLER", False) - - if handler: + if handler := os.environ.get("LAMBDA_HANDLER", None): parts = handler.split(".") handler_function = parts.pop().strip() handler_module = ".".join(parts).strip() @@ -159,13 +155,10 @@ def boot_agent() -> None: import instana.singletons # noqa: F401 - # Instrumentation + # Import & initialize instrumentation if "INSTANA_DISABLE_AUTO_INSTR" not in os.environ: - # TODO: remove the following entries as the migration of the - # instrumentation codes are finalised. - - # Import & initialize instrumentation from instana.instrumentation import ( + aio_pika, # noqa: F401 aioamqp, # noqa: F401 asyncio, # noqa: F401 cassandra, # noqa: F401 @@ -173,7 +166,6 @@ def boot_agent() -> None: couchbase, # noqa: F401 fastapi, # noqa: F401 flask, # noqa: F401 - # gevent_inst, # noqa: F401 grpcio, # noqa: F401 httpx, # noqa: F401 logging, # noqa: F401 @@ -186,11 +178,11 @@ def boot_agent() -> None: pyramid, # noqa: F401 redis, # noqa: F401 sanic, # noqa: F401 + spyne, # noqa: F401 sqlalchemy, # noqa: F401 starlette, # noqa: F401 urllib3, # noqa: F401 - spyne, # noqa: F401 - aio_pika, # noqa: F401 + gevent, # noqa: F401 ) from instana.instrumentation.aiohttp import ( client as aiohttp_client, # noqa: F401 @@ -225,7 +217,23 @@ def boot_agent() -> None: ) -if "INSTANA_DISABLE" not in os.environ: +def _start_profiler() -> None: + """Start the Instana Auto Profile.""" + from instana.singletons import get_profiler + + if profiler := get_profiler(): + profiler.start() + + +if "INSTANA_DISABLE" in os.environ: # pragma: no cover + import warnings + + message = "Instana: The INSTANA_DISABLE environment variable is deprecated. Please use INSTANA_TRACING_DISABLE=True instead." + warnings.simplefilter("always") + warnings.warn(message, DeprecationWarning) + + +if not is_truthy(os.environ.get("INSTANA_TRACING_DISABLE", None)): # There are cases when sys.argv may not be defined at load time. Seems to happen in embedded Python, # and some Pipenv installs. If this is the case, it's best effort. 
if ( @@ -243,15 +251,12 @@ def boot_agent() -> None: if ( (is_autowrapt_instrumented() or is_webhook_instrumented()) and "INSTANA_DISABLE_AUTO_INSTR" not in os.environ - and importlib.util.find_spec("gevent") + and importlib_util.find_spec("gevent") ): apply_gevent_monkey_patch() + # AutoProfile if "INSTANA_AUTOPROFILE" in os.environ: - from instana.singletons import get_profiler - - profiler = get_profiler() - if profiler: - profiler.start() + _start_profiler() boot_agent() diff --git a/src/instana/agent/host.py b/src/instana/agent/host.py index 177ca44c..9ecc74ca 100644 --- a/src/instana/agent/host.py +++ b/src/instana/agent/host.py @@ -9,7 +9,7 @@ import json import os from datetime import datetime -from typing import Any, Dict, List, Optional, Union +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union import requests import urllib3 @@ -17,7 +17,7 @@ from instana.agent.base import BaseAgent from instana.collector.host import HostCollector -from instana.fsm import Discovery, TheMachine +from instana.fsm import TheMachine from instana.log import logger from instana.options import StandardOptions from instana.util import to_json @@ -25,6 +25,9 @@ from instana.util.span_utils import get_operation_specifiers from instana.version import VERSION +if TYPE_CHECKING: + from instana.util.process_discovery import Discovery + class AnnounceData(object): """The Announce Payload""" @@ -135,10 +138,15 @@ def set_from( @return: None """ self.options.set_from(res_data) - self.announce_data = AnnounceData( - pid=res_data["pid"], - agentUuid=res_data["agentUuid"], - ) + + # Ensure required keys are present + if "pid" in res_data and "agentUuid" in res_data: + self.announce_data = AnnounceData( + pid=res_data["pid"], + agentUuid=res_data["agentUuid"], + ) + else: + logger.debug(f"Missing required keys in announce response: {res_data}") def get_from_structure(self) -> Dict[str, str]: """ @@ -176,7 +184,7 @@ def is_agent_listening( def announce( self, - discovery: Discovery, + discovery: "Discovery", ) -> Optional[Dict[str, Any]]: """ With the passed in Discovery class, attempt to announce to the host agent. 
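The guard added to `HostAgent.set_from` above means an incomplete announce reply no longer raises a `KeyError`; announce data is only recorded when both keys are present. A standalone approximation of the new behaviour (simplified types, not the real agent class):

    from dataclasses import dataclass
    from typing import Any, Dict, Optional

    @dataclass
    class AnnounceData:
        pid: int
        agentUuid: str

    def set_from(res_data: Dict[str, Any]) -> Optional[AnnounceData]:
        # Mirrors the guard in HostAgent.set_from: require both keys.
        if "pid" in res_data and "agentUuid" in res_data:
            return AnnounceData(pid=res_data["pid"], agentUuid=res_data["agentUuid"])
        # The real method logs the incomplete payload at debug level instead.
        return None

    assert set_from({"pid": 42, "agentUuid": "0123-abcd"}) is not None
    assert set_from({"pid": 42}) is None  # previously a KeyError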
diff --git a/src/instana/autoprofile/profiler.py b/src/instana/autoprofile/profiler.py index 2e685a0e..dc417c46 100644 --- a/src/instana/autoprofile/profiler.py +++ b/src/instana/autoprofile/profiler.py @@ -17,6 +17,7 @@ if TYPE_CHECKING: from types import FrameType + from instana.agent.host import HostAgent @@ -52,11 +53,11 @@ def start(self, **kwargs: Dict[str, Any]) -> None: return try: - if not min_version(3, 8): - raise Exception("Supported Python versions 3.8 or higher.") + if not min_version(3, 9): + raise EnvironmentError("Supported Python versions: 3.9 or higher.") if platform.python_implementation() != "CPython": - raise Exception("Supported Python interpreter is CPython.") + raise EnvironmentError("Supported Python interpreter: CPython.") if self.profiler_destroyed: logger.warning("Destroyed profiler cannot be started.") diff --git a/src/instana/fsm.py b/src/instana/fsm.py index 1897cf30..c4145a5f 100644 --- a/src/instana/fsm.py +++ b/src/instana/fsm.py @@ -8,61 +8,47 @@ import subprocess import sys import threading +from typing import TYPE_CHECKING, Any, Callable from fysom import Fysom -from .log import logger -from .util import get_default_gateway -from .version import VERSION +from instana.log import logger +from instana.util import get_default_gateway +from instana.util.process_discovery import Discovery +from instana.version import VERSION +if TYPE_CHECKING: + from instana.agent.host import HostAgent -class Discovery(object): - pid = 0 - name = None - args = None - fd = -1 - inode = "" - def __init__(self, **kwds): - self.__dict__.update(kwds) - - def to_dict(self): - kvs = dict() - kvs['pid'] = self.pid - kvs['name'] = self.name - kvs['args'] = self.args - kvs['fd'] = self.fd - kvs['inode'] = self.inode - return kvs - - -class TheMachine(object): +class TheMachine: RETRY_PERIOD = 30 THREAD_NAME = "Instana Machine" - agent = None - fsm = None - timer = None - warnedPeriodic = False - def __init__(self, agent): + def __init__(self, agent: "HostAgent") -> None: logger.debug("Initializing host agent state machine") self.agent = agent - self.fsm = Fysom({ - "events": [ - ("lookup", "*", "found"), - ("announce", "found", "announced"), - ("pending", "announced", "wait4init"), - ("ready", "wait4init", "good2go")], - "callbacks": { - # Can add the following to debug - # "onchangestate": self.print_state_change, - "onlookup": self.lookup_agent_host, - "onannounce": self.announce_sensor, - "onpending": self.on_ready, - "ongood2go": self.on_good2go}}) + self.fsm = Fysom( + { + "events": [ + ("lookup", "*", "found"), + ("announce", "found", "announced"), + ("pending", "announced", "wait4init"), + ("ready", "wait4init", "good2go"), + ], + "callbacks": { + # Can add the following to debug + # "onchangestate": self.print_state_change, + "onlookup": self.lookup_agent_host, + "onannounce": self.announce_sensor, + "onpending": self.on_ready, + "ongood2go": self.on_good2go, + }, + } + ) self.timer = threading.Timer(1, self.fsm.lookup) self.timer.daemon = True @@ -70,11 +56,12 @@ def __init__(self, agent): self.timer.start() @staticmethod - def print_state_change(e): - logger.debug('========= (%i#%s) FSM event: %s, src: %s, dst: %s ==========', - os.getpid(), threading.current_thread().name, e.event, e.src, e.dst) + def print_state_change(e: Any) -> None: + logger.debug( + f"========= ({os.getpid()}#{threading.current_thread().name}) FSM event: {e.event}, src: {e.src}, dst: {e.dst} ==========" + ) - def reset(self): + def reset(self) -> None: """ reset is called to start from scratch in a 
process. It may be called on first boot or after a detected fork. @@ -87,7 +74,7 @@ def reset(self): logger.debug("State machine being reset. Will start a new announce cycle.") self.fsm.lookup() - def lookup_agent_host(self, e): + def lookup_agent_host(self, e: Any) -> bool: host = self.agent.options.agent_host port = self.agent.options.agent_port @@ -105,39 +92,43 @@ def lookup_agent_host(self, e): return True if self.warnedPeriodic is False: - logger.info("Instana Host Agent couldn't be found. Will retry periodically...") + logger.info( + "Instana Host Agent couldn't be found. Will retry periodically..." + ) self.warnedPeriodic = True - self.schedule_retry(self.lookup_agent_host, e, self.THREAD_NAME + ": agent_lookup") + self.schedule_retry( + self.lookup_agent_host, e, f"{self.THREAD_NAME}: agent_lookup" + ) return False - def announce_sensor(self, e): - logger.debug("Attempting to make an announcement to the agent on %s:%d", - self.agent.options.agent_host, self.agent.options.agent_port) + def announce_sensor(self, e: Any) -> bool: + logger.debug( + f"Attempting to make an announcement to the agent on {self.agent.options.agent_host}:{self.agent.options.agent_port}" + ) pid = os.getpid() try: if os.path.isfile("/proc/self/cmdline"): with open("/proc/self/cmdline") as cmd: cmdinfo = cmd.read() - cmdline = cmdinfo.split('\x00') + cmdline = cmdinfo.split("\x00") else: # Python doesn't provide a reliable method to determine what # the OS process command line may be. Here we are forced to # rely on ps rather than adding a dependency on something like # psutil which requires dev packages, gcc etc... - proc = subprocess.Popen(["ps", "-p", str(pid), "-o", "command"], - stdout=subprocess.PIPE) + proc = subprocess.Popen( + ["ps", "-p", str(pid), "-o", "args"], stdout=subprocess.PIPE + ) (out, _) = proc.communicate() - parts = out.split(b'\n') + parts = out.split(b"\n") cmdline = [parts[1].decode("utf-8")] except Exception: cmdline = sys.argv logger.debug("announce_sensor", exc_info=True) - d = Discovery(pid=self.__get_real_pid(), - name=cmdline[0], - args=cmdline[1:]) + d = Discovery(pid=self.__get_real_pid(), name=cmdline[0], args=cmdline[1:]) # If we're on a system with a procfs if os.path.exists("/proc/"): @@ -146,47 +137,56 @@ def announce_sensor(self, e): # PermissionError: [Errno 13] Permission denied: '/proc/6/fd/8' # Use a try/except as a safety sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - sock.connect((self.agent.options.agent_host, self.agent.options.agent_port)) - path = "/proc/%d/fd/%d" % (pid, sock.fileno()) + sock.connect( + (self.agent.options.agent_host, self.agent.options.agent_port) + ) + path = f"/proc/{pid}/fd/{sock.fileno()}" d.fd = sock.fileno() d.inode = os.readlink(path) - except: + except: # noqa: E722 logger.debug("Error generating file descriptor: ", exc_info=True) payload = self.agent.announce(d) - if not payload: + if not payload or not isinstance(payload, dict): logger.debug("Cannot announce sensor. Scheduling retry.") - self.schedule_retry(self.announce_sensor, e, self.THREAD_NAME + ": announce") + self.schedule_retry( + self.announce_sensor, e, f"{self.THREAD_NAME}: announce" + ) return False - + self.agent.set_from(payload) self.fsm.pending() - logger.debug("Announced pid: %s (true pid: %s). Waiting for Agent Ready...", - str(pid), str(self.agent.announce_data.pid)) + logger.debug( + f"Announced PID: {pid} (true PID: {self.agent.announce_data.pid}). Waiting for Agent Ready..." 
+ ) return True - def schedule_retry(self, fun, e, name): + def schedule_retry(self, fun: Callable, e: Any, name: str) -> None: self.timer = threading.Timer(self.RETRY_PERIOD, fun, [e]) self.timer.daemon = True self.timer.name = name self.timer.start() - def on_ready(self, _): + def on_ready(self, _: Any) -> None: self.agent.start() ns_pid = str(os.getpid()) true_pid = str(self.agent.announce_data.pid) - logger.info("Instana host agent available. We're in business. Announced PID: %s (true pid: %s)", ns_pid, true_pid) + logger.info( + f"Instana host agent available. We're in business. Announced PID: {ns_pid} (true PID: {true_pid})" + ) - def on_good2go(self, _): + def on_good2go(self, _: Any) -> None: ns_pid = str(os.getpid()) true_pid = str(self.agent.announce_data.pid) - self.agent.log_message_to_host_agent("Instana Python Package %s: PID %s (true pid: %s) is now online and reporting" % (VERSION, ns_pid, true_pid)) + self.agent.log_message_to_host_agent( + f"Instana Python Package {VERSION}: PID {ns_pid} (true PID: {true_pid}) is now online and reporting" + ) - def __get_real_pid(self): + def __get_real_pid(self) -> int: """ Attempts to determine the true process ID by querying the /proc//sched file. This works on systems with a proc filesystem. @@ -195,14 +195,14 @@ def __get_real_pid(self): pid = None if os.path.exists("/proc/"): - sched_file = "/proc/%d/sched" % os.getpid() + sched_file = f"/proc/{os.getpid()}/sched" if os.path.isfile(sched_file): try: file = open(sched_file) line = file.readline() - g = re.search(r'\((\d+),', line) - if len(g.groups()) == 1: + g = re.search(r"\((\d+),", line) + if g and len(g.groups()) == 1: pid = int(g.groups()[0]) except Exception: logger.debug("parsing sched file failed", exc_info=True) @@ -211,3 +211,6 @@ def __get_real_pid(self): pid = os.getpid() return pid + + +# Made with Bob diff --git a/src/instana/hooks/hook_uwsgi.py b/src/instana/hooks/hook_uwsgi.py index 6287a9f9..1995ffeb 100644 --- a/src/instana/hooks/hook_uwsgi.py +++ b/src/instana/hooks/hook_uwsgi.py @@ -44,8 +44,11 @@ def uwsgi_handle_fork() -> None: logger.debug( f"uWSGI --master={opt_master} --lazy-apps={opt_lazy_apps}: postfork hooks not applied" ) + except ImportError: logger.debug( "uwsgi hooks: decorators not available: likely not running under uWSGI" ) - pass + +except AttributeError: + logger.debug("uwsgi hooks: Running under uWSGI but decorators not available") diff --git a/src/instana/instrumentation/aio_pika.py b/src/instana/instrumentation/aio_pika.py index 5e3f58d0..a47e09f7 100644 --- a/src/instana/instrumentation/aio_pika.py +++ b/src/instana/instrumentation/aio_pika.py @@ -47,14 +47,22 @@ async def publish_with_instana( tracer, parent_span, _ = get_tracer_tuple() parent_context = parent_span.get_span_context() if parent_span else None + def _bind_args( + message: Type["AbstractMessage"], + routing_key: str, + *args: object, + **kwargs: object, + ) -> Tuple[object, ...]: + return (message, routing_key, args, kwargs) + + (message, routing_key, args, kwargs) = _bind_args( + *args, **kwargs + ) + with tracer.start_as_current_span( "rabbitmq", span_context=parent_context ) as span: connection = instance.channel._connection - message = kwargs["message"] if kwargs.get("message") else args[0] - routing_key = ( - kwargs["routing_key"] if kwargs.get("routing_key") else args[1] - ) _extract_span_attributes( span, connection, "publish", routing_key, instance.name @@ -66,6 +74,9 @@ async def publish_with_instana( message.properties.headers, disable_w3c_trace_context=True, ) + + 
args = (message, routing_key) + args + try: response = await wrapped(*args, **kwargs) except Exception as exc: @@ -100,12 +111,12 @@ async def callback_wrapper( _extract_span_attributes( span, connection, "consume", message.routing_key, message.exchange ) - try: - response = await wrapped(*args, **kwargs) - except Exception as exc: - span.record_exception(exc) - else: - return response + try: + response = await wrapped(*args, **kwargs) + except Exception as exc: + span.record_exception(exc) + else: + return response wrapped_callback = callback_wrapper(callback) if kwargs.get("callback"): diff --git a/src/instana/instrumentation/aws/s3.py b/src/instana/instrumentation/aws/s3.py index 932d902a..d13b8bff 100644 --- a/src/instana/instrumentation/aws/s3.py +++ b/src/instana/instrumentation/aws/s3.py @@ -57,16 +57,26 @@ def collect_s3_injected_attributes( with tracer.start_as_current_span("s3", span_context=parent_context) as span: try: span.set_attribute("s3.op", operations[wrapped.__name__]) - if wrapped.__name__ in ["download_file", "download_fileobj"]: - span.set_attribute("s3.bucket", args[0]) - else: - span.set_attribute("s3.bucket", args[1]) + if "Bucket" in kwargs: + span.set_attribute("s3.bucket", kwargs["Bucket"]) + elif len(args) > 1: + if wrapped.__name__ in ["download_file", "download_fileobj"]: + span.set_attribute("s3.bucket", args[0]) + else: + span.set_attribute("s3.bucket", args[1]) + except Exception: + logger.debug( + f"collect_s3_injected_attributes collect error: {wrapped.__name__}", exc_info=True + ) + + try: return wrapped(*args, **kwargs) except Exception as exc: span.record_exception(exc) logger.debug( - "collect_s3_injected_attributes: collect error", exc_info=True + f"collect_s3_injected_attributes error: {wrapped.__name__}", exc_info=True ) + raise for method in [ "upload_file", diff --git a/src/instana/instrumentation/fastapi.py b/src/instana/instrumentation/fastapi.py index b2e9b018..68b19f6a 100644 --- a/src/instana/instrumentation/fastapi.py +++ b/src/instana/instrumentation/fastapi.py @@ -71,6 +71,10 @@ def init_with_instana( kwargs["middleware"] = [Middleware(InstanaASGIMiddleware)] elif isinstance(middleware, list): middleware.append(Middleware(InstanaASGIMiddleware)) + elif isinstance(middleware, tuple): + kwargs["middleware"] = (*middleware, Middleware(InstanaASGIMiddleware)) + else: + logger.warning("Unsupported FastAPI middleware sequence type.") exception_handlers = kwargs.get("exception_handlers") if exception_handlers is None: diff --git a/src/instana/instrumentation/gevent.py b/src/instana/instrumentation/gevent.py index c083fb84..41ba057e 100644 --- a/src/instana/instrumentation/gevent.py +++ b/src/instana/instrumentation/gevent.py @@ -6,8 +6,11 @@ """ import sys -from ..log import logger -from ..singletons import tracer + +from opentelemetry import context +import contextvars + +from instana.log import logger def instrument_gevent(): @@ -16,26 +19,15 @@ def instrument_gevent(): logger.debug("Instrumenting gevent") import gevent - from opentracing.scope_managers.gevent import GeventScopeManager - from opentracing.scope_managers.gevent import _GeventScope def spawn_callback(new_greenlet): """Handles context propagation for newly spawning greenlets""" - parent_scope = tracer.scope_manager.active - if parent_scope is not None: - # New greenlet, new clean slate. 
Clone and make active in this new greenlet - # the currently active scope (but don't finish() the span on close - it's a - # clone/not the original and we don't want to close it prematurely) - # TODO: Change to our own ScopeManagers - parent_scope_clone = _GeventScope( - parent_scope.manager, parent_scope.span, finish_on_close=False - ) - tracer._scope_manager._set_greenlet_scope( - parent_scope_clone, new_greenlet - ) - - logger.debug(" -> Updating tracer to use gevent based context management") - tracer._scope_manager = GeventScopeManager() + parent_context = context.get_current() + new_context = contextvars.Context() + + new_context.run(lambda: context.attach(parent_context)) + new_greenlet.gr_context = new_context + gevent.Greenlet.add_spawn_callback(spawn_callback) except Exception: logger.debug("instrument_gevent: ", exc_info=True) @@ -43,11 +35,5 @@ def spawn_callback(new_greenlet): if "gevent" not in sys.modules: logger.debug("Instrumenting gevent: gevent not detected or loaded. Nothing done.") -elif not hasattr(sys.modules["gevent"], "version_info"): - logger.debug("gevent module has no 'version_info'. Skipping instrumentation.") -elif sys.modules["gevent"].version_info < (1, 4): - logger.debug( - "gevent < 1.4 detected. The Instana package supports gevent versions 1.4 and greater." - ) else: instrument_gevent() diff --git a/src/instana/instrumentation/kafka/confluent_kafka_python.py b/src/instana/instrumentation/kafka/confluent_kafka_python.py index 04b1164c..f2f327f1 100644 --- a/src/instana/instrumentation/kafka/confluent_kafka_python.py +++ b/src/instana/instrumentation/kafka/confluent_kafka_python.py @@ -1,19 +1,24 @@ # (c) Copyright IBM Corp. 2025 + try: + import contextvars from typing import Any, Callable, Dict, List, Optional, Tuple import confluent_kafka # noqa: F401 import wrapt from confluent_kafka import Consumer, Producer + from opentelemetry import context, trace from opentelemetry.trace import SpanKind from instana.log import logger from instana.propagators.format import Format - from instana.util.traceutils import ( - get_tracer_tuple, - tracing_is_off, - ) + from instana.singletons import get_tracer + from instana.span.span import InstanaSpan + from instana.util.traceutils import get_tracer_tuple, tracing_is_off + + consumer_token = None + consumer_span = contextvars.ContextVar("confluent_kafka_consumer_span") # As confluent_kafka is a wrapper around the C-developed librdkafka # (provided automatically via binary wheels), we have to create new classes @@ -47,6 +52,9 @@ def poll( ) -> Optional[confluent_kafka.Message]: return super().poll(timeout) + def close(self) -> None: + return super().close() + def trace_kafka_produce( wrapped: Callable[..., InstanaConfluentKafkaProducer.produce], instance: InstanaConfluentKafkaProducer, @@ -58,16 +66,20 @@ def trace_kafka_produce( tracer, parent_span, _ = get_tracer_tuple() parent_context = parent_span.get_span_context() if parent_span else None + + # Get the topic from either args or kwargs + topic = args[0] if args else kwargs.get("topic", "") + is_suppressed = tracer.exporter._HostAgent__is_endpoint_ignored( "kafka", "produce", - args[0], + topic, ) with tracer.start_as_current_span( "kafka-producer", span_context=parent_context, kind=SpanKind.PRODUCER ) as span: - span.set_attribute("kafka.service", args[0]) + span.set_attribute("kafka.service", topic) span.set_attribute("kafka.access", "produce") # context propagation @@ -78,6 +90,10 @@ def trace_kafka_produce( # dictionary. 
To maintain compatibility with the headers for the # Kafka Python library, we will use a list of tuples. headers = args[6] if len(args) > 6 else kwargs.get("headers", []) + + # Initialize headers if it's None + if headers is None: + headers = [] suppression_header = {"x_instana_l_s": "0" if is_suppressed else "1"} headers.append(suppression_header) @@ -105,25 +121,82 @@ def create_span( headers: Optional[List[Tuple[str, bytes]]] = [], exception: Optional[str] = None, ) -> None: - tracer, parent_span, _ = get_tracer_tuple() - parent_context = ( - parent_span.get_span_context() - if parent_span - else tracer.extract( - Format.KAFKA_HEADERS, - headers, - disable_w3c_trace_context=True, + try: + span = consumer_span.get(None) + if span is not None: + close_consumer_span(span) + + tracer, parent_span, _ = get_tracer_tuple() + + if not tracer: + tracer = get_tracer() + is_suppressed = False + + if topic: + is_suppressed = tracer.exporter._HostAgent__is_endpoint_ignored( + "kafka", + span_type, + topic, + ) + + if not is_suppressed and headers: + for header_name, header_value in headers: + if header_name == "x_instana_l_s" and header_value == b"0": + is_suppressed = True + break + + if is_suppressed: + return + + parent_context = ( + parent_span.get_span_context() + if parent_span + else ( + tracer.extract( + Format.KAFKA_HEADERS, + headers, + disable_w3c_trace_context=True, + ) + if tracer.exporter.options.kafka_trace_correlation + else None + ) + ) + span = tracer.start_span( + "kafka-consumer", span_context=parent_context, kind=SpanKind.CONSUMER ) - ) - with tracer.start_as_current_span( - "kafka-consumer", span_context=parent_context, kind=SpanKind.CONSUMER - ) as span: if topic: span.set_attribute("kafka.service", topic) span.set_attribute("kafka.access", span_type) - if exception: span.record_exception(exception) + span.end() + + save_consumer_span_into_context(span) + except Exception as e: + logger.debug( + f"Error while creating kafka-consumer span: {e}" + ) # pragma: no cover + + def save_consumer_span_into_context(span: "InstanaSpan") -> None: + global consumer_token + ctx = trace.set_span_in_context(span) + consumer_token = context.attach(ctx) + consumer_span.set(span) + + def close_consumer_span(span: "InstanaSpan") -> None: + global consumer_token + if span.is_recording(): + span.end() + consumer_span.set(None) + if consumer_token is not None: + context.detach(consumer_token) + consumer_token = None + + def clear_context() -> None: + global consumer_token + context.attach(trace.set_span_in_context(None)) + consumer_token = None + consumer_span.set(None) def trace_kafka_consume( wrapped: Callable[..., InstanaConfluentKafkaConsumer.consume], @@ -131,24 +204,41 @@ def trace_kafka_consume( args: Tuple[int, str, Tuple[Any, ...]], kwargs: Dict[str, Any], ) -> List[confluent_kafka.Message]: - if tracing_is_off(): - return wrapped(*args, **kwargs) - res = None exception = None try: res = wrapped(*args, **kwargs) + for message in res: + create_span("consume", message.topic(), message.headers()) + return res except Exception as exc: exception = exc - finally: - if res: - for message in res: - create_span("consume", message.topic(), message.headers()) - else: - create_span("consume", exception=exception) + create_span("consume", exception=exception) - return res + def trace_kafka_close( + wrapped: Callable[..., InstanaConfluentKafkaConsumer.close], + instance: InstanaConfluentKafkaConsumer, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + ) -> None: + try: + # Close any existing consumer 
span before closing the consumer + span = consumer_span.get(None) + if span is not None: + close_consumer_span(span) + + # Execute the actual close operation + res = wrapped(*args, **kwargs) + + logger.debug("Kafka consumer closed and spans cleaned up") + return res + + except Exception: + # Still try to clean up the span even if close fails + span = consumer_span.get(None) + if span is not None: + close_consumer_span(span) def trace_kafka_poll( wrapped: Callable[..., InstanaConfluentKafkaConsumer.poll], @@ -156,27 +246,20 @@ def trace_kafka_poll( args: Tuple[int, str, Tuple[Any, ...]], kwargs: Dict[str, Any], ) -> Optional[confluent_kafka.Message]: - if tracing_is_off(): - return wrapped(*args, **kwargs) - res = None exception = None try: res = wrapped(*args, **kwargs) + create_span("poll", res.topic(), res.headers()) + return res except Exception as exc: exception = exc - finally: - if res: - create_span("poll", res.topic(), res.headers()) - else: - create_span( - "poll", - next(iter(instance.list_topics().topics)), - exception=exception, - ) - - return res + create_span( + "poll", + next(iter(instance.list_topics().topics)), + exception=exception, + ) # Apply the monkey patch confluent_kafka.Producer = InstanaConfluentKafkaProducer @@ -189,6 +272,9 @@ def trace_kafka_poll( InstanaConfluentKafkaConsumer, "consume", trace_kafka_consume ) wrapt.wrap_function_wrapper(InstanaConfluentKafkaConsumer, "poll", trace_kafka_poll) + wrapt.wrap_function_wrapper( + InstanaConfluentKafkaConsumer, "close", trace_kafka_close + ) logger.debug("Instrumenting Kafka (confluent_kafka)") except ImportError: diff --git a/src/instana/instrumentation/kafka/kafka_python.py b/src/instana/instrumentation/kafka/kafka_python.py index 278390f9..307b7d52 100644 --- a/src/instana/instrumentation/kafka/kafka_python.py +++ b/src/instana/instrumentation/kafka/kafka_python.py @@ -1,23 +1,28 @@ # (c) Copyright IBM Corp. 
2025 + try: + import contextvars import inspect from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple import kafka # noqa: F401 import wrapt + from opentelemetry import context, trace from opentelemetry.trace import SpanKind from instana.log import logger from instana.propagators.format import Format - from instana.util.traceutils import ( - get_tracer_tuple, - tracing_is_off, - ) + from instana.singletons import get_tracer + from instana.span.span import InstanaSpan + from instana.util.traceutils import get_tracer_tuple, tracing_is_off if TYPE_CHECKING: from kafka.producer.future import FutureRecordMetadata + consumer_token = None + consumer_span = contextvars.ContextVar("kafka_python_consumer_span") + @wrapt.patch_function_wrapper("kafka", "KafkaProducer.send") def trace_kafka_send( wrapped: Callable[..., "kafka.KafkaProducer.send"], @@ -30,19 +35,26 @@ def trace_kafka_send( tracer, parent_span, _ = get_tracer_tuple() parent_context = parent_span.get_span_context() if parent_span else None + + # Get the topic from either args or kwargs + topic = args[0] if args else kwargs.get("topic", "") + is_suppressed = tracer.exporter._HostAgent__is_endpoint_ignored( "kafka", "send", - args[0], + topic, ) with tracer.start_as_current_span( "kafka-producer", span_context=parent_context, kind=SpanKind.PRODUCER ) as span: - span.set_attribute("kafka.service", args[0]) + span.set_attribute("kafka.service", topic) span.set_attribute("kafka.access", "send") # context propagation headers = kwargs.get("headers", []) + if not is_suppressed and ("x_instana_l_s", b"0") in headers: + is_suppressed = True + suppression_header = {"x_instana_l_s": "0" if is_suppressed else "1"} headers.append(suppression_header) @@ -59,35 +71,84 @@ def trace_kafka_send( kwargs["headers"] = headers try: res = wrapped(*args, **kwargs) + return res except Exception as exc: span.record_exception(exc) - else: - return res def create_span( span_type: str, topic: Optional[str], headers: Optional[List[Tuple[str, bytes]]] = [], - exception: Optional[str] = None, + exception: Optional[Exception] = None, ) -> None: - tracer, parent_span, _ = get_tracer_tuple() - parent_context = ( - parent_span.get_span_context() - if parent_span - else tracer.extract( - Format.KAFKA_HEADERS, - headers, - disable_w3c_trace_context=True, + try: + span = consumer_span.get(None) + if span is not None: + close_consumer_span(span) + + tracer, parent_span, _ = get_tracer_tuple() + + if not tracer: + tracer = get_tracer() + + is_suppressed = False + if topic: + is_suppressed = tracer.exporter._HostAgent__is_endpoint_ignored( + "kafka", + span_type, + topic, + ) + + if not is_suppressed and headers: + if ("x_instana_l_s", b"0") in headers: + is_suppressed = True + + if is_suppressed: + return + + parent_context = ( + parent_span.get_span_context() + if parent_span + else tracer.extract( + Format.KAFKA_HEADERS, + headers, + disable_w3c_trace_context=True, + ) + ) + span = tracer.start_span( + "kafka-consumer", span_context=parent_context, kind=SpanKind.CONSUMER ) - ) - with tracer.start_as_current_span( - "kafka-consumer", span_context=parent_context, kind=SpanKind.CONSUMER - ) as span: if topic: span.set_attribute("kafka.service", topic) span.set_attribute("kafka.access", span_type) if exception: span.record_exception(exception) + span.end() + + save_consumer_span_into_context(span) + except Exception: + pass + + def save_consumer_span_into_context(span: "InstanaSpan") -> None: + global consumer_token + ctx = trace.set_span_in_context(span) + 
consumer_token = context.attach(ctx) + consumer_span.set(span) + + def close_consumer_span(span: "InstanaSpan") -> None: + global consumer_token + if span.is_recording(): + span.end() + consumer_span.set(None) + if consumer_token is not None: + context.detach(consumer_token) + consumer_token = None + + def clear_context() -> None: + global consumer_token + context.attach(trace.set_span_in_context(None)) + consumer_token = None + consumer_span.set(None) @wrapt.patch_function_wrapper("kafka", "KafkaConsumer.__next__") def trace_kafka_consume( @@ -96,29 +157,41 @@ def trace_kafka_consume( args: Tuple[int, str, Tuple[Any, ...]], kwargs: Dict[str, Any], ) -> "FutureRecordMetadata": - if tracing_is_off(): - return wrapped(*args, **kwargs) - exception = None res = None try: res = wrapped(*args, **kwargs) + create_span( + "consume", + res.topic if res else list(instance.subscription())[0], + res.headers, + ) + return res + except StopIteration: + pass except Exception as exc: exception = exc - finally: - if res: - create_span( - "consume", - res.topic if res else list(instance.subscription())[0], - res.headers, - ) - else: - create_span( - "consume", list(instance.subscription())[0], exception=exception - ) + create_span( + "consume", list(instance.subscription())[0], exception=exception + ) - return res + @wrapt.patch_function_wrapper("kafka", "KafkaConsumer.close") + def trace_kafka_close( + wrapped: Callable[..., None], + instance: "kafka.KafkaConsumer", + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + ) -> None: + try: + span = consumer_span.get(None) + if span is not None: + close_consumer_span(span) + except Exception as e: + logger.debug( + f"Error while closing kafka-consumer span: {e}" + ) # pragma: no cover + return wrapped(*args, **kwargs) @wrapt.patch_function_wrapper("kafka", "KafkaConsumer.poll") def trace_kafka_poll( @@ -127,9 +200,6 @@ def trace_kafka_poll( args: Tuple[int, str, Tuple[Any, ...]], kwargs: Dict[str, Any], ) -> Optional[Dict[str, Any]]: - if tracing_is_off(): - return wrapped(*args, **kwargs) - # The KafkaConsumer.consume() from the kafka-python-ng call the # KafkaConsumer.poll() internally, so we do not consider it here. if any( @@ -143,23 +213,17 @@ def trace_kafka_poll( try: res = wrapped(*args, **kwargs) + for partition, consumer_records in res.items(): + for message in consumer_records: + create_span( + "poll", + partition.topic, + message.headers if hasattr(message, "headers") else [], + ) + return res except Exception as exc: exception = exc - finally: - if res: - for partition, consumer_records in res.items(): - for message in consumer_records: - create_span( - "poll", - partition.topic, - message.headers if hasattr(message, "headers") else [], - ) - else: - create_span( - "poll", list(instance.subscription())[0], exception=exception - ) - - return res + create_span("poll", list(instance.subscription())[0], exception=exception) logger.debug("Instrumenting Kafka (kafka-python)") except ImportError: diff --git a/src/instana/instrumentation/logging.py b/src/instana/instrumentation/logging.py index 9bb58885..fdbaaa58 100644 --- a/src/instana/instrumentation/logging.py +++ b/src/instana/instrumentation/logging.py @@ -10,6 +10,7 @@ import wrapt from instana.log import logger +from instana.singletons import agent from instana.util.runtime import get_runtime_env_info from instana.util.traceutils import get_tracer_tuple, tracing_is_off @@ -27,12 +28,18 @@ def log_with_instana( # We take into consideration if `stacklevel` is already present in `kwargs`. 
# This prevents the error `_log() got multiple values for keyword argument 'stacklevel'` - stacklevel_in = kwargs.pop("stacklevel", 1 if get_runtime_env_info()[0] not in ["ppc64le", "s390x"] else 2) - stacklevel = stacklevel_in + 1 + (sys.version_info >= (3, 14)) + stacklevel_in = kwargs.pop( + "stacklevel", 1 if get_runtime_env_info()[0] not in ["ppc64le", "s390x"] else 2 + ) + stacklevel = stacklevel_in + 1 try: - # Only needed if we're tracing and serious log - if tracing_is_off() or argv[0] < logging.WARN: + # Only needed if we're tracing and serious log and logging spans are not disabled + if ( + tracing_is_off() + or argv[0] < logging.WARN + or agent.options.is_span_disabled(category="logging") + ): return wrapped(*argv, **kwargs, stacklevel=stacklevel) tracer, parent_span, _ = get_tracer_tuple() diff --git a/src/instana/instrumentation/urllib3.py b/src/instana/instrumentation/urllib3.py index 4536d2be..52d3e9c8 100644 --- a/src/instana/instrumentation/urllib3.py +++ b/src/instana/instrumentation/urllib3.py @@ -11,7 +11,11 @@ from instana.propagators.format import Format from instana.singletons import agent from instana.util.secrets import strip_secrets_from_query -from instana.util.traceutils import get_tracer_tuple, tracing_is_off, extract_custom_headers +from instana.util.traceutils import ( + get_tracer_tuple, + tracing_is_off, + extract_custom_headers, +) if TYPE_CHECKING: from instana.span.span import InstanaSpan @@ -91,7 +95,23 @@ def urlopen_with_instana( tracer, parent_span, span_name = get_tracer_tuple() # If we're not tracing, just return; boto3 has it's own visibility - if tracing_is_off() or (span_name == "boto3"): + # Also, skip creating spans for internal Instana calls when + # 'com.instana' appears in either the full URL, the path argument, + # or the connection host. 
+ request_url_or_path = ( + kwargs.get("request_url") + or kwargs.get("url") + or (args[1] if len(args) >= 2 else "") + or "" + ) + host = getattr(instance, "host", "") or "" + + if ( + tracing_is_off() + or span_name == "boto3" + or "com.instana" in request_url_or_path + or "com.instana" in host + ): return wrapped(*args, **kwargs) parent_context = parent_span.get_span_context() if parent_span else None diff --git a/src/instana/instrumentation/wsgi.py b/src/instana/instrumentation/wsgi.py index 5ab7a2f7..ea020495 100644 --- a/src/instana/instrumentation/wsgi.py +++ b/src/instana/instrumentation/wsgi.py @@ -5,7 +5,7 @@ Instana WSGI Middleware """ -from typing import Dict, Any, Callable, List, Tuple, Optional +from typing import Dict, Any, Callable, List, Tuple, Optional, Iterable, TYPE_CHECKING from opentelemetry.semconv.trace import SpanAttributes from opentelemetry import context, trace @@ -15,6 +15,8 @@ from instana.util.secrets import strip_secrets_from_query from instana.util.traceutils import extract_custom_headers +if TYPE_CHECKING: + from instana.span.span import InstanaSpan class InstanaWSGIMiddleware(object): """Instana WSGI middleware""" @@ -25,15 +27,29 @@ def __init__(self, app: object) -> None: def __call__(self, environ: Dict[str, Any], start_response: Callable) -> object: env = environ + # Extract context and start span + span_context = tracer.extract(Format.HTTP_HEADERS, env) + span = tracer.start_span("wsgi", span_context=span_context) + + # Attach context - this makes the span current + ctx = trace.set_span_in_context(span) + token = context.attach(ctx) + + # Extract custom headers from request + extract_custom_headers(span, env, format=True) + + # Set request attributes + _set_request_attributes(span, env) + def new_start_response( status: str, headers: List[Tuple[object, ...]], exc_info: Optional[Exception] = None, ) -> object: """Modified start response with additional headers.""" - extract_custom_headers(self.span, headers) + extract_custom_headers(span, headers) - tracer.inject(self.span.context, Format.HTTP_HEADERS, headers) + tracer.inject(span.context, Format.HTTP_HEADERS, headers) headers_str = [ (header[0], str(header[1])) @@ -41,39 +57,59 @@ def new_start_response( else header for header in headers ] - res = start_response(status, headers_str, exc_info) + # Set status code attribute sc = status.split(" ")[0] if 500 <= int(sc): - self.span.mark_as_errored() - - self.span.set_attribute(SpanAttributes.HTTP_STATUS_CODE, sc) - if self.span and self.span.is_recording(): - self.span.end() - if self.token: - context.detach(self.token) - return res - - span_context = tracer.extract(Format.HTTP_HEADERS, env) - self.span = tracer.start_span("wsgi", span_context=span_context) - - ctx = trace.set_span_in_context(self.span) - self.token = context.attach(ctx) - - extract_custom_headers(self.span, env, format=True) - - if "PATH_INFO" in env: - self.span.set_attribute("http.path", env["PATH_INFO"]) - if "QUERY_STRING" in env and len(env["QUERY_STRING"]): - scrubbed_params = strip_secrets_from_query( - env["QUERY_STRING"], - agent.options.secrets_matcher, - agent.options.secrets_list, - ) - self.span.set_attribute("http.params", scrubbed_params) - if "REQUEST_METHOD" in env: - self.span.set_attribute(SpanAttributes.HTTP_METHOD, env["REQUEST_METHOD"]) - if "HTTP_HOST" in env: - self.span.set_attribute("http.host", env["HTTP_HOST"]) - - return self.app(environ, new_start_response) + span.mark_as_errored() + + span.set_attribute(SpanAttributes.HTTP_STATUS_CODE, sc) + + return 
start_response(status, headers_str, exc_info) + + try: + iterable = self.app(environ, new_start_response) + + # Wrap the iterable to ensure span ends after iteration completes + return _end_span_after_iterating(iterable, span, token) + + except Exception as exc: + # If exception occurs before iteration completes, end span and detach token + if span and span.is_recording(): + span.record_exception(exc) + span.end() + if token: + context.detach(token) + raise exc + + +def _end_span_after_iterating( + iterable: Iterable[object], span: "InstanaSpan", token: object +) -> Iterable[object]: + try: + yield from iterable + finally: + # Ensure iterable cleanup (important for generators) + if hasattr(iterable, "close"): + iterable.close() + + # End span and detach token after iteration completes + if span and span.is_recording(): + span.end() + if token: + context.detach(token) + +def _set_request_attributes(span: "InstanaSpan", env: Dict[str, Any]) -> None: + if "PATH_INFO" in env: + span.set_attribute("http.path", env["PATH_INFO"]) + if "QUERY_STRING" in env and len(env["QUERY_STRING"]): + scrubbed_params = strip_secrets_from_query( + env["QUERY_STRING"], + agent.options.secrets_matcher, + agent.options.secrets_list, + ) + span.set_attribute("http.params", scrubbed_params) + if "REQUEST_METHOD" in env: + span.set_attribute(SpanAttributes.HTTP_METHOD, env["REQUEST_METHOD"]) + if "HTTP_HOST" in env: + span.set_attribute(SpanAttributes.HTTP_HOST, env["HTTP_HOST"]) diff --git a/src/instana/options.py b/src/instana/options.py index 356ea961..affaa266 100644 --- a/src/instana/options.py +++ b/src/instana/options.py @@ -16,12 +16,20 @@ import logging import os -from typing import Any, Dict +from typing import Any, Dict, Sequence from instana.configurator import config from instana.log import logger -from instana.util.config import (is_truthy, parse_ignored_endpoints, - parse_ignored_endpoints_from_yaml) +from instana.util.config import ( + SPAN_TYPE_TO_CATEGORY, + get_disable_trace_configurations_from_env, + get_disable_trace_configurations_from_local, + get_disable_trace_configurations_from_yaml, + is_truthy, + parse_ignored_endpoints, + parse_ignored_endpoints_from_yaml, + parse_span_disabling, +) from instana.util.runtime import determine_service_name @@ -37,6 +45,11 @@ def __init__(self, **kwds: Dict[str, Any]) -> None: self.ignore_endpoints = [] self.kafka_trace_correlation = True + # disabled_spans lists all categories and types that should be disabled + self.disabled_spans = [] + # enabled_spans lists all categories and types that should be enabled, preceding disabled_spans + self.enabled_spans = [] + self.set_trace_configurations() # Defaults @@ -75,8 +88,9 @@ def set_trace_configurations(self) -> None: ) # Check if either of the environment variables is truthy - if is_truthy(os.environ.get("INSTANA_ALLOW_EXIT_AS_ROOT", None)) or \ - is_truthy(os.environ.get("INSTANA_ALLOW_ROOT_EXIT_SPAN", None)): + if is_truthy(os.environ.get("INSTANA_ALLOW_EXIT_AS_ROOT", None)) or is_truthy( + os.environ.get("INSTANA_ALLOW_ROOT_EXIT_SPAN", None) + ): self.allow_exit_as_root = True # The priority is as follows: @@ -99,12 +113,69 @@ def set_trace_configurations(self) -> None: ) if "INSTANA_KAFKA_TRACE_CORRELATION" in os.environ: - self.kafka_trace_correlation = is_truthy(os.environ["INSTANA_KAFKA_TRACE_CORRELATION"]) + self.kafka_trace_correlation = is_truthy( + os.environ["INSTANA_KAFKA_TRACE_CORRELATION"] + ) elif isinstance(config.get("tracing"), dict) and "kafka" in config["tracing"]: 
self.kafka_trace_correlation = config["tracing"]["kafka"].get( "trace_correlation", True ) + self.set_disable_trace_configurations() + + def set_disable_trace_configurations(self) -> None: + disabled_spans = [] + enabled_spans = [] + + # The precedence is as follows: + # environment variables > in-code (local) config > agent config (configuration.yaml) + # For the env vars: INSTANA_TRACING_DISABLE > INSTANA_CONFIG_PATH + if "INSTANA_TRACING_DISABLE" in os.environ: + disabled_spans, enabled_spans = get_disable_trace_configurations_from_env() + elif "INSTANA_CONFIG_PATH" in os.environ: + disabled_spans, enabled_spans = get_disable_trace_configurations_from_yaml() + else: + # In-code (local) config + # The agent config (configuration.yaml) is handled in StandardOptions.set_disable_tracing() + disabled_spans, enabled_spans = ( + get_disable_trace_configurations_from_local() + ) + + self.disabled_spans.extend(disabled_spans) + self.enabled_spans.extend(enabled_spans) + + def is_span_disabled(self, category=None, span_type=None) -> bool: + """ + Check if a span is disabled based on its category and type. + + Args: + category (str): The span category (e.g., "logging", "databases") + span_type (str): The span type (e.g., "redis", "kafka") + + Returns: + bool: True if the span is disabled, False otherwise + """ + # If span_type is provided, check if it's disabled + if span_type and span_type in self.disabled_spans: + return True + + # If category is provided directly, check if it's disabled + if category and category in self.disabled_spans: + return True + + # If span_type is provided but not explicitly configured, + # check if its parent category is disabled. Also check for the precedence rules + if span_type and span_type in SPAN_TYPE_TO_CATEGORY: + parent_category = SPAN_TYPE_TO_CATEGORY[span_type] + if ( + parent_category in self.disabled_spans + and span_type not in self.enabled_spans + ): + return True + + # Default: not disabled + return False + class StandardOptions(BaseOptions): """The options class used when running directly on a host/node with an Instana agent""" @@ -177,6 +248,26 @@ def set_tracing(self, tracing: Dict[str, Any]) -> None: if "extra-http-headers" in tracing: self.extra_http_headers = tracing["extra-http-headers"] + # Handle span disabling configuration + if "disable" in tracing: + self.set_disable_tracing(tracing["disable"]) + + def set_disable_tracing(self, tracing_config: Sequence[Dict[str, Any]]) -> None: + # The precedence is as follows: + # environment variables > in-code (local) config > agent config (configuration.yaml) + if ( + "INSTANA_TRACING_DISABLE" not in os.environ + and "INSTANA_CONFIG_PATH" not in os.environ + and not ( + isinstance(config.get("tracing"), dict) + and "disable" in config["tracing"] + ) + ): + # agent config (configuration.yaml) + disabled_spans, enabled_spans = parse_span_disabling(tracing_config) + self.disabled_spans.extend(disabled_spans) + self.enabled_spans.extend(enabled_spans) + def set_from(self, res_data: Dict[str, Any]) -> None: """ Set the source identifiers given to use by the Instana Host agent. 
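The precedence rules above are subtle: an explicitly listed span type wins over its parent category, and an entry in `enabled_spans` re-enables a type whose whole category is disabled. A condensed, self-contained restatement of `is_span_disabled`, with an illustrative excerpt of the category mapping:

    SPAN_TYPE_TO_CATEGORY = {  # excerpt of the full mapping in instana/util/config.py
        "redis": "databases",
        "postgresql": "databases",
        "kafka": "messaging",
    }

    def is_span_disabled(disabled, enabled, category=None, span_type=None) -> bool:
        # Condensed restatement of BaseOptions.is_span_disabled.
        if span_type and span_type in disabled:
            return True
        if category and category in disabled:
            return True
        if span_type and span_type in SPAN_TYPE_TO_CATEGORY:
            parent = SPAN_TYPE_TO_CATEGORY[span_type]
            if parent in disabled and span_type not in enabled:
                return True
        return False

    # "databases" disabled as a category, but "redis" explicitly re-enabled:
    assert not is_span_disabled(["databases"], ["redis"], span_type="redis")
    assert is_span_disabled(["databases"], ["redis"], span_type="postgresql")
    assert not is_span_disabled(["databases"], ["redis"], span_type="kafka")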
diff --git a/src/instana/propagators/http_propagator.py b/src/instana/propagators/http_propagator.py index 76ca3114..c6491076 100644 --- a/src/instana/propagators/http_propagator.py +++ b/src/instana/propagators/http_propagator.py @@ -5,6 +5,7 @@ from instana.log import logger from instana.propagators.base_propagator import BasePropagator from instana.util.ids import define_server_timing, hex_id_limited +from instana.span_context import SpanContext from opentelemetry.trace.span import format_span_id @@ -27,7 +28,26 @@ def inject(self, span_context, carrier, disable_w3c_trace_context=False): # Suppression `level` made in the child context or in the parent context # has priority over any non-suppressed `level` setting child_level = int(self.extract_instana_headers(dictionary_carrier)[2] or "1") - span_context.level = min(child_level, span_context.level) + new_level = min(child_level, span_context.level) + + if new_level != span_context.level: + # Create a new span context with the updated level + span_context = SpanContext( + trace_id=span_context.trace_id, + span_id=span_context.span_id, + is_remote=span_context.is_remote, + trace_flags=span_context.trace_flags, + trace_state=span_context.trace_state, + level=new_level, + synthetic=span_context.synthetic, + trace_parent=span_context.trace_parent, + instana_ancestor=span_context.instana_ancestor, + long_trace_id=span_context.long_trace_id, + correlation_type=span_context.correlation_type, + correlation_id=span_context.correlation_id, + traceparent=span_context.traceparent, + tracestate=span_context.tracestate + ) serializable_level = str(span_context.level) diff --git a/src/instana/propagators/kafka_propagator.py b/src/instana/propagators/kafka_propagator.py index 9ba27940..97bae58c 100644 --- a/src/instana/propagators/kafka_propagator.py +++ b/src/instana/propagators/kafka_propagator.py @@ -1,15 +1,12 @@ # (c) Copyright IBM Corp. 2025 -from typing import TYPE_CHECKING, Any, Dict, Optional +from typing import Any, Dict, Optional from opentelemetry.trace.span import format_span_id from instana.log import logger from instana.propagators.base_propagator import BasePropagator, CarrierT from instana.util.ids import hex_id_limited - -if TYPE_CHECKING: - from instana.span_context import SpanContext - +from instana.span_context import SpanContext class KafkaPropagator(BasePropagator): """ @@ -53,7 +50,7 @@ def extract_carrier_headers(self, carrier: CarrierT) -> Dict[str, Any]: def extract( self, carrier: CarrierT, disable_w3c_trace_context: bool = False - ) -> Optional["SpanContext"]: + ) -> Optional[SpanContext]: """ This method overrides one of the Base classes as with the introduction of W3C trace context for the Kafka requests more extracting steps and @@ -64,7 +61,7 @@ def extract( disable_w3c_trace_context (bool): A flag to disable the W3C trace context. Returns: - Optional["SpanContext"]: The extracted span context or None. + Optional[SpanContext]: The extracted span context or None. 
""" try: headers = self.extract_carrier_headers(carrier=carrier) @@ -79,7 +76,7 @@ def extract( # Assisted by watsonx Code Assistant def inject( self, - span_context: "SpanContext", + span_context: SpanContext, carrier: CarrierT, disable_w3c_trace_context: bool = True, ) -> None: @@ -103,7 +100,26 @@ def inject( # Suppression `level` made in the child context or in the parent context # has priority over any non-suppressed `level` setting suppression_level = int(self.extract_instana_headers(dictionary_carrier)[2]) - span_context.level = min(suppression_level, span_context.level) + new_level = min(suppression_level, span_context.level) + + if new_level != span_context.level: + # Create a new span context with the updated level + span_context = SpanContext( + trace_id=span_context.trace_id, + span_id=span_context.span_id, + is_remote=span_context.is_remote, + trace_flags=span_context.trace_flags, + trace_state=span_context.trace_state, + level=new_level, + synthetic=span_context.synthetic, + trace_parent=span_context.trace_parent, + instana_ancestor=span_context.instana_ancestor, + long_trace_id=span_context.long_trace_id, + correlation_type=span_context.correlation_type, + correlation_id=span_context.correlation_id, + traceparent=span_context.traceparent, + tracestate=span_context.tracestate + ) def inject_key_value(carrier, key, value): if isinstance(carrier, list): @@ -119,9 +135,9 @@ def inject_key_value(carrier, key, value): inject_key_value( carrier, self.KAFKA_HEADER_KEY_L_S, - str(suppression_level).encode("utf-8"), + str(span_context.level).encode("utf-8"), ) - if suppression_level == 1: + if span_context.level == 1: inject_key_value( carrier, self.KAFKA_HEADER_KEY_T, diff --git a/src/instana/span/base_span.py b/src/instana/span/base_span.py index 0d8491c2..b0c58080 100644 --- a/src/instana/span/base_span.py +++ b/src/instana/span/base_span.py @@ -1,7 +1,6 @@ # (c) Copyright IBM Corp. 2024 from typing import TYPE_CHECKING, Type -import six from instana.log import logger from instana.util import DictionaryOfStan @@ -83,12 +82,12 @@ def _validate_attribute(self, key, value): try: # Attribute keys must be some type of text or string type - if isinstance(key, (six.text_type, six.string_types)): + if isinstance(key, str): validated_key = key[0:1024] # Max key length of 1024 characters if isinstance( value, - (bool, float, int, list, dict, six.text_type, six.string_types), + (bool, float, int, list, dict, str), ): validated_value = value else: diff --git a/src/instana/util/config.py b/src/instana/util/config.py index 887c2292..6cc1e109 100644 --- a/src/instana/util/config.py +++ b/src/instana/util/config.py @@ -2,11 +2,43 @@ import itertools import os -from typing import Any, Dict, List, Union +from typing import Any, Dict, List, Sequence, Tuple, Union +from instana.configurator import config from instana.log import logger from instana.util.config_reader import ConfigReader +# List of supported span categories (technology or protocol) +SPAN_CATEGORIES = [ + "logging", + "databases", + "messaging", + "protocols", # http, grpc, etc. 
+] + +# Mapping of span type calls (framework, library name, instrumentation name) to categories +SPAN_TYPE_TO_CATEGORY = { + # Database types + "redis": "databases", + "mysql": "databases", + "postgresql": "databases", + "mongodb": "databases", + "cassandra": "databases", + "couchbase": "databases", + "dynamodb": "databases", + "sqlalchemy": "databases", + # Messaging types + "kafka": "messaging", + "rabbitmq": "messaging", + "pika": "messaging", + "aio_pika": "messaging", + "aioamqp": "messaging", + # Protocol types + "http": "protocols", + "grpc": "protocols", + "graphql": "protocols", +} + def parse_service_pair(pair: str) -> List[str]: """ @@ -151,10 +183,10 @@ def parse_ignored_endpoints_from_yaml(file_path: str) -> List[str]: def is_truthy(value: Any) -> bool: """ Check if a value is truthy, accepting various formats. - + @param value: The value to check @return: True if the value is considered truthy, False otherwise - + Accepts the following as True: - True (Python boolean) - "True", "true" (case-insensitive string) @@ -163,17 +195,128 @@ def is_truthy(value: Any) -> bool: """ if value is None: return False - + if isinstance(value, bool): return value - + if isinstance(value, int): return value == 1 - + if isinstance(value, str): value_lower = value.lower() return value_lower == "true" or value == "1" - + return False + +def parse_span_disabling( + disable_list: Sequence[Union[str, Dict[str, Any]]], +) -> Tuple[List[str], List[str]]: + """ + Process a list of span disabling configurations and return lists of disabled and enabled spans. + + @param disable_list: List of span disabling configurations + @return: Tuple of (disabled_spans, enabled_spans) + """ + if not isinstance(disable_list, list): + logger.debug( + f"parse_span_disabling: Invalid disable_list type: {type(disable_list)}" + ) + return [], [] + + disabled_spans = [] + enabled_spans = [] + + for item in disable_list: + if isinstance(item, str): + disabled = parse_span_disabling_str(item) + disabled_spans.extend(disabled) + elif isinstance(item, dict): + disabled, enabled = parse_span_disabling_dict(item) + disabled_spans.extend(disabled) + enabled_spans.extend(enabled) + else: + logger.debug( + f"parse_span_disabling: Invalid disable_list item type: {type(item)}" + ) + + return disabled_spans, enabled_spans + + +def parse_span_disabling_str(item: str) -> List[str]: + """ + Process a string span disabling configuration and return a list of disabled spans. + + @param item: String span disabling configuration + @return: List of disabled spans + """ + if item.lower() in SPAN_CATEGORIES or item.lower() in SPAN_TYPE_TO_CATEGORY.keys(): + return [item.lower()] + else: + logger.debug(f"parse_span_disabling_str: Invalid span category/type: {item}") + return []
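To make the accepted input shapes concrete, here is a small usage sketch of the helpers defined above (the input list is made up; the expected results follow directly from the code):

```python
# Hypothetical input exercising each branch of parse_span_disabling().
disable_list = [
    "logging",                       # category string        -> disabled
    "KAFKA",                         # string match is case-insensitive
    {"redis": True, "http": False},  # dict: truthy disables, falsy enables
    42,                              # unsupported type -> debug log, skipped
]

disabled, enabled = parse_span_disabling(disable_list)
assert disabled == ["logging", "kafka", "redis"]
assert enabled == ["http"]
```

Note that string items are lowercased before matching, while dictionary keys are compared as-is.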
+ + +def parse_span_disabling_dict(items: Dict[str, bool]) -> Tuple[List[str], List[str]]: + """ + Process a dictionary span disabling configuration and return lists of disabled and enabled spans. + + @param items: Dictionary span disabling configuration + @return: Tuple of (disabled_spans, enabled_spans) + """ + disabled_spans = [] + enabled_spans = [] + + for key, value in items.items(): + if key in SPAN_CATEGORIES or key in SPAN_TYPE_TO_CATEGORY.keys(): + if is_truthy(value): + disabled_spans.append(key) + else: + enabled_spans.append(key) + else: + logger.debug(f"parse_span_disabling_dict: Invalid span category/type: {key}") + + return disabled_spans, enabled_spans + + +def get_disable_trace_configurations_from_env() -> Tuple[List[str], List[str]]: + # Read INSTANA_TRACING_DISABLE environment variable + if tracing_disable := os.environ.get("INSTANA_TRACING_DISABLE", None): + if is_truthy(tracing_disable): + # INSTANA_TRACING_DISABLE is True/true/1, then we disable all tracing + disabled_spans = [] + for category in SPAN_CATEGORIES: + disabled_spans.append(category) + return disabled_spans, [] + else: + # INSTANA_TRACING_DISABLE is a comma-separated list of span categories/types + tracing_disable_list = [x.strip() for x in tracing_disable.split(",")] + return parse_span_disabling(tracing_disable_list) + return [], [] + + +def get_disable_trace_configurations_from_yaml() -> Tuple[List[str], List[str]]: + config_reader = ConfigReader(os.environ.get("INSTANA_CONFIG_PATH", "")) + + if "tracing" in config_reader.data: + root_key = "tracing" + elif "com.instana.tracing" in config_reader.data: + logger.warning( + 'Please use "tracing" instead of "com.instana.tracing" in the local configuration file.' + ) + root_key = "com.instana.tracing" + else: + return [], [] + + tracing_disable_config = config_reader.data[root_key].get("disable", "") + return parse_span_disabling(tracing_disable_config) + + +def get_disable_trace_configurations_from_local() -> Tuple[List[str], List[str]]: + if "tracing" in config: + if tracing_disable_config := config["tracing"].get("disable", None): + return parse_span_disabling(tracing_disable_config) + return [], [] + + # Made with Bob diff --git a/src/instana/util/config_reader.py b/src/instana/util/config_reader.py index ddec31ec..87b5f8c1 100644 --- a/src/instana/util/config_reader.py +++ b/src/instana/util/config_reader.py @@ -1,15 +1,18 @@ # (c) Copyright IBM Corp. 2025 -from typing import Union -from instana.log import logger import yaml +from instana.log import logger + class ConfigReader: - def __init__(self, file_path: Union[str]) -> None: + def __init__(self, file_path: str) -> None: self.file_path = file_path - self.data = None - self.load_file() + self.data = {} + if file_path: + self.load_file() + else: + logger.warning("ConfigReader: No configuration file specified") def load_file(self) -> None: """Loads and parses the YAML file""" @@ -17,6 +20,8 @@ def load_file(self) -> None: try: with open(self.file_path, "r") as file: self.data = yaml.safe_load(file) except FileNotFoundError: - logger.error(f"Configuration file has not found: {self.file_path}") + logger.error( + f"ConfigReader: Configuration file was not found: {self.file_path}" + ) except yaml.YAMLError as e: - logger.error(f"Error parsing YAML file: {e}") + logger.error(f"ConfigReader: Error parsing YAML file: {e}") diff --git a/src/instana/util/process_discovery.py b/src/instana/util/process_discovery.py new file mode 100644 index 00000000..6a83efe5 --- /dev/null +++ b/src/instana/util/process_discovery.py @@ -0,0 +1,13 @@ +# (c) Copyright IBM Corp.
2025 + +from dataclasses import dataclass +from typing import List, Optional + + +@dataclass +class Discovery: + pid: int = 0 # the PID of this process + name: Optional[str] = None # the name of the executable + args: Optional[List[str]] = None # the command line arguments + fd: int = -1 # the file descriptor of the socket associated with the connection to the agent for this HTTP request + inode: str = "" # the inode of the socket associated with the connection to the agent for this HTTP request diff --git a/src/instana/util/runtime.py b/src/instana/util/runtime.py index 832d37c2..a49cdfa3 100644 --- a/src/instana/util/runtime.py +++ b/src/instana/util/runtime.py @@ -135,7 +135,7 @@ def determine_service_name() -> str: uwsgi_type = "uWSGI worker%s" app_name = uwsgi_type % app_name - except ImportError: + except (ImportError, AttributeError): pass except Exception: logger.debug("non-fatal get_application_name: ", exc_info=True) diff --git a/src/instana/version.py b/src/instana/version.py index b28f22de..6db3016f 100644 --- a/src/instana/version.py +++ b/src/instana/version.py @@ -3,4 +3,4 @@ # Module version file. Used by setup.py and snapshot reporting. -VERSION = "3.6.0" +VERSION = "3.9.3" diff --git a/tests/__init__.py b/tests/__init__.py index 39799ddb..a38754a7 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -3,7 +3,7 @@ import os -if os.environ.get('GEVENT_STARLETTE_TEST'): +if os.environ.get('GEVENT_TEST'): from gevent import monkey monkey.patch_all() diff --git a/tests/agent/test_host.py b/tests/agent/test_host.py index 29b5fd10..613d4478 100644 --- a/tests/agent/test_host.py +++ b/tests/agent/test_host.py @@ -5,7 +5,7 @@ import json import logging import os -from typing import Generator +from typing import Any, Dict, Generator from unittest.mock import Mock import pytest @@ -14,12 +14,13 @@ from instana.agent.host import AnnounceData, HostAgent from instana.collector.host import HostCollector -from instana.fsm import Discovery, TheMachine +from instana.fsm import TheMachine from instana.options import StandardOptions from instana.recorder import StanRecorder from instana.singletons import get_agent from instana.span.span import InstanaSpan from instana.span_context import SpanContext +from instana.util.process_discovery import Discovery from instana.util.runtime import is_windows @@ -715,3 +716,37 @@ def test_is_service_or_endpoint_ignored(self) -> None: # don't ignore other services assert not self.agent._HostAgent__is_endpoint_ignored("service3") + assert not self.agent._HostAgent__is_endpoint_ignored("service3") + + @pytest.mark.parametrize( + "input_data", + [ + { + "agentUuid": "test-uuid", + }, + { + "pid": 1234, + }, + { + "extraHeaders": ["value-3"], + }, + ], + ids=["missing_pid", "missing_agent_uuid", "missing_both_required_keys"], + ) + def test_set_from_missing_required_keys( + self, input_data: Dict[str, Any], caplog: pytest.LogCaptureFixture + ) -> None: + """Test set_from when required keys are missing in res_data.""" + agent = HostAgent() + caplog.set_level(logging.DEBUG, logger="instana") + + res_data = { + "secrets": {"matcher": "value-1", "list": ["value-2"]}, + } + res_data.update(input_data) + + agent.set_from(res_data) + + assert agent.announce_data is None + assert "Missing required keys in announce response" in caplog.messages[-1] + assert str(res_data) in caplog.messages[-1] diff --git a/tests/apps/aiohttp_app/__init__.py b/tests/apps/aiohttp_app/__init__.py index 7429a949..b9cf68a2 100644 --- a/tests/apps/aiohttp_app/__init__.py +++ 
b/tests/apps/aiohttp_app/__init__.py @@ -8,7 +8,7 @@ APP_THREAD = None -if not any((os.environ.get('GEVENT_STARLETTE_TEST'), +if not any((os.environ.get('GEVENT_TEST'), os.environ.get('CASSANDRA_TEST'), sys.version_info < (3, 5, 3))): APP_THREAD = launch_background_thread(server, "AIOHTTP") diff --git a/tests/apps/aiohttp_app2/__init__.py b/tests/apps/aiohttp_app2/__init__.py index e382343a..96ce3f82 100644 --- a/tests/apps/aiohttp_app2/__init__.py +++ b/tests/apps/aiohttp_app2/__init__.py @@ -7,7 +7,7 @@ APP_THREAD = None -if not any((os.environ.get('GEVENT_STARLETTE_TEST'), +if not any((os.environ.get('GEVENT_TEST'), os.environ.get('CASSANDRA_TEST'), sys.version_info < (3, 5, 3))): APP_THREAD = launch_background_thread(server, "AIOHTTP") diff --git a/tests/apps/grpc_server/__init__.py b/tests/apps/grpc_server/__init__.py index 5a222deb..78439e5e 100644 --- a/tests/apps/grpc_server/__init__.py +++ b/tests/apps/grpc_server/__init__.py @@ -6,7 +6,7 @@ import time import threading -if not any((os.environ.get('GEVENT_STARLETTE_TEST'), +if not any((os.environ.get('GEVENT_TEST'), os.environ.get('CASSANDRA_TEST'), sys.version_info < (3, 5, 3))): # Background RPC application diff --git a/tests/apps/tornado_server/__init__.py b/tests/apps/tornado_server/__init__.py index 20a27361..7b0d6c76 100644 --- a/tests/apps/tornado_server/__init__.py +++ b/tests/apps/tornado_server/__init__.py @@ -8,7 +8,7 @@ app_thread = None -if not any((app_thread, os.environ.get('GEVENT_STARLETTE_TEST'), os.environ.get('CASSANDRA_TEST'))): +if not any((app_thread, os.environ.get('GEVENT_TEST'), os.environ.get('CASSANDRA_TEST'))): testenv["tornado_port"] = 10813 testenv["tornado_server"] = ("http://127.0.0.1/" + str(testenv["tornado_port"])) diff --git a/tests/clients/boto3/test_boto3_s3.py b/tests/clients/boto3/test_boto3_s3.py index b772ab42..d20b51cd 100644 --- a/tests/clients/boto3/test_boto3_s3.py +++ b/tests/clients/boto3/test_boto3_s3.py @@ -2,10 +2,12 @@ # (c) Copyright Instana Inc. 
2020 import os +from io import BytesIO + import pytest +import boto3 from typing import Generator from moto import mock_aws -import boto3 from instana.singletons import tracer, agent from tests.helpers import get_first_span_by_filter @@ -18,13 +20,18 @@ class TestS3: + @classmethod + def setup_class(cls) -> None: + cls.bucket_name = "aws_bucket_name" + cls.object_name = "aws_key_name" + cls.recorder = tracer.span_processor + cls.mock = mock_aws() + @pytest.fixture(autouse=True) def _resource(self) -> Generator[None, None, None]: """Setup and Teardown""" # Clear all spans before a test run - self.recorder = tracer.span_processor self.recorder.clear_spans() - self.mock = mock_aws() self.mock.start() self.s3 = boto3.client("s3", region_name="us-east-1") yield @@ -33,19 +40,19 @@ def _resource(self) -> Generator[None, None, None]: agent.options.allow_exit_as_root = False def test_vanilla_create_bucket(self) -> None: - self.s3.create_bucket(Bucket="aws_bucket_name") + self.s3.create_bucket(Bucket=self.bucket_name) result = self.s3.list_buckets() assert len(result["Buckets"]) == 1 - assert result["Buckets"][0]["Name"] == "aws_bucket_name" + assert result["Buckets"][0]["Name"] == self.bucket_name def test_s3_create_bucket(self) -> None: with tracer.start_as_current_span("test"): - self.s3.create_bucket(Bucket="aws_bucket_name") + self.s3.create_bucket(Bucket=self.bucket_name) result = self.s3.list_buckets() assert len(result["Buckets"]) == 1 - assert result["Buckets"][0]["Name"] == "aws_bucket_name" + assert result["Buckets"][0]["Name"] == self.bucket_name spans = self.recorder.queued_spans() assert len(spans) == 2 @@ -65,11 +72,11 @@ def test_s3_create_bucket(self) -> None: assert not s3_span.ec assert s3_span.data["s3"]["op"] == "CreateBucket" - assert s3_span.data["s3"]["bucket"] == "aws_bucket_name" + assert s3_span.data["s3"]["bucket"] == self.bucket_name def test_s3_create_bucket_as_root_exit_span(self) -> None: agent.options.allow_exit_as_root = True - self.s3.create_bucket(Bucket="aws_bucket_name") + self.s3.create_bucket(Bucket=self.bucket_name) agent.options.allow_exit_as_root = False self.s3.list_buckets() @@ -83,7 +90,7 @@ def test_s3_create_bucket_as_root_exit_span(self) -> None: assert not s3_span.ec assert s3_span.data["s3"]["op"] == "CreateBucket" - assert s3_span.data["s3"]["bucket"] == "aws_bucket_name" + assert s3_span.data["s3"]["bucket"] == self.bucket_name def test_s3_list_buckets(self) -> None: with tracer.start_as_current_span("test"): @@ -113,21 +120,15 @@ def test_s3_list_buckets(self) -> None: assert not s3_span.data["s3"]["bucket"] def test_s3_vanilla_upload_file(self) -> None: - object_name = "aws_key_name" - bucket_name = "aws_bucket_name" - - self.s3.create_bucket(Bucket=bucket_name) - result = self.s3.upload_file(upload_filename, bucket_name, object_name) + self.s3.create_bucket(Bucket=self.bucket_name) + result = self.s3.upload_file(upload_filename, self.bucket_name, self.object_name) assert not result def test_s3_upload_file(self) -> None: - object_name = "aws_key_name" - bucket_name = "aws_bucket_name" - - self.s3.create_bucket(Bucket=bucket_name) + self.s3.create_bucket(Bucket=self.bucket_name) with tracer.start_as_current_span("test"): - self.s3.upload_file(upload_filename, bucket_name, object_name) + self.s3.upload_file(upload_filename, self.bucket_name, self.object_name) spans = self.recorder.queued_spans() assert len(spans) == 2 @@ -147,17 +148,14 @@ def test_s3_upload_file(self) -> None: assert not s3_span.ec assert s3_span.data["s3"]["op"] == "UploadFile" 
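The refactor running through this test file replaces per-test literals with class-level constants. The division of labor it relies on is a standard pytest idiom, sketched generically below (names are illustrative, not taken from the real suite):

```python
import pytest

class TestExample:
    @classmethod
    def setup_class(cls) -> None:
        # Runs once per class: cheap, shared, effectively read-only values.
        cls.bucket_name = "aws_bucket_name"

    @pytest.fixture(autouse=True)
    def _resource(self):
        # Runs around every test: mutable state is created and reset here.
        self.created = []
        yield
        self.created.clear()

    def test_uses_shared_constant(self) -> None:
        assert self.bucket_name == "aws_bucket_name"
```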
- assert s3_span.data["s3"]["bucket"] == "aws_bucket_name" + assert s3_span.data["s3"]["bucket"] == self.bucket_name def test_s3_upload_file_obj(self) -> None: - object_name = "aws_key_name" - bucket_name = "aws_bucket_name" - - self.s3.create_bucket(Bucket=bucket_name) + self.s3.create_bucket(Bucket=self.bucket_name) with tracer.start_as_current_span("test"): with open(upload_filename, "rb") as fd: - self.s3.upload_fileobj(fd, bucket_name, object_name) + self.s3.upload_fileobj(fd, self.bucket_name, self.object_name) spans = self.recorder.queued_spans() assert len(spans) == 2 @@ -177,17 +175,14 @@ def test_s3_upload_file_obj(self) -> None: assert not s3_span.ec assert s3_span.data["s3"]["op"] == "UploadFileObj" - assert s3_span.data["s3"]["bucket"] == "aws_bucket_name" + assert s3_span.data["s3"]["bucket"] == self.bucket_name def test_s3_download_file(self) -> None: - object_name = "aws_key_name" - bucket_name = "aws_bucket_name" - - self.s3.create_bucket(Bucket=bucket_name) - self.s3.upload_file(upload_filename, bucket_name, object_name) + self.s3.create_bucket(Bucket=self.bucket_name) + self.s3.upload_file(upload_filename, self.bucket_name, self.object_name) with tracer.start_as_current_span("test"): - self.s3.download_file(bucket_name, object_name, download_target_filename) + self.s3.download_file(self.bucket_name, self.object_name, download_target_filename) spans = self.recorder.queued_spans() assert len(spans) == 2 @@ -207,18 +202,15 @@ def test_s3_download_file(self) -> None: assert not s3_span.ec assert s3_span.data["s3"]["op"] == "DownloadFile" - assert s3_span.data["s3"]["bucket"] == "aws_bucket_name" + assert s3_span.data["s3"]["bucket"] == self.bucket_name def test_s3_download_file_obj(self) -> None: - object_name = "aws_key_name" - bucket_name = "aws_bucket_name" - - self.s3.create_bucket(Bucket=bucket_name) - self.s3.upload_file(upload_filename, bucket_name, object_name) + self.s3.create_bucket(Bucket=self.bucket_name) + self.s3.upload_file(upload_filename, self.bucket_name, self.object_name) with tracer.start_as_current_span("test"): with open(download_target_filename, "wb") as fd: - self.s3.download_fileobj(bucket_name, object_name, fd) + self.s3.download_fileobj(self.bucket_name, self.object_name, fd) spans = self.recorder.queued_spans() assert len(spans) == 2 @@ -238,15 +230,13 @@ def test_s3_download_file_obj(self) -> None: assert not s3_span.ec assert s3_span.data["s3"]["op"] == "DownloadFileObj" - assert s3_span.data["s3"]["bucket"] == "aws_bucket_name" + assert s3_span.data["s3"]["bucket"] == self.bucket_name def test_s3_list_obj(self) -> None: - bucket_name = "aws_bucket_name" - - self.s3.create_bucket(Bucket=bucket_name) + self.s3.create_bucket(Bucket=self.bucket_name) with tracer.start_as_current_span("test"): - self.s3.list_objects(Bucket=bucket_name) + self.s3.list_objects(Bucket=self.bucket_name) spans = self.recorder.queued_spans() assert len(spans) == 2 @@ -266,4 +256,49 @@ def test_s3_list_obj(self) -> None: assert not s3_span.ec assert s3_span.data["s3"]["op"] == "ListObjects" - assert s3_span.data["s3"]["bucket"] == "aws_bucket_name" + assert s3_span.data["s3"]["bucket"] == self.bucket_name + + def test_s3_resource_bucket_upload_fileobj(self) -> None: + """ + Verify boto3.resource().Bucket().upload_fileobj() works correctly with BytesIO objects + """ + test_data = b"somedata" + + # Create a bucket using the client first + self.s3.create_bucket(Bucket=self.bucket_name) + + s3_resource = boto3.resource( + "s3", + region_name="us-east-1" + ) + bucket = 
s3_resource.Bucket(name=self.bucket_name) + + with tracer.start_as_current_span("test"): + bucket.upload_fileobj(BytesIO(test_data), self.object_name) + + # Verify the upload was successful by retrieving the object + response = bucket.Object(self.object_name).get() + file_content = response["Body"].read() + + # Assert the content matches what we uploaded + assert file_content == test_data + + # Verify the spans were created correctly + spans = self.recorder.queued_spans() + assert len(spans) >= 2 + + filter = lambda span: span.n == "sdk" # noqa: E731 + test_span = get_first_span_by_filter(spans, filter) + assert test_span + + filter = lambda span: span.n == "s3" and span.data["s3"]["op"] == "UploadFileObj" # noqa: E731 + s3_span = get_first_span_by_filter(spans, filter) + assert s3_span + + assert s3_span.t == test_span.t + assert s3_span.p == test_span.s + + assert not test_span.ec + assert not s3_span.ec + + assert s3_span.data["s3"]["bucket"] == self.bucket_name diff --git a/tests/clients/kafka/test_confluent_kafka.py b/tests/clients/kafka/test_confluent_kafka.py index fb9ab4c8..a5c9b334 100644 --- a/tests/clients/kafka/test_confluent_kafka.py +++ b/tests/clients/kafka/test_confluent_kafka.py @@ -5,19 +5,24 @@ from typing import Generator import pytest -from confluent_kafka import ( - Consumer, - KafkaException, - Producer, -) +from confluent_kafka import Consumer, KafkaException, Producer from confluent_kafka.admin import AdminClient, NewTopic -from mock import patch +from mock import Mock, patch from opentelemetry.trace import SpanKind from opentelemetry.trace.span import format_span_id from instana.configurator import config +from instana.instrumentation.kafka import confluent_kafka_python +from instana.instrumentation.kafka.confluent_kafka_python import ( + clear_context, + close_consumer_span, + consumer_span, + save_consumer_span_into_context, + trace_kafka_close, +) from instana.options import StandardOptions from instana.singletons import agent, tracer +from instana.span.span import InstanaSpan from instana.util.config import parse_ignored_endpoints_from_yaml from tests.helpers import get_first_span_by_filter, testenv @@ -68,8 +73,12 @@ def _resource(self) -> Generator[None, None, None]: agent.options = StandardOptions() yield # teardown - # Ensure that allow_exit_as_root has the default value""" - agent.options.allow_exit_as_root = False + # Clear spans before resetting options + self.recorder.clear_spans() + + # Clear context + clear_context() + # Close connections self.kafka_client.delete_topics( [ @@ -107,24 +116,44 @@ def test_trace_confluent_kafka_produce(self) -> None: assert kafka_span.data["kafka"]["service"] == testenv["kafka_topic"] assert kafka_span.data["kafka"]["access"] == "produce" - def test_trace_confluent_kafka_consume(self) -> None: - agent.options.set_trace_configurations() - # Produce some events - self.producer.produce(testenv["kafka_topic"], value=b"raw_bytes1") - self.producer.flush(timeout=30) + def test_trace_confluent_kafka_produce_with_keyword_topic(self) -> None: + """Test that tracing works when topic is passed as a keyword argument.""" + with tracer.start_as_current_span("test"): + # Pass topic as a keyword argument + self.producer.produce(topic=testenv["kafka_topic"], value=b"raw_bytes") + self.producer.flush(timeout=10) - # Consume the events - consumer_config = self.kafka_config.copy() - consumer_config["group.id"] = "my-group" - consumer_config["auto.offset.reset"] = "earliest" + spans = self.recorder.queued_spans() + assert len(spans) == 2 - 
consumer = Consumer(consumer_config) - consumer.subscribe([testenv["kafka_topic"]]) + kafka_span = spans[0] + test_span = spans[1] - with tracer.start_as_current_span("test"): - msgs = consumer.consume(num_messages=1, timeout=60) # noqa: F841 + # Same traceId + assert test_span.t == kafka_span.t - consumer.close() + # Parent relationships + assert kafka_span.p == test_span.s + + # Error logging + assert not test_span.ec + assert not kafka_span.ec + + assert kafka_span.n == "kafka" + assert kafka_span.k == SpanKind.CLIENT + assert kafka_span.data["kafka"]["service"] == testenv["kafka_topic"] + assert kafka_span.data["kafka"]["access"] == "produce" + + def test_trace_confluent_kafka_produce_with_keyword_args(self) -> None: + """Test that tracing works when both topic and headers are passed as keyword arguments.""" + with tracer.start_as_current_span("test"): + # Pass both topic and headers as keyword arguments + self.producer.produce( + topic=testenv["kafka_topic"], + value=b"raw_bytes", + headers=[("custom-header", b"header-value")], + ) + self.producer.flush(timeout=10) spans = self.recorder.queued_spans() assert len(spans) == 2 @@ -143,9 +172,31 @@ def test_trace_confluent_kafka_consume(self) -> None: assert not kafka_span.ec assert kafka_span.n == "kafka" - assert kafka_span.k == SpanKind.SERVER + assert kafka_span.k == SpanKind.CLIENT assert kafka_span.data["kafka"]["service"] == testenv["kafka_topic"] - assert kafka_span.data["kafka"]["access"] == "consume" + assert kafka_span.data["kafka"]["access"] == "produce" + + def test_trace_confluent_kafka_consume(self) -> None: + agent.options.set_trace_configurations() + # Produce some events + self.producer.produce(testenv["kafka_topic"], value=b"raw_bytes1") + self.producer.flush(timeout=30) + + # Consume the events + consumer_config = self.kafka_config.copy() + consumer_config["group.id"] = "my-group" + consumer_config["auto.offset.reset"] = "earliest" + + consumer = Consumer(consumer_config) + consumer.subscribe([testenv["kafka_topic"]]) + + with tracer.start_as_current_span("test"): + msgs = consumer.consume(num_messages=1, timeout=60) # noqa: F841 + + consumer.close() + + spans = self.recorder.queued_spans() + assert len(spans) == 2 def test_trace_confluent_kafka_poll(self) -> None: # Produce some events @@ -162,15 +213,22 @@ def test_trace_confluent_kafka_poll(self) -> None: consumer.subscribe([testenv["kafka_topic"]]) with tracer.start_as_current_span("test"): - msg = consumer.poll(timeout=30) # noqa: F841 + msg = consumer.poll(timeout=3) # noqa: F841 consumer.close() spans = self.recorder.queued_spans() assert len(spans) == 2 - kafka_span = spans[0] - test_span = spans[1] + def filter(span): + return span.n == "kafka" and span.data["kafka"]["access"] == "poll" + + kafka_span = get_first_span_by_filter(spans, filter) + + def filter(span): + return span.n == "sdk" and span.data["sdk"]["name"] == "test" + + test_span = get_first_span_by_filter(spans, filter) # Same traceId assert test_span.t == kafka_span.t @@ -282,10 +340,7 @@ def test_ignore_confluent_kafka_consumer(self) -> None: consumer.close() spans = self.recorder.queued_spans() - assert len(spans) == 3 - - filtered_spans = agent.filter_spans(spans) - assert len(filtered_spans) == 1 + assert len(spans) == 1 @patch.dict( os.environ, @@ -323,7 +378,7 @@ def test_ignore_confluent_specific_topic(self) -> None: consumer.close() spans = self.recorder.queued_spans() - assert len(spans) == 5 + assert len(spans) == 4 filtered_spans = agent.filter_spans(spans) assert len(filtered_spans) == 
3 @@ -362,7 +417,7 @@ def test_ignore_confluent_specific_topic_with_config_file(self) -> None: consumer.close() spans = self.recorder.queued_spans() - assert len(spans) == 3 + assert len(spans) == 2 filtered_spans = agent.filter_spans(spans) assert len(filtered_spans) == 1 @@ -482,7 +537,7 @@ def test_confluent_kafka_poll_root_exit_without_trace_correlation(self) -> None: agent.options.kafka_trace_correlation = False # Produce some events - self.producer.produce(testenv["kafka_topic"], b"raw_bytes1") + self.producer.produce(f'{testenv["kafka_topic"]}-wo-tc', b"raw_bytes1") self.producer.flush() # Consume the events @@ -491,7 +546,7 @@ def test_confluent_kafka_poll_root_exit_without_trace_correlation(self) -> None: consumer_config["auto.offset.reset"] = "earliest" consumer = Consumer(consumer_config) - consumer.subscribe([testenv["kafka_topic"]]) + consumer.subscribe([f'{testenv["kafka_topic"]}-wo-tc']) msg = consumer.poll(timeout=30) # noqa: F841 @@ -504,14 +559,14 @@ def test_confluent_kafka_poll_root_exit_without_trace_correlation(self) -> None: spans, lambda span: span.n == "kafka" and span.data["kafka"]["access"] == "produce" - and span.data["kafka"]["service"] == "span-topic", + and span.data["kafka"]["service"] == f'{testenv["kafka_topic"]}-wo-tc', ) poll_span = get_first_span_by_filter( spans, lambda span: span.n == "kafka" and span.data["kafka"]["access"] == "poll" - and span.data["kafka"]["service"] == "span-topic", + and span.data["kafka"]["service"] == f'{testenv["kafka_topic"]}-wo-tc', ) # Different traceId @@ -598,7 +653,7 @@ def test_confluent_kafka_downstream_suppression(self) -> None: consumer.close() spans = self.recorder.queued_spans() - assert len(spans) == 3 + assert len(spans) == 2 producer_span_1 = get_first_span_by_filter( spans, @@ -628,10 +683,7 @@ def test_confluent_kafka_downstream_suppression(self) -> None: assert producer_span_1 # consumer has been suppressed assert not consumer_span_1 - - assert producer_span_2.t == consumer_span_2.t - assert producer_span_2.s == consumer_span_2.p - assert producer_span_2.s != consumer_span_2.s + assert not consumer_span_2 for message in messages: if message.topic() == "span-topic_1": @@ -649,3 +701,75 @@ def test_confluent_kafka_downstream_suppression(self) -> None: testenv["kafka_topic"] + "_2", ] ) + + def test_save_consumer_span_into_context(self, span: "InstanaSpan") -> None: + """Test save_consumer_span_into_context function.""" + # Verify initial state + assert consumer_span.get(None) is None + assert confluent_kafka_python.consumer_token is None + + # Save span into context + save_consumer_span_into_context(span) + + # Verify token is stored + assert confluent_kafka_python.consumer_token is not None + + def test_close_consumer_span_recording_span(self, span: "InstanaSpan") -> None: + """Test close_consumer_span with a recording span.""" + # Save span into context first + save_consumer_span_into_context(span) + assert confluent_kafka_python.consumer_token is not None + + # Verify span is recording + assert span.is_recording() + + # Close the span + close_consumer_span(span) + + # Verify span was ended and context cleared + assert not span.is_recording() + assert consumer_span.get(None) is None + assert confluent_kafka_python.consumer_token is None + + def test_clear_context(self, span: "InstanaSpan") -> None: + """Test clear_context function.""" + # Save span into context + save_consumer_span_into_context(span) + + # Verify context has data + assert consumer_span.get(None) == span + assert 
confluent_kafka_python.consumer_token is not None + + # Clear context + clear_context() + + # Verify all context is cleared + assert consumer_span.get(None) is None + assert confluent_kafka_python.consumer_token is None + + def test_trace_kafka_close_exception_handling(self, span: "InstanaSpan") -> None: + """Test trace_kafka_close handles exceptions and still cleans up spans.""" + # Save span into context + save_consumer_span_into_context(span) + + # Verify span is in context + assert consumer_span.get(None) == span + assert confluent_kafka_python.consumer_token is not None + + # Mock a wrapped function that raises an exception + mock_wrapped = Mock(side_effect=Exception("Close operation failed")) + mock_instance = Mock() + + # Call trace_kafka_close - it should handle the exception gracefully + # and still clean up the span + trace_kafka_close(mock_wrapped, mock_instance, (), {}) + + # Verify the wrapped function was called + mock_wrapped.assert_called_once_with() + + # Verify that despite the exception, the span was cleaned up + assert consumer_span.get(None) is None + assert confluent_kafka_python.consumer_token is None + + # Verify span was ended + assert not span.is_recording() diff --git a/tests/clients/kafka/test_kafka_python.py b/tests/clients/kafka/test_kafka_python.py index dd568583..a1d0ccbb 100644 --- a/tests/clients/kafka/test_kafka_python.py +++ b/tests/clients/kafka/test_kafka_python.py @@ -12,8 +12,16 @@ from opentelemetry.trace.span import format_span_id from instana.configurator import config +from instana.instrumentation.kafka import kafka_python +from instana.instrumentation.kafka.kafka_python import ( + clear_context, + close_consumer_span, + consumer_span, + save_consumer_span_into_context, +) from instana.options import StandardOptions from instana.singletons import agent, tracer +from instana.span.span import InstanaSpan from instana.util.config import parse_ignored_endpoints_from_yaml from tests.helpers import get_first_span_by_filter, testenv @@ -72,6 +80,10 @@ def _resource(self) -> Generator[None, None, None]: agent.options.allow_exit_as_root = False # Close connections self.producer.close() + + # Clear context + clear_context() + self.kafka_client.delete_topics( [ testenv["kafka_topic"], @@ -109,6 +121,70 @@ def test_trace_kafka_python_send(self) -> None: assert kafka_span.data["kafka"]["service"] == testenv["kafka_topic"] assert kafka_span.data["kafka"]["access"] == "send" + def test_trace_kafka_python_send_with_keyword_topic(self) -> None: + """Test that tracing works when topic is passed as a keyword argument.""" + with tracer.start_as_current_span("test"): + # Pass topic as a keyword argument + future = self.producer.send( + topic=testenv["kafka_topic"], value=b"raw_bytes" + ) + + _ = future.get(timeout=10) # noqa: F841 + + spans = self.recorder.queued_spans() + assert len(spans) == 2 + + kafka_span = spans[0] + test_span = spans[1] + + # Same traceId + assert test_span.t == kafka_span.t + + # Parent relationships + assert kafka_span.p == test_span.s + + # Error logging + assert not test_span.ec + assert not kafka_span.ec + + assert kafka_span.n == "kafka" + assert kafka_span.k == SpanKind.CLIENT + assert kafka_span.data["kafka"]["service"] == testenv["kafka_topic"] + assert kafka_span.data["kafka"]["access"] == "send" + + def test_trace_kafka_python_send_with_keyword_args(self) -> None: + """Test that tracing works when both topic and headers are passed as keyword arguments.""" + with tracer.start_as_current_span("test"): + # Pass both topic and headers 
as keyword arguments + future = self.producer.send( + topic=testenv["kafka_topic"], + value=b"raw_bytes", + headers=[("custom-header", b"header-value")], + ) + + _ = future.get(timeout=10) # noqa: F841 + + spans = self.recorder.queued_spans() + assert len(spans) == 2 + + kafka_span = spans[0] + test_span = spans[1] + + # Same traceId + assert test_span.t == kafka_span.t + + # Parent relationships + assert kafka_span.p == test_span.s + + # Error logging + assert not test_span.ec + assert not kafka_span.ec + + assert kafka_span.n == "kafka" + assert kafka_span.k == SpanKind.CLIENT + assert kafka_span.data["kafka"]["service"] == testenv["kafka_topic"] + assert kafka_span.data["kafka"]["access"] == "send" + def test_trace_kafka_python_consume(self) -> None: # Produce some events self.producer.send(testenv["kafka_topic"], b"raw_bytes1") @@ -132,10 +208,17 @@ def test_trace_kafka_python_consume(self) -> None: consumer.close() spans = self.recorder.queued_spans() - assert len(spans) == 4 + assert len(spans) == 3 - kafka_span = spans[0] - test_span = spans[len(spans) - 1] + def filter(span): + return span.n == "kafka" and span.data["kafka"]["access"] == "consume" + + kafka_span = get_first_span_by_filter(spans, filter) + + def filter(span): + return span.n == "sdk" and span.data["sdk"]["name"] == "test" + + test_span = get_first_span_by_filter(spans, filter) # Same traceId assert test_span.t == kafka_span.t @@ -168,15 +251,22 @@ def test_trace_kafka_python_poll(self) -> None: ) with tracer.start_as_current_span("test"): - msg = consumer.poll() # noqa: F841 + msg = consumer.poll(timeout_ms=3000) # noqa: F841 consumer.close() spans = self.recorder.queued_spans() - assert len(spans) == 2 + assert len(spans) == 3 - kafka_span = spans[0] - test_span = spans[1] + def filter(span): + return span.n == "kafka" and span.data["kafka"]["access"] == "poll" + + kafka_span = get_first_span_by_filter(spans, filter) + + def filter(span): + return span.n == "sdk" and span.data["sdk"]["name"] == "test" + + test_span = get_first_span_by_filter(spans, filter) # Same traceId assert test_span.t == kafka_span.t @@ -194,27 +284,36 @@ def test_trace_kafka_python_poll(self) -> None: assert kafka_span.data["kafka"]["access"] == "poll" def test_trace_kafka_python_error(self) -> None: - # Consume the events consumer = KafkaConsumer( "inexistent_kafka_topic", bootstrap_servers=testenv["kafka_bootstrap_servers"], - auto_offset_reset="earliest", # consume earliest available messages - enable_auto_commit=False, # do not auto-commit offsets + auto_offset_reset="earliest", + enable_auto_commit=False, consumer_timeout_ms=1000, ) with tracer.start_as_current_span("test"): - for msg in consumer: - if msg is None: - break + consumer._client = None - consumer.close() + try: + for msg in consumer: + if msg is None: + break + except Exception: + pass spans = self.recorder.queued_spans() assert len(spans) == 2 - kafka_span = spans[0] - test_span = spans[1] + def filter(span): + return span.n == "kafka" and span.data["kafka"]["access"] == "consume" + + kafka_span = get_first_span_by_filter(spans, filter) + + def filter(span): + return span.n == "sdk" and span.data["sdk"]["name"] == "test" + + test_span = get_first_span_by_filter(spans, filter) # Same traceId assert test_span.t == kafka_span.t @@ -230,7 +329,10 @@ def test_trace_kafka_python_error(self) -> None: assert kafka_span.k == SpanKind.SERVER assert kafka_span.data["kafka"]["service"] == "inexistent_kafka_topic" assert kafka_span.data["kafka"]["access"] == "consume" - assert 
kafka_span.data["kafka"]["error"] == "StopIteration()" + assert ( + kafka_span.data["kafka"]["error"] + == "'NoneType' object has no attribute 'poll'" + ) def consume_from_topic(self, topic_name: str) -> None: consumer = KafkaConsumer( @@ -302,10 +404,7 @@ def test_ignore_kafka_consumer(self) -> None: self.consume_from_topic(testenv["kafka_topic"]) spans = self.recorder.queued_spans() - assert len(spans) == 4 - - filtered_spans = agent.filter_spans(spans) - assert len(filtered_spans) == 1 + assert len(spans) == 1 @patch.dict( os.environ, @@ -326,10 +425,10 @@ def test_ignore_specific_topic(self) -> None: self.consume_from_topic(testenv["kafka_topic"] + "_1") spans = self.recorder.queued_spans() - assert len(spans) == 11 + assert len(spans) == 7 filtered_spans = agent.filter_spans(spans) - assert len(filtered_spans) == 8 + assert len(filtered_spans) == 6 span_to_be_filtered = get_first_span_by_filter( spans, @@ -351,10 +450,7 @@ def test_ignore_specific_topic_with_config_file(self) -> None: self.consume_from_topic(testenv["kafka_topic"]) spans = self.recorder.queued_spans() - assert len(spans) == 3 - - filtered_spans = agent.filter_spans(spans) - assert len(filtered_spans) == 1 + assert len(spans) == 1 def test_kafka_consumer_root_exit(self) -> None: agent.options.allow_exit_as_root = True @@ -378,7 +474,7 @@ def test_kafka_consumer_root_exit(self) -> None: consumer.close() spans = self.recorder.queued_spans() - assert len(spans) == 4 + assert len(spans) == 3 producer_span = spans[0] consumer_span = spans[1] @@ -713,3 +809,50 @@ def test_kafka_downstream_suppression(self) -> None: format_span_id(producer_span_2.s).encode("utf-8"), ), ] + + def test_save_consumer_span_into_context(self, span: "InstanaSpan") -> None: + """Test save_consumer_span_into_context function.""" + # Verify initial state + assert consumer_span.get(None) is None + assert kafka_python.consumer_token is None + + # Save span into context + save_consumer_span_into_context(span) + + # Verify span is saved in context variable + assert consumer_span.get(None) == span + # Verify token is stored + assert kafka_python.consumer_token is not None + + def test_close_consumer_span_recording_span(self, span: "InstanaSpan") -> None: + """Test close_consumer_span with a recording span.""" + # Save span into context first + save_consumer_span_into_context(span) + assert kafka_python.consumer_token is not None + + # Verify span is recording + assert span.is_recording() + + # Close the span + close_consumer_span(span) + + # Verify span was ended and context cleared + assert not span.is_recording() + assert consumer_span.get(None) is None + assert kafka_python.consumer_token is None + + def test_clear_context(self, span: "InstanaSpan") -> None: + """Test clear_context function.""" + # Save span into context + save_consumer_span_into_context(span) + + # Verify context has data + assert consumer_span.get(None) == span + assert kafka_python.consumer_token is not None + + # Clear context + clear_context() + + # Verify all context is cleared + assert consumer_span.get(None) is None + assert kafka_python.consumer_token is None diff --git a/tests/clients/test_aio_pika.py b/tests/clients/test_aio_pika.py index 75c1afff..20e97618 100644 --- a/tests/clients/test_aio_pika.py +++ b/tests/clients/test_aio_pika.py @@ -56,6 +56,9 @@ async def publish_message(self, params_combination: str = "both_args") -> None: elif params_combination == "arg_kwarg": args = (message,) kwargs = {"routing_key": queue_name} + elif params_combination == 
"arg_kwarg_empty_key": + args = (message,) + kwargs = {"routing_key": ""} else: # params_combination == "both_args" args = (message, queue_name) @@ -86,6 +89,31 @@ async def consume_message(self, connect_method) -> None: if queue.name in message.body.decode(): break + async def consume_with_exception(self, connect_method) -> None: + connection = await connect_method() + + async def on_message(msg): + raise RuntimeError("Simulated Exception") + + async with connection: + # Creating channel + channel = await connection.channel() + + # Declaring queue + queue = await channel.declare_queue(self.queue_name) + + await queue.consume(on_message) + await asyncio.sleep(1) # Wait to ensure the message is processed + + def assert_span_info(self, rabbitmq_span: "ReadableSpan", sort: str, key: str = "test.queue") -> None: + assert rabbitmq_span.data["rabbitmq"]["exchange"] == "test.exchange" + assert rabbitmq_span.data["rabbitmq"]["sort"] == sort + assert rabbitmq_span.data["rabbitmq"]["address"] + assert rabbitmq_span.data["rabbitmq"]["key"] == key + assert rabbitmq_span.stack + assert isinstance(rabbitmq_span.stack, list) + assert len(rabbitmq_span.stack) > 0 + @pytest.mark.parametrize( "params_combination", ["both_args", "both_kwargs", "arg_kwarg"], @@ -111,13 +139,8 @@ def test_basic_publish(self, params_combination) -> None: assert not rabbitmq_span.ec # Span attributes - assert rabbitmq_span.data["rabbitmq"]["exchange"] == "test.exchange" - assert rabbitmq_span.data["rabbitmq"]["sort"] == "publish" - assert rabbitmq_span.data["rabbitmq"]["address"] - assert rabbitmq_span.data["rabbitmq"]["key"] == "test.queue" - assert rabbitmq_span.stack - assert isinstance(rabbitmq_span.stack, list) - assert len(rabbitmq_span.stack) > 0 + key = "" if params_combination == "arg_kwarg_empty_key" else self.queue_name + self.assert_span_info(rabbitmq_span, "publish", key) def test_basic_publish_as_root_exit_span(self) -> None: agent.options.allow_exit_as_root = True @@ -135,13 +158,7 @@ def test_basic_publish_as_root_exit_span(self) -> None: assert not rabbitmq_span.ec # Span attributes - assert rabbitmq_span.data["rabbitmq"]["exchange"] == "test.exchange" - assert rabbitmq_span.data["rabbitmq"]["sort"] == "publish" - assert rabbitmq_span.data["rabbitmq"]["address"] - assert rabbitmq_span.data["rabbitmq"]["key"] == "test.queue" - assert rabbitmq_span.stack - assert isinstance(rabbitmq_span.stack, list) - assert len(rabbitmq_span.stack) > 0 + self.assert_span_info(rabbitmq_span, "publish") @pytest.mark.parametrize( "connect_method", @@ -173,14 +190,38 @@ def test_basic_consume(self, connect_method) -> None: assert not test_span.ec # Span attributes - def assert_span_info(rabbitmq_span: "ReadableSpan", sort: str) -> None: - assert rabbitmq_span.data["rabbitmq"]["exchange"] == "test.exchange" - assert rabbitmq_span.data["rabbitmq"]["sort"] == sort - assert rabbitmq_span.data["rabbitmq"]["address"] - assert rabbitmq_span.data["rabbitmq"]["key"] == "test.queue" - assert rabbitmq_span.stack - assert isinstance(rabbitmq_span.stack, list) - assert len(rabbitmq_span.stack) > 0 - - assert_span_info(rabbitmq_publisher_span, "publish") - assert_span_info(rabbitmq_consumer_span, "consume") + self.assert_span_info(rabbitmq_publisher_span, "publish") + self.assert_span_info(rabbitmq_consumer_span, "consume") + + @pytest.mark.parametrize( + "connect_method", + [connect, connect_robust], + ) + def test_consume_with_exception(self, connect_method) -> None: + with tracer.start_as_current_span("test"): + 
self.loop.run_until_complete(self.publish_message()) + self.loop.run_until_complete(self.consume_with_exception(connect_method)) + + spans = self.recorder.queued_spans() + assert len(spans) == 3 + + rabbitmq_publisher_span = spans[0] + rabbitmq_consumer_span = spans[1] + test_span = spans[2] + + # Same traceId + assert test_span.t == rabbitmq_publisher_span.t + assert rabbitmq_publisher_span.t == rabbitmq_consumer_span.t + + # Parent relationships + assert rabbitmq_publisher_span.p == test_span.s + assert rabbitmq_consumer_span.p == rabbitmq_publisher_span.s + + # Error logging + assert not rabbitmq_publisher_span.ec + assert rabbitmq_consumer_span.ec == 1 + assert not test_span.ec + + # Span attributes + self.assert_span_info(rabbitmq_publisher_span, "publish") + self.assert_span_info(rabbitmq_consumer_span, "consume") diff --git a/tests/clients/test_google-cloud-pubsub.py b/tests/clients/test_google-cloud-pubsub.py index 678fc64d..db262e70 100644 --- a/tests/clients/test_google-cloud-pubsub.py +++ b/tests/clients/test_google-cloud-pubsub.py @@ -7,7 +7,6 @@ from typing import Generator import pytest -import six from google.api_core.exceptions import AlreadyExists from google.cloud.pubsub_v1 import PublisherClient, SubscriberClient from google.cloud.pubsub_v1.publisher import exceptions @@ -51,7 +50,7 @@ def test_publish(self) -> None: ) time.sleep(2.0) # for sanity result = future.result() - assert isinstance(result, six.string_types) + assert isinstance(result, str) spans = self.recorder.queued_spans() gcps_span, test_span = spans[0], spans[1] @@ -80,7 +79,7 @@ def test_publish_as_root_exit_span(self) -> None: ) time.sleep(2.0) # for sanity result = future.result() - assert isinstance(result, six.string_types) + assert isinstance(result, str) spans = self.recorder.queued_spans() assert len(spans) == 1 @@ -161,7 +160,7 @@ def test_subscribe(self) -> None: future = self.publisher.publish( self.topic_path, b"Test Message to PubSub", origin="instana" ) - assert isinstance(future.result(), six.string_types) + assert isinstance(future.result(), str) time.sleep(2.0) # for sanity diff --git a/tests/clients/test_google-cloud-storage.py b/tests/clients/test_google-cloud-storage.py index 15ce2e22..51b560ba 100644 --- a/tests/clients/test_google-cloud-storage.py +++ b/tests/clients/test_google-cloud-storage.py @@ -14,7 +14,7 @@ from opentelemetry.trace import SpanKind from mock import patch, Mock -from six.moves import http_client +from http import client as http_client from google.cloud import storage from google.api_core import iam, page_iterator diff --git a/tests/clients/test_logging.py b/tests/clients/test_logging.py index e924ac1c..0fa5d2dc 100644 --- a/tests/clients/test_logging.py +++ b/tests/clients/test_logging.py @@ -70,7 +70,7 @@ def test_parameters(self) -> None: try: a = 42 b = 0 - c = a / b + c = a / b # noqa: F841 except Exception as e: self.logger.exception("Exception: %s", str(e)) @@ -168,3 +168,78 @@ def main(): assert spans[0].k is SpanKind.CLIENT assert spans[0].data["log"].get("message") == "foo bar" + + +class TestLoggingDisabling: + @pytest.fixture(autouse=True) + def _resource(self) -> Generator[None, None, None]: + # Setup + self.recorder = tracer.span_processor + self.recorder.clear_spans() + self.logger = logging.getLogger("unit test") + + # Save original options + self.original_options = agent.options + + yield + + # Teardown + agent.options = self.original_options + agent.options.allow_exit_as_root = False + + def test_logging_enabled(self) -> None: + with 
tracer.start_as_current_span("test"): + self.logger.warning("test message") + + spans = self.recorder.queued_spans() + assert len(spans) == 2 + assert spans[0].k is SpanKind.CLIENT + assert spans[0].data["log"].get("message") == "test message" + + def test_logging_disabled(self) -> None: + # Disable logging spans + agent.options.disabled_spans = ["logging"] + + with tracer.start_as_current_span("test"): + self.logger.warning("test message") + + spans = self.recorder.queued_spans() + assert len(spans) == 1 # Only the parent span, no logging span + + def test_logging_disabled_via_env_var(self, monkeypatch): + # Disable logging spans via environment variable + monkeypatch.setenv("INSTANA_TRACING_DISABLE", "logging") + + # Create new options to read from environment + original_options = agent.options + agent.options = type(original_options)() + + with tracer.start_as_current_span("test"): + self.logger.warning("test message") + + spans = self.recorder.queued_spans() + assert len(spans) == 1 # Only the parent span, no logging span + + # Restore original options + agent.options = original_options + + def test_logging_disabled_via_yaml(self) -> None: + # Disable logging spans via YAML configuration + original_options = agent.options + agent.options = type(original_options)() + + # Simulate YAML configuration + tracing_config = {"disable": [{"logging": True}]} + agent.options.set_tracing(tracing_config) + + with tracer.start_as_current_span("test"): + self.logger.warning("test message") + + spans = self.recorder.queued_spans() + assert len(spans) == 1 # Only the parent span, no logging span + + # Restore original options + agent.options = original_options + + +# Made with Bob diff --git a/tests/clients/test_urllib3.py b/tests/clients/test_urllib3.py index 62b07d49..6c5fc318 100644 --- a/tests/clients/test_urllib3.py +++ b/tests/clients/test_urllib3.py @@ -992,3 +992,41 @@ def test_collect_kvs_exception( caplog.set_level(logging.DEBUG, logger="instana") collect_kvs({}, (), {}) assert "urllib3 _collect_kvs error: " in caplog.messages + + def test_internal_span_creation_with_url_in_hostname(self) -> None: + internal_url = "https://com.instana.example.com/api/test" + + with tracer.start_as_current_span("test"): + try: + self.http.request("GET", internal_url, retries=False, timeout=1) + except Exception: + pass + + spans = self.recorder.queued_spans() + + assert len(spans) == 1 + + test_span = spans[0] + assert test_span.data["sdk"]["name"] == "test" + + urllib3_spans = [span for span in spans if span.n == "urllib3"] + assert len(urllib3_spans) == 0 + + def test_internal_span_creation_with_url_in_path(self) -> None: + internal_url_path = "https://example.com/com.instana/api/test" + + with tracer.start_as_current_span("test"): + try: + self.http.request("GET", internal_url_path, retries=False, timeout=1) + except Exception: + pass + + spans = self.recorder.queued_spans() + + assert len(spans) == 1 + + test_span = spans[0] + assert test_span.data["sdk"]["name"] == "test" + + urllib3_spans = [span for span in spans if span.n == "urllib3"] + assert len(urllib3_spans) == 0 diff --git a/tests/conftest.py b/tests/conftest.py index 93f89221..44088c85 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -26,7 +26,6 @@ from instana.util.runtime import is_ppc64, is_s390x collect_ignore_glob = [ - "*test_gevent*", "*collector/test_gcr*", "*agent/test_google*", ] @@ -51,11 +50,10 @@ if not os.environ.get("COUCHBASE_TEST"): collect_ignore_glob.append("*test_couchbase*") -if not 
os.environ.get("GEVENT_STARLETTE_TEST"): +if not os.environ.get("GEVENT_TEST"): collect_ignore_glob.extend( [ "*test_gevent*", - "*test_starlette*", ] ) diff --git a/tests/frameworks/test_gevent.py b/tests/frameworks/test_gevent.py index 69a9a6c8..31847024 100644 --- a/tests/frameworks/test_gevent.py +++ b/tests/frameworks/test_gevent.py @@ -2,36 +2,41 @@ # (c) Copyright Instana Inc. 2020 import os -import unittest +import pytest +import urllib3 import gevent from gevent.pool import Group -import urllib3 -from opentracing.scope_managers.gevent import GeventScopeManager +from typing import Generator import tests.apps.flask_app -from instana.span import SDKSpan from instana.singletons import tracer -from ..helpers import testenv, get_spans_by_filter +from tests.helpers import testenv, get_spans_by_filter, filter_test_span -@unittest.skipIf(not os.environ.get("GEVENT_STARLETTE_TEST"), reason="") -class TestGEvent(unittest.TestCase): - def setUp(self): - self.http = urllib3.HTTPConnectionPool('127.0.0.1', port=testenv["flask_port"], maxsize=20) - self.recorder = tracer.recorder - self.recorder.clear_spans() - tracer._scope_manager = GeventScopeManager() +# Skip the tests if the environment variable `GEVENT_TEST` is not set +pytestmark = pytest.mark.skipif(not os.environ.get("GEVENT_TEST"), reason="GEVENT_TEST not set") + - def tearDown(self): - """ Do nothing for now """ - pass +class TestGEvent: + @classmethod + def setup_class(cls) -> None: + """Setup that runs once before all tests in the class""" + cls.http = urllib3.HTTPConnectionPool('127.0.0.1', port=testenv["flask_port"], maxsize=20) + cls.recorder = tracer.span_processor + + @pytest.fixture(autouse=True) + def setUp(self) -> Generator[None, None, None]: + """Clear all spans before each test run""" + self.recorder.clear_spans() def make_http_call(self, n=None): + """Helper function to make HTTP calls""" return self.http.request('GET', testenv["flask_server"] + '/') def spawn_calls(self): - with tracer.start_active_span('spawn_calls'): + """Helper function to spawn multiple HTTP calls""" + with tracer.start_as_current_span('spawn_calls'): jobs = [] jobs.append(gevent.spawn(self.make_http_call)) jobs.append(gevent.spawn(self.make_http_call)) @@ -39,86 +44,78 @@ def spawn_calls(self): gevent.joinall(jobs, timeout=2) def spawn_imap_unordered(self): + """Helper function to test imap_unordered""" igroup = Group() result = [] - with tracer.start_active_span('test'): + with tracer.start_as_current_span('test'): for i in igroup.imap_unordered(self.make_http_call, range(3)): result.append(i) def launch_gevent_chain(self): - with tracer.start_active_span('test'): + """Helper function to launch a chain of gevent calls""" + with tracer.start_as_current_span('test'): gevent.spawn(self.spawn_calls).join() def test_spawning(self): gevent.spawn(self.launch_gevent_chain) - gevent.sleep(2) - + spans = self.recorder.queued_spans() - - self.assertEqual(8, len(spans)) - - span_filter = lambda span: span.n == "sdk" \ - and span.data['sdk']['name'] == 'test' and span.p == None - test_spans = get_spans_by_filter(spans, span_filter) - self.assertIsNotNone(test_spans) - self.assertEqual(len(test_spans), 1) - + + assert len(spans) == 8 + + test_spans = get_spans_by_filter(spans, filter_test_span) + assert test_spans + assert len(test_spans) == 1 + test_span = test_spans[0] - self.assertTrue(type(test_spans[0]) is SDKSpan) - + span_filter = lambda span: span.n == "sdk" \ - and span.data['sdk']['name'] == 'spawn_calls' and span.p == test_span.s + and 
span.data['sdk']['name'] == 'spawn_calls' and span.p == test_span.s spawn_spans = get_spans_by_filter(spans, span_filter) - self.assertIsNotNone(spawn_spans) - self.assertEqual(len(spawn_spans), 1) - + assert spawn_spans + assert len(spawn_spans) == 1 + spawn_span = spawn_spans[0] - self.assertTrue(type(spawn_spans[0]) is SDKSpan) - + span_filter = lambda span: span.n == "urllib3" urllib3_spans = get_spans_by_filter(spans, span_filter) - + for urllib3_span in urllib3_spans: # spans should all have the same test span parent - self.assertEqual(urllib3_span.t, spawn_span.t) - self.assertEqual(urllib3_span.p, spawn_span.s) - + assert urllib3_span.t == spawn_span.t + assert urllib3_span.p == spawn_span.s + # find the wsgi span generated from this urllib3 request span_filter = lambda span: span.n == "wsgi" and span.p == urllib3_span.s wsgi_spans = get_spans_by_filter(spans, span_filter) - self.assertIsNotNone(wsgi_spans) - self.assertEqual(len(wsgi_spans), 1) + assert wsgi_spans is not None + assert len(wsgi_spans) == 1 def test_imap_unordered(self): - gevent.spawn(self.spawn_imap_unordered()) - + gevent.spawn(self.spawn_imap_unordered) gevent.sleep(2) - + spans = self.recorder.queued_spans() - self.assertEqual(7, len(spans)) - - span_filter = lambda span: span.n == "sdk" \ - and span.data['sdk']['name'] == 'test' and span.p == None - test_spans = get_spans_by_filter(spans, span_filter) - self.assertIsNotNone(test_spans) - self.assertEqual(len(test_spans), 1) - + assert len(spans) == 7 + + test_spans = get_spans_by_filter(spans, filter_test_span) + assert test_spans is not None + assert len(test_spans) == 1 + test_span = test_spans[0] - self.assertTrue(type(test_spans[0]) is SDKSpan) - + span_filter = lambda span: span.n == "urllib3" urllib3_spans = get_spans_by_filter(spans, span_filter) - self.assertEqual(len(urllib3_spans), 3) - + assert len(urllib3_spans) == 3 + for urllib3_span in urllib3_spans: # spans should all have the same test span parent - self.assertEqual(urllib3_span.t, test_span.t) - self.assertEqual(urllib3_span.p, test_span.s) - + assert urllib3_span.t == test_span.t + assert urllib3_span.p == test_span.s + # find the wsgi span generated from this urllib3 request span_filter = lambda span: span.n == "wsgi" and span.p == urllib3_span.s wsgi_spans = get_spans_by_filter(spans, span_filter) - self.assertIsNotNone(wsgi_spans) - self.assertEqual(len(wsgi_spans), 1) - + assert wsgi_spans is not None + assert len(wsgi_spans) == 1 diff --git a/tests/frameworks/test_gevent_autotrace.py b/tests/frameworks/test_gevent_autotrace.py index 41bf5f03..7a7a2b8b 100644 --- a/tests/frameworks/test_gevent_autotrace.py +++ b/tests/frameworks/test_gevent_autotrace.py @@ -3,27 +3,31 @@ import importlib import os -import unittest -import socket +import pytest import gevent from gevent import monkey from instana import apply_gevent_monkey_patch +# Teardown not working as expected, run each testcase separately +class TestGEventAutoTrace: -class TestGEventAutoTrace(unittest.TestCase): - def setUp(self): + @pytest.fixture(autouse=True) + def setup_environment(self): + """Setup test environment before each test""" # Ensure that the test suite is operational even when Django is installed # but not running or configured os.environ['DJANGO_SETTINGS_MODULE'] = '' - + self.default_patched_modules = ('socket', 'time', 'select', 'os', 'threading', 'ssl', 'subprocess', 'signal', 'queue',) - - def tearDown(self): + + yield + + # Teardown if os.environ.get('INSTANA_GEVENT_MONKEY_OPTIONS'): 
diff --git a/tests/frameworks/test_sanic.py b/tests/frameworks/test_sanic.py
index 31b98a49..7aa08e21 100644
--- a/tests/frameworks/test_sanic.py
+++ b/tests/frameworks/test_sanic.py
@@ -7,7 +7,7 @@
 
 from instana.singletons import tracer, agent
 from instana.util.ids import hex_id
-from tests.helpers import get_first_span_by_filter, get_first_span_by_name, is_test_span
+from tests.helpers import get_first_span_by_filter, get_first_span_by_name, filter_test_span
 from tests.test_utils import _TraceContextMixin
 
 from tests.apps.sanic_app.server import app
@@ -57,7 +57,7 @@ def test_basic_get(self) -> None:
         spans = self.recorder.queued_spans()
         assert len(spans) == 3
 
-        test_span = get_first_span_by_filter(spans, is_test_span)
+        test_span = get_first_span_by_filter(spans, filter_test_span)
         assert test_span
 
         httpx_span = get_first_span_by_name(spans, "http")
@@ -108,7 +108,7 @@ def test_404(self) -> None:
         spans = self.recorder.queued_spans()
         assert len(spans) == 3
 
-        test_span = get_first_span_by_filter(spans, is_test_span)
+        test_span = get_first_span_by_filter(spans, filter_test_span)
         assert test_span
 
         httpx_span = get_first_span_by_name(spans, "http")
@@ -159,7 +159,7 @@ def test_sanic_exception(self) -> None:
         spans = self.recorder.queued_spans()
         assert len(spans) == 4
 
-        test_span = get_first_span_by_filter(spans, is_test_span)
+        test_span = get_first_span_by_filter(spans, filter_test_span)
         assert test_span
 
         httpx_span = get_first_span_by_name(spans, "http")
@@ -210,7 +210,7 @@ def test_500_instana_exception(self) -> None:
         spans = self.recorder.queued_spans()
         assert len(spans) == 4
 
-        test_span = get_first_span_by_filter(spans, is_test_span)
+        test_span = get_first_span_by_filter(spans, filter_test_span)
         assert test_span
 
         httpx_span = get_first_span_by_name(spans, "http")
@@ -261,7 +261,7 @@ def test_500(self) -> None:
         spans = self.recorder.queued_spans()
         assert len(spans) == 4
 
-        test_span = get_first_span_by_filter(spans, is_test_span)
+        test_span = get_first_span_by_filter(spans, filter_test_span)
         assert test_span
 
         httpx_span = get_first_span_by_name(spans, "http")
@@ -312,7 +312,7 @@ def test_path_templates(self) -> None:
         spans = self.recorder.queued_spans()
         assert len(spans) == 3
 
-        test_span = get_first_span_by_filter(spans, is_test_span)
+        test_span = get_first_span_by_filter(spans, filter_test_span)
         assert test_span
 
         httpx_span = get_first_span_by_name(spans, "http")
@@ -363,7 +363,7 @@ def test_secret_scrubbing(self) -> None:
         spans = self.recorder.queued_spans()
         assert len(spans) == 3
 
-        test_span = get_first_span_by_filter(spans, is_test_span)
+        test_span = get_first_span_by_filter(spans, filter_test_span)
         assert test_span
 
         httpx_span = get_first_span_by_name(spans, "http")
@@ -417,7 +417,7 @@ def test_synthetic_request(self) -> None:
         spans = self.recorder.queued_spans()
         assert len(spans) == 3
 
-        test_span = get_first_span_by_filter(spans, is_test_span)
+        test_span = get_first_span_by_filter(spans, filter_test_span)
         assert test_span
 
         httpx_span = get_first_span_by_name(spans, "http")
@@ -476,7 +476,7 @@ def test_request_header_capture(self) -> None:
         spans = self.recorder.queued_spans()
         assert len(spans) == 3
 
-        test_span = get_first_span_by_filter(spans, is_test_span)
+        test_span = get_first_span_by_filter(spans, filter_test_span)
         assert test_span
 
         httpx_span = get_first_span_by_name(spans, "http")
@@ -523,7 +523,7 @@ def test_response_header_capture(self) -> None:
         spans = self.recorder.queued_spans()
         assert len(spans) == 3
 
-        test_span = get_first_span_by_filter(spans, is_test_span)
+        test_span = get_first_span_by_filter(spans, filter_test_span)
         assert test_span
 
         httpx_span = get_first_span_by_name(spans, "http")
diff --git a/tests/frameworks/test_starlette.py b/tests/frameworks/test_starlette.py
index e332e024..d44f39d8 100644
--- a/tests/frameworks/test_starlette.py
+++ b/tests/frameworks/test_starlette.py
@@ -29,7 +29,6 @@ def _resource(self) -> Generator[None, None, None]:
         # Clear all spans before a test run.
         self.recorder = tracer.span_processor
         self.recorder.clear_spans()
-
         yield
 
     def test_vanilla_get(self) -> None:
         result = self.client.get("/")
diff --git a/tests/helpers.py b/tests/helpers.py
index 850ba59b..f7c2efc4 100644
--- a/tests/helpers.py
+++ b/tests/helpers.py
@@ -108,7 +108,7 @@ def fail_with_message_and_span_dump(msg, spans):
     pytest.fail(msg + span_dump, True)
 
 
-def is_test_span(span):
+def filter_test_span(span):
     """
     return the filter for test span
     """
diff --git a/tests/propagators/test_http_propagator.py b/tests/propagators/test_http_propagator.py
index 25b36635..bac0a173 100644
--- a/tests/propagators/test_http_propagator.py
+++ b/tests/propagators/test_http_propagator.py
@@ -340,3 +340,45 @@ def test_w3c_off_x_instana_l_0(
         if "tracestate" in carrier_header.keys():
             assert "tracestate" in downstream_carrier
             assert carrier_header["tracestate"] == downstream_carrier["tracestate"]
+
+    def test_suppression_when_child_level_is_lower(
+        self,
+        _trace_id: int,
+        _span_id: int,
+    ) -> None:
+        """
+        Test that span_context.level is updated when the child level (extracted from carrier) is lower than the current span_context.level.
+        """
+        # Create a span context with level=1
+        original_span_context = SpanContext(
+            trace_id=_trace_id,
+            span_id=_span_id,
+            is_remote=False,
+            level=1,
+        )
+
+        # Create a carrier with level=0 (suppression)
+        carrier_header = {"x-instana-l": "0"}
+
+        # Inject the span context into the carrier
+        self.hptc.inject(original_span_context, carrier_header)
+
+        # Extract the span context from the carrier to verify the level was updated
+        extracted_context = self.hptc.extract(carrier_header)
+
+        # Verify that the level is 0 (suppressed)
+        assert extracted_context.level == 0
+        assert extracted_context.suppression
+
+        # Create a new carrier to test the propagation
+        downstream_carrier = {}
+
+        # Inject the extracted context into the downstream carrier
+        self.hptc.inject(extracted_context, downstream_carrier)
+
+        # Verify that the downstream carrier has the correct level
+        assert downstream_carrier.get("X-INSTANA-L") == "0"
+
+        # Verify that no trace or span IDs are injected when suppressed
+        assert "X-INSTANA-T" not in downstream_carrier
+        assert "X-INSTANA-S" not in downstream_carrier
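
Condensed, the suppression contract this new test encodes: once x-instana-l: 0 has been extracted, only the level header travels downstream, and the trace and span IDs are withheld so no spans are recorded for the request. Using the same propagator fixture as in the test above:

    carrier = {"x-instana-l": "0"}
    ctx = self.hptc.extract(carrier)   # level drops to 0, ctx.suppression is set
    downstream = {}
    self.hptc.inject(ctx, downstream)
    # downstream now holds only {"X-INSTANA-L": "0"};
    # X-INSTANA-T and X-INSTANA-S are deliberately absent.
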
diff --git a/tests/requirements-gevent-starlette.txt b/tests/requirements-gevent-starlette.txt
index 86da4f49..17465bd6 100644
--- a/tests/requirements-gevent-starlette.txt
+++ b/tests/requirements-gevent-starlette.txt
@@ -1,6 +1,6 @@
 -r requirements-minimal.txt
 flask>=0.12.2
-gevent>=1.4.0
+gevent>=23.9.0.post1
 mock>=2.0.0
 pyramid>=2.0.1
 starlette>=0.12.13
diff --git a/tests/requirements-pre314.txt b/tests/requirements-pre314.txt
index 0a025d53..2ad1e026 100644
--- a/tests/requirements-pre314.txt
+++ b/tests/requirements-pre314.txt
@@ -12,7 +12,7 @@ Django>=4.2.16
 # fastapi>=0.115.0; python_version >= "3.13"
 flask>=2.3.2
 # gevent is taking more than 20min to build on 3.14
-# gevent>=1.4.0
+# gevent>=23.9.0.post1
 grpcio>=1.14.1
 google-cloud-pubsub>=2.0.0
 google-cloud-storage>=1.24.0
diff --git a/tests/requirements.txt b/tests/requirements.txt
index 48afb6a9..6e8fc6ca 100644
--- a/tests/requirements.txt
+++ b/tests/requirements.txt
@@ -10,7 +10,6 @@ Django>=4.2.16
 fastapi>=0.92.0; python_version < "3.13"
 fastapi>=0.115.0; python_version >= "3.13"
 flask>=2.3.2
-gevent>=1.4.0
 grpcio>=1.14.1
 google-cloud-pubsub>=2.0.0
 google-cloud-storage>=1.24.0
config["tracing"]["ignore_endpoints"] = ( "config_service1;config_service2:method1,method2" ) config["tracing"]["kafka"] = {"trace_correlation": True} - test_tracing = {"ignore-endpoints": "service1;service2:method1,method2"} + config["tracing"]["disable"] = [{"databases": True}] + + # agent config (configuration.yaml) + test_tracing = { + "ignore-endpoints": "service1;service2:method1,method2", + "disable": [ + {"messaging": True}, + ], + } self.base_options = StandardOptions() self.base_options.set_tracing(test_tracing) @@ -163,7 +204,16 @@ def test_set_trace_configurations_by_local_configuration_file(self) -> None: "kafka.*.topic4", ] + # Check disabled_spans list + assert "databases" in self.base_options.disabled_spans + assert "logging" not in self.base_options.disabled_spans + assert "redis" not in self.base_options.disabled_spans + assert "kafka" not in self.base_options.disabled_spans + assert "messaging" not in self.base_options.disabled_spans + assert len(self.base_options.enabled_spans) == 0 + def test_set_trace_configurations_by_in_code_variable(self) -> None: + config["tracing"] = {} config["tracing"]["ignore_endpoints"] = ( "config_service1;config_service2:method1,method2" ) @@ -184,6 +234,13 @@ def test_set_trace_configurations_by_agent_configuration(self) -> None: test_tracing = { "ignore-endpoints": "service1;service2:method1,method2", "trace-correlation": True, + "disable": [ + { + "messaging": True, + "logging": True, + "kafka": False, + }, + ], } self.base_options = StandardOptions() @@ -196,12 +253,78 @@ def test_set_trace_configurations_by_agent_configuration(self) -> None: ] assert self.base_options.kafka_trace_correlation + # Check disabled_spans list + assert "databases" not in self.base_options.disabled_spans + assert "logging" in self.base_options.disabled_spans + assert "messaging" in self.base_options.disabled_spans + assert "kafka" in self.base_options.enabled_spans + def test_set_trace_configurations_by_default(self) -> None: self.base_options = StandardOptions() self.base_options.set_tracing({}) assert not self.base_options.ignore_endpoints assert self.base_options.kafka_trace_correlation + assert len(self.base_options.disabled_spans) == 0 + assert len(self.base_options.enabled_spans) == 0 + + @patch.dict( + os.environ, + {"INSTANA_TRACING_DISABLE": "true"}, + ) + def test_set_trace_configurations_disable_all_tracing(self) -> None: + self.base_options = BaseOptions() + + # All categories should be disabled + assert "logging" in self.base_options.disabled_spans + assert "databases" in self.base_options.disabled_spans + assert "messaging" in self.base_options.disabled_spans + assert "protocols" in self.base_options.disabled_spans + + # Check is_span_disabled method + assert self.base_options.is_span_disabled(category="logging") + assert self.base_options.is_span_disabled(category="databases") + assert self.base_options.is_span_disabled(span_type="redis") + + @patch.dict( + os.environ, + { + "INSTANA_CONFIG_PATH": "tests/util/test_configuration-1.yaml", + }, + ) + def test_set_trace_configurations_disable_local_yaml(self) -> None: + self.base_options = BaseOptions() + + # All categories should be disabled + assert "logging" in self.base_options.disabled_spans + assert "databases" in self.base_options.disabled_spans + assert "redis" not in self.base_options.disabled_spans + assert "redis" in self.base_options.enabled_spans + + # Check is_span_disabled method + assert self.base_options.is_span_disabled(category="logging") + assert 
diff --git a/tests/test_span_disabling.py b/tests/test_span_disabling.py
new file mode 100644
index 00000000..e1e1cbf5
--- /dev/null
+++ b/tests/test_span_disabling.py
@@ -0,0 +1,79 @@
+# (c) Copyright IBM Corp. 2025
+
+import pytest
+
+from instana.options import BaseOptions, StandardOptions
+from instana.singletons import agent
+
+
+class TestSpanDisabling:
+    @pytest.fixture(autouse=True)
+    def setup(self):
+        # Save original options
+        self.original_options = agent.options
+        yield
+        # Restore original options
+        agent.options = self.original_options
+
+    def test_is_span_disabled_default(self):
+        options = BaseOptions()
+        assert not options.is_span_disabled(category="logging")
+        assert not options.is_span_disabled(category="databases")
+        assert not options.is_span_disabled(span_type="redis")
+
+    def test_disable_category(self):
+        options = BaseOptions()
+        options.disabled_spans = ["logging"]
+        assert options.is_span_disabled(category="logging")
+        assert not options.is_span_disabled(category="databases")
+
+    def test_disable_type(self):
+        options = BaseOptions()
+        options.disabled_spans = ["redis"]
+        assert options.is_span_disabled(span_type="redis")
+        assert not options.is_span_disabled(span_type="mysql")
+
+    def test_type_category_relationship(self):
+        options = BaseOptions()
+        options.disabled_spans = ["databases"]
+        assert options.is_span_disabled(span_type="redis")
+        assert options.is_span_disabled(span_type="mysql")
+
+    def test_precedence_rules(self):
+        options = BaseOptions()
+        options.disabled_spans = ["databases"]
+        options.enabled_spans = ["redis"]
+        assert options.is_span_disabled(category="databases")
+        assert options.is_span_disabled(span_type="mysql")
+        assert not options.is_span_disabled(span_type="redis")
+
+    @pytest.mark.parametrize("value", ["True", "true", "1"])
+    def test_env_var_disable_all(self, value, monkeypatch):
+        monkeypatch.setenv("INSTANA_TRACING_DISABLE", value)
+        options = BaseOptions()
+        assert options.is_span_disabled(category="logging") is True
+        assert options.is_span_disabled(category="databases") is True
+        assert options.is_span_disabled(category="messaging") is True
+        assert options.is_span_disabled(category="protocols") is True
+
+    def test_env_var_disable_specific(self, monkeypatch):
+        monkeypatch.setenv("INSTANA_TRACING_DISABLE", "logging, redis")
+        options = BaseOptions()
+        assert options.is_span_disabled(category="logging") is True
+        assert options.is_span_disabled(category="databases") is False
+        assert options.is_span_disabled(span_type="redis") is True
+        assert options.is_span_disabled(span_type="mysql") is False
+
+    def test_yaml_config(self):
+        options = StandardOptions()
+        tracing_config = {
+            "disable": [{"logging": True}, {"redis": False}, {"databases": True}]
+        }
+        options.set_tracing(tracing_config)
+        assert options.is_span_disabled(category="logging")
+        assert options.is_span_disabled(category="databases")
+        assert options.is_span_disabled(span_type="mysql")
+        assert not options.is_span_disabled(span_type="redis")
+
+
+# Made with Bob
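
Reading these tests together, the precedence they encode is: an explicit enable of a span type wins over everything, an explicit disable of the type comes next, and otherwise the type inherits from its category (redis and mysql fall under databases, kafka under messaging). A hypothetical sketch of that decision order; the shipped BaseOptions.is_span_disabled may be structured differently:

    # Illustrative mapping only; the real category table is in the instana package.
    CATEGORY_OF = {"redis": "databases", "mysql": "databases", "kafka": "messaging"}

    def is_span_disabled(self, category=None, span_type=None):
        if span_type is not None:
            if span_type in self.enabled_spans:      # explicit enable wins
                return False
            if span_type in self.disabled_spans:     # then explicit disable
                return True
            category = CATEGORY_OF.get(span_type)    # else fall back to its category
        return category in self.disabled_spans
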
diff --git a/tests/util/test_config_reader.py b/tests/util/test_config_reader.py
index b9bb063d..c5753f8e 100644
--- a/tests/util/test_config_reader.py
+++ b/tests/util/test_config_reader.py
@@ -1,16 +1,78 @@
 # (c) Copyright IBM Corp. 2025
 
 import logging
+import os
+from typing import TYPE_CHECKING, Generator
 
 import pytest
+from yaml import YAMLError
 
-from instana.util.config import parse_ignored_endpoints_from_yaml
+from instana.util.config import (
+    get_disable_trace_configurations_from_yaml,
+    parse_ignored_endpoints_from_yaml,
+)
+from instana.util.config_reader import ConfigReader
+
+if TYPE_CHECKING:
+    from pytest import LogCaptureFixture
+    from pytest_mock import MockerFixture
 
 
 class TestConfigReader:
-    def test_load_configuration_with_tracing(
-        self, caplog: pytest.LogCaptureFixture
+    @pytest.fixture(autouse=True)
+    def _resource(
+        self,
+        caplog: "LogCaptureFixture",
+    ) -> Generator[None, None, None]:
+        yield
+        caplog.clear()
+        if "INSTANA_CONFIG_PATH" in os.environ:
+            os.environ.pop("INSTANA_CONFIG_PATH")
+
+    def test_config_reader_null(self, caplog: "LogCaptureFixture") -> None:
+        config_reader = ConfigReader(os.environ.get("INSTANA_CONFIG_PATH", ""))
+        assert config_reader.file_path == ""
+        assert config_reader.data == {}
+        assert "ConfigReader: No configuration file specified" in caplog.messages
+
+    def test_config_reader_default(self) -> None:
+        filename = "tests/util/test_configuration-1.yaml"
+        os.environ["INSTANA_CONFIG_PATH"] = filename
+        config_reader = ConfigReader(os.environ.get("INSTANA_CONFIG_PATH", ""))
+        assert config_reader.file_path == filename
+        assert "tracing" in config_reader.data
+        assert len(config_reader.data["tracing"]) == 2
+
+    def test_config_reader_file_not_found_error(
+        self, caplog: "LogCaptureFixture"
+    ) -> None:
+        filename = "tests/util/test_configuration-3.yaml"
+        os.environ["INSTANA_CONFIG_PATH"] = filename
+        config_reader = ConfigReader(os.environ.get("INSTANA_CONFIG_PATH", ""))
+        assert config_reader.file_path == filename
+        assert config_reader.data == {}
+        assert (
+            f"ConfigReader: Configuration file has not found: {filename}"
+            in caplog.messages
+        )
+
+    def test_config_reader_yaml_error(
+        self, caplog: "LogCaptureFixture", mocker: "MockerFixture"
+    ) -> None:
+        filename = "tests/util/test_configuration-1.yaml"
+        exception_message = "BLAH"
+        mocker.patch(
+            "instana.util.config_reader.yaml.safe_load",
+            side_effect=YAMLError(exception_message),
+        )
+
+        config_reader = ConfigReader(filename)  # noqa: F841
+        assert (
+            f"ConfigReader: Error parsing YAML file: {exception_message}"
+            in caplog.messages
+        )
+
+    def test_load_configuration_with_tracing(self, caplog: "LogCaptureFixture") -> None:
         caplog.set_level(logging.DEBUG, logger="instana")
 
         ignore_endpoints = parse_ignored_endpoints_from_yaml(
@@ -32,12 +94,20 @@ def test_load_configuration_with_tracing(
             "kafka.*.topic4",
         ]
 
+        os.environ["INSTANA_CONFIG_PATH"] = "tests/util/test_configuration-1.yaml"
+        disabled_spans, enabled_spans = get_disable_trace_configurations_from_yaml()
+        # Check disabled_spans list
+        assert "logging" in disabled_spans
+        assert "databases" in disabled_spans
+        assert "redis" not in disabled_spans
+        assert "redis" in enabled_spans
+
         assert (
             'Please use "tracing" instead of "com.instana.tracing" for local configuration file.'
             not in caplog.messages
         )
 
-    def test_load_configuration_legacy(self, caplog: pytest.LogCaptureFixture) -> None:
+    def test_load_configuration_legacy(self, caplog: "LogCaptureFixture") -> None:
         caplog.set_level(logging.DEBUG, logger="instana")
 
         ignore_endpoints = parse_ignored_endpoints_from_yaml(
@@ -58,6 +128,15 @@ def test_load_configuration_legacy(self, caplog: pytest.LogCaptureFixture) -> No
             "kafka.*.span-topic",
             "kafka.*.topic4",
         ]
+
+        os.environ["INSTANA_CONFIG_PATH"] = "tests/util/test_configuration-2.yaml"
+        disabled_spans, enabled_spans = get_disable_trace_configurations_from_yaml()
+        # Check disabled_spans list
+        assert "logging" in disabled_spans
+        assert "databases" in disabled_spans
+        assert "redis" not in disabled_spans
+        assert "redis" in enabled_spans
+
         assert (
             'Please use "tracing" instead of "com.instana.tracing" for local configuration file.'
             in caplog.messages
diff --git a/tests/util/test_configuration-1.yaml b/tests/util/test_configuration-1.yaml
index af890a35..ac61d362 100644
--- a/tests/util/test_configuration-1.yaml
+++ b/tests/util/test_configuration-1.yaml
@@ -17,3 +17,8 @@ tracing:
       endpoints: ["span-topic", "topic4"]
     # - methods: ["consume", "send"]
     #   endpoints: ["*"]  # Applied to all topics
+  disable:
+    - "logging": true
+    - "databases": true
+    - "redis": false
+
\ No newline at end of file
diff --git a/tests/util/test_configuration-2.yaml b/tests/util/test_configuration-2.yaml
index b418cd55..5ed83ec1 100644
--- a/tests/util/test_configuration-2.yaml
+++ b/tests/util/test_configuration-2.yaml
@@ -18,3 +18,7 @@ com.instana.tracing:
       endpoints: ["span-topic", "topic4"]
     # - methods: ["consume", "send"]
     #   endpoints: ["*"]  # Applied to all topics
+  disable:
+    - "logging": true
+    - "databases": true
+    - "redis": false
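
Taken together, the two YAML fixtures exercise the same disable stanza under both the current ("tracing") and the legacy ("com.instana.tracing") section names. In a user-facing configuration file referenced by INSTANA_CONFIG_PATH, the stanza would look roughly like this (values illustrative):

    tracing:
      disable:
        - logging: true      # turn off a whole category
        - databases: true
        - redis: false       # re-enable one span type inside a disabled category
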