diff --git a/.codespellignore b/.codespellignore index 6bf3abc41e7..2b53a25e1e1 100644 --- a/.codespellignore +++ b/.codespellignore @@ -7,3 +7,4 @@ ans nam valu thirdparty +addOpt diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 53493602b6b..bd6dc5d902a 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -27,3 +27,5 @@ A clear and concise description of what the bug is. ### Expected behavior A clear and concise description of what you expected to happen. + +**Tip**: [React](https://github.blog/news-insights/product-news/add-reactions-to-pull-requests-issues-and-comments/) with 👍 to help prioritize this issue. Please use comments to provide useful context, avoiding `+1` or `me too`, to help us triage it. Learn more [here](https://opentelemetry.io/community/end-user/issue-participation/). diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index 552b1333c21..b733d3d8aa1 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -27,3 +27,5 @@ A clear and concise list of any similar and existing solutions from other projec ### Additional Context Add any other context or screenshots about the feature request here. + +**Tip**: [React](https://github.blog/news-insights/product-news/add-reactions-to-pull-requests-issues-and-comments/) with 👍 to help prioritize this issue. Please use comments to provide useful context, avoiding `+1` or `me too`, to help us triage it. Learn more [here](https://opentelemetry.io/community/end-user/issue-participation/). 
diff --git a/.github/ISSUE_TEMPLATE/version_release.md b/.github/ISSUE_TEMPLATE/version_release.md index 1c048b04d10..3ec7cd5106f 100644 --- a/.github/ISSUE_TEMPLATE/version_release.md +++ b/.github/ISSUE_TEMPLATE/version_release.md @@ -20,3 +20,5 @@ assignees: '' - [ ] [Release contrib](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md#release-process) - [ ] [Sync website docs](https://github.com/open-telemetry/opentelemetry-go/blob/main/RELEASING.md#website-documentation) - [ ] [Close the milestone](https://github.com/open-telemetry/opentelemetry-go/blob/main/RELEASING.md#close-the-milestone) + +**Tip**: [React](https://github.blog/news-insights/product-news/add-reactions-to-pull-requests-issues-and-comments/) with 👍 to help prioritize this issue. Please use comments to provide useful context, avoiding `+1` or `me too`, to help us triage it. Learn more [here](https://opentelemetry.io/community/end-user/issue-participation/). diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 6d6b49aaf2e..b32779d9a48 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -9,13 +9,15 @@ on: permissions: read-all env: - DEFAULT_GO_VERSION: "~1.24.0" + DEFAULT_GO_VERSION: "~1.25.0" jobs: benchmark: + permissions: + contents: write # required for pushing to gh-pages branch name: Benchmarks - runs-on: equinix-bare-metal + runs-on: oracle-bare-metal-64cpu-512gb-x86-64 steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: go-version: ${{ env.DEFAULT_GO_VERSION }} @@ -24,7 +26,7 @@ jobs: - name: Run benchmarks run: make benchmark | tee output.txt - name: Download previous benchmark data - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: 
actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: ./benchmarks # `github.event.before` means the commit before the push (i.e. the previous commit). @@ -44,7 +46,7 @@ jobs: # Add benchmark summary to GitHub workflow run report summary-always: true - name: Save benchmark data - uses: actions/cache/save@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/save@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 # The cache will be saved even if a step fails. if: always() with: diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml index 163693f3fc3..07e08fe75a0 100644 --- a/.github/workflows/changelog.yml +++ b/.github/workflows/changelog.yml @@ -20,7 +20,7 @@ jobs: if: ${{ !contains(github.event.pull_request.labels.*.name, 'dependencies') && !contains(github.event.pull_request.labels.*.name, 'Skip Changelog') && !contains(github.event.pull_request.title, '[chore]')}} steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Check for CHANGELOG changes run: | diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b250ad2b02f..0bb9810a7a3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -12,7 +12,7 @@ env: # backwards compatibility with the previous two minor releases and we # explicitly test our code for these versions so keeping this at prior # versions does not add value. - DEFAULT_GO_VERSION: "~1.24.0" + DEFAULT_GO_VERSION: "~1.25.0" # Declare default permissions as read only. permissions: read-all jobs: @@ -20,7 +20,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Repo - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: fetch-depth: 0 ## Needed for "Set internal/tools/go.mod timestamp" step. 
- name: Install Go @@ -30,7 +30,7 @@ jobs: check-latest: true cache-dependency-path: "**/go.sum" - name: Tools cache - uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 env: cache-name: go-tools-cache with: @@ -56,7 +56,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Repo - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Setup Environment run: | echo "GOPATH=$(go env GOPATH)" >> $GITHUB_ENV @@ -73,7 +73,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Repo - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Install Go uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: @@ -87,7 +87,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Repo - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Install Go uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: @@ -101,7 +101,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Repo - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Install Go uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: @@ -121,12 +121,12 @@ jobs: needs: [test-coverage] steps: - name: Checkout Repo - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 with: pattern: 
coverage-artifacts-${{ env.DEFAULT_GO_VERSION }} - name: Upload coverage report - uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3 + uses: codecov/codecov-action@fdcc8476540edceab3de004e990f80d881c6cc00 # v5.5.0 with: fail_ci_if_error: true files: ./coverage.txt @@ -135,7 +135,7 @@ jobs: compatibility-test: strategy: matrix: - go-version: ["1.24.0", "1.23.0"] + go-version: ["1.25.0", "1.24.0", "1.23.0"] platform: - os: ubuntu-latest arch: "386" @@ -154,7 +154,7 @@ jobs: runs-on: ${{ matrix.platform.os }} steps: - name: Checkout code - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Install Go uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: diff --git a/.github/workflows/close-stale.yml b/.github/workflows/close-stale.yml index d88b74d6ac6..01f15ddc79b 100644 --- a/.github/workflows/close-stale.yml +++ b/.github/workflows/close-stale.yml @@ -5,10 +5,13 @@ on: - cron: "8 5 * * *" # arbitrary time not to DDOS GitHub permissions: - issues: write - pull-requests: write + contents: read + jobs: stale: + permissions: + issues: write + pull-requests: write runs-on: ubuntu-latest steps: - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 94c48195392..9c3ea0d2371 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -28,17 +28,17 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 + uses: github/codeql-action/init@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3.29.11 with: languages: go - name: Autobuild - uses: github/codeql-action/autobuild@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 + uses: github/codeql-action/autobuild@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3.29.11 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 + uses: github/codeql-action/analyze@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3.29.11 diff --git a/.github/workflows/codespell.yaml b/.github/workflows/codespell.yaml index c7f5d8bb7f6..8c11460ad8d 100644 --- a/.github/workflows/codespell.yaml +++ b/.github/workflows/codespell.yaml @@ -11,7 +11,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Repo - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Codespell run: make codespell - run: make check-clean-work-tree diff --git a/.github/workflows/fossa.yml b/.github/workflows/fossa.yml index 86b1a94e18b..f86b5a99322 100644 --- a/.github/workflows/fossa.yml +++ b/.github/workflows/fossa.yml @@ -12,7 +12,7 @@ jobs: fossa: runs-on: ubuntu-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - uses: fossas/fossa-action@3ebcea1862c6ffbd5cf1b4d0bd6b3fe7bd6f2cac # v1.7.0 with: diff --git a/.github/workflows/links-fail-fast.yml b/.github/workflows/links-fail-fast.yml index 4d73578997d..a154caba0e6 100644 --- a/.github/workflows/links-fail-fast.yml +++ b/.github/workflows/links-fail-fast.yml @@ -19,7 +19,7 @@ jobs: files: ${{ steps.changes.outputs.files }} steps: - name: Checkout Repo - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: 
actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: fetch-depth: 0 - name: Get changed files @@ -36,13 +36,11 @@ jobs: runs-on: ubuntu-latest needs: changedfiles if: ${{needs.changedfiles.outputs.files}} - permissions: - contents: read steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Restore lychee cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 id: cache-restore with: path: .lycheecache @@ -50,7 +48,7 @@ jobs: restore-keys: cache-lychee- - name: Link Checker - uses: lycheeverse/lychee-action@82202e5e9c2f4ef1a55a3d02563e1cb6041e5332 # v2.4.1 + uses: lycheeverse/lychee-action@885c65f3dc543b57c898c8099f4e08c8afd178a2 # v2.6.1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: @@ -60,7 +58,7 @@ jobs: - name: Save lychee cache if: always() - uses: actions/cache/save@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/save@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: .lycheecache key: ${{ steps.cache-restore.outputs.cache-primary-key }} diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index 5a64e635ddd..5702f4e9f44 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -14,14 +14,14 @@ jobs: check-links: runs-on: ubuntu-latest permissions: - contents: read + issues: write # required for creating issues from link checker reports steps: - name: Checkout Repo - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Restore lychee cache - uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 id: cache-restore with: path: .lycheecache @@ 
-30,7 +30,7 @@ jobs: - name: Link Checker id: lychee - uses: lycheeverse/lychee-action@82202e5e9c2f4ef1a55a3d02563e1cb6041e5332 # v2.4.1 + uses: lycheeverse/lychee-action@885c65f3dc543b57c898c8099f4e08c8afd178a2 # v2.6.1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: @@ -38,7 +38,7 @@ jobs: - name: Save lychee cache if: always() - uses: actions/cache/save@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + uses: actions/cache/save@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4 with: path: .lycheecache key: ${{ steps.cache-restore.outputs.cache-primary-key }} diff --git a/.github/workflows/markdown-fail-fast.yml b/.github/workflows/markdown-fail-fast.yml index 59972bc7a97..1e81cf99303 100644 --- a/.github/workflows/markdown-fail-fast.yml +++ b/.github/workflows/markdown-fail-fast.yml @@ -2,10 +2,14 @@ name: Markdown (Fail Fast) on: push: + paths: + - "**.md" pull_request: + paths: + - "**.md" -# Declare default permissions as read only. -permissions: read-all +permissions: + contents: read jobs: changedfiles: @@ -15,13 +19,36 @@ jobs: md: ${{ steps.changes.outputs.md }} steps: - name: Checkout Repo - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: - fetch-depth: 0 + # Shallow clone, but enough for `git diff HEAD~1 HEAD`. + fetch-depth: 2 - name: Get changed files id: changes + env: + EVENT_NAME: ${{ github.event_name }} + BASE_SHA: ${{ github.event.pull_request.base.sha || '' }} + HEAD_SHA: ${{ github.event.pull_request.head.sha || '' }} + shell: bash run: | - echo "md=$(git diff --name-only --diff-filter=ACMRTUXB origin/${{ github.event.pull_request.base.ref }} ${{ github.event.pull_request.head.sha }} | grep .md$ | xargs)" >> $GITHUB_OUTPUT + echo "Detecting changed markdown files..." 
+ + if [[ "$EVENT_NAME" == "pull_request" ]]; then + echo "Running in pull_request context" + echo "Base SHA: $BASE_SHA" + echo "Head SHA: $HEAD_SHA" + CHANGED=$(git diff --name-only "$BASE_SHA" "$HEAD_SHA" | grep '\.md$' || true) + elif [[ "$EVENT_NAME" == "push" ]]; then + echo "Running in push context" + CHANGED=$(git diff --name-only HEAD~1 HEAD | grep '\.md$' || true) + else + echo "Unsupported event type: $EVENT_NAME" + exit 1 + fi + + MD=$(echo "$CHANGED" | tr '\n' ' ' | xargs) + echo "Markdown files changed: $MD" + echo "md=$MD" >> "$GITHUB_OUTPUT" lint: name: lint markdown files @@ -29,9 +56,9 @@ jobs: needs: changedfiles if: ${{needs.changedfiles.outputs.md}} steps: - - name: Checkout Repo - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - name: Run linter - uses: docker://avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee - with: - args: ${{needs.changedfiles.outputs.md}} + - name: Checkout Repo + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - name: Run linter + uses: docker://avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee + with: + args: ${{needs.changedfiles.outputs.md}} diff --git a/.github/workflows/markdown.yml b/.github/workflows/markdown.yml index a12d46b5fb8..a4f71192db3 100644 --- a/.github/workflows/markdown.yml +++ b/.github/workflows/markdown.yml @@ -12,10 +12,12 @@ permissions: read-all jobs: lint-markdown: + permissions: + issues: write # required for creating issues from markdown lint reports runs-on: ubuntu-latest steps: - name: Checkout Repo - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Run linter id: markdownlint diff --git a/.github/workflows/protect-released-changelog.yml b/.github/workflows/protect-released-changelog.yml index ce6b39ee68d..a7bf8a292b8 100644 --- 
a/.github/workflows/protect-released-changelog.yml +++ b/.github/workflows/protect-released-changelog.yml @@ -17,7 +17,7 @@ jobs: if: ${{ !contains(github.event.pull_request.labels.*.name, 'Unlock Released Changelog')}} steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - name: Protect the released changelog run: | diff --git a/.github/workflows/scorecard.yml b/.github/workflows/scorecard.yml index 0214c100d58..f12f5475ff4 100644 --- a/.github/workflows/scorecard.yml +++ b/.github/workflows/scorecard.yml @@ -26,7 +26,7 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: persist-credentials: false @@ -58,6 +58,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard (optional). # Commenting out will disable upload of results to your repo's Code Scanning dashboard - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0 + uses: github/codeql-action/upload-sarif@3c3833e0f8c1c83d449a7478aa59c036a9165498 # v3.29.11 with: sarif_file: results.sarif diff --git a/.golangci.yml b/.golangci.yml index 5f69cc027c2..b01762ffcc7 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -10,6 +10,7 @@ linters: - depguard - errcheck - errorlint + - gocritic - godot - gosec - govet @@ -86,6 +87,18 @@ linters: deny: - pkg: go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal desc: Do not use cross-module internal packages. + gocritic: + disabled-checks: + - appendAssign + - commentedOutCode + - dupArg + - hugeParam + - importShadow + - preferDecodeRune + - rangeValCopy + - unnamedResult + - whyNoLint + enable-all: true godot: exclude: # Exclude links. 
@@ -167,7 +180,10 @@ linters: - fmt.Print - fmt.Printf - fmt.Println + - name: unused-parameter + - name: unused-receiver - name: unnecessary-stmt + - name: use-any - name: useless-break - name: var-declaration - name: var-naming @@ -224,10 +240,6 @@ linters: - linters: - gosec text: 'G402: TLS MinVersion too low.' - paths: - - third_party$ - - builtin$ - - examples$ issues: max-issues-per-linter: 0 max-same-issues: 0 @@ -237,14 +249,12 @@ formatters: - goimports - golines settings: + gofumpt: + extra-rules: true goimports: local-prefixes: - - go.opentelemetry.io + - go.opentelemetry.io/otel golines: max-len: 120 exclusions: generated: lax - paths: - - third_party$ - - builtin$ - - examples$ diff --git a/.lycheeignore b/.lycheeignore index 40d62fa2eb8..5328505888d 100644 --- a/.lycheeignore +++ b/.lycheeignore @@ -2,5 +2,8 @@ http://localhost http://jaeger-collector https://github.com/open-telemetry/opentelemetry-go/milestone/ https://github.com/open-telemetry/opentelemetry-go/projects +# Weaver model URL for semantic-conventions repository. +https?:\/\/github\.com\/open-telemetry\/semantic-conventions\/archive\/refs\/tags\/[^.]+\.zip\[[^]]+] file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual +http://4.3.2.1:78/user/123 \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 4acc75701b7..f3abcfdc2e3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,93 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm +## [1.38.0/0.60.0/0.14.0/0.0.13] 2025-08-29 + +This release is the last to support [Go 1.23]. +The next release will require at least [Go 1.24]. + +### Added + +- Add native histogram exemplar support in `go.opentelemetry.io/otel/exporters/prometheus`. (#6772) +- Add template attribute functions to the `go.opentelmetry.io/otel/semconv/v1.34.0` package. 
(#6939) + - `ContainerLabel` + - `DBOperationParameter` + - `DBSystemParameter` + - `HTTPRequestHeader` + - `HTTPResponseHeader` + - `K8SCronJobAnnotation` + - `K8SCronJobLabel` + - `K8SDaemonSetAnnotation` + - `K8SDaemonSetLabel` + - `K8SDeploymentAnnotation` + - `K8SDeploymentLabel` + - `K8SJobAnnotation` + - `K8SJobLabel` + - `K8SNamespaceAnnotation` + - `K8SNamespaceLabel` + - `K8SNodeAnnotation` + - `K8SNodeLabel` + - `K8SPodAnnotation` + - `K8SPodLabel` + - `K8SReplicaSetAnnotation` + - `K8SReplicaSetLabel` + - `K8SStatefulSetAnnotation` + - `K8SStatefulSetLabel` + - `ProcessEnvironmentVariable` + - `RPCConnectRPCRequestMetadata` + - `RPCConnectRPCResponseMetadata` + - `RPCGRPCRequestMetadata` + - `RPCGRPCResponseMetadata` +- Add `ErrorType` attribute helper function to the `go.opentelmetry.io/otel/semconv/v1.34.0` package. (#6962) +- Add `WithAllowKeyDuplication` in `go.opentelemetry.io/otel/sdk/log` which can be used to disable deduplication for log records. (#6968) +- Add `WithCardinalityLimit` option to configure the cardinality limit in `go.opentelemetry.io/otel/sdk/metric`. (#6996, #7065, #7081, #7164, #7165, #7179) +- Add `Clone` method to `Record` in `go.opentelemetry.io/otel/log` that returns a copy of the record with no shared state. (#7001) +- Add experimental self-observability span and batch span processor metrics in `go.opentelemetry.io/otel/sdk/trace`. + Check the `go.opentelemetry.io/otel/sdk/trace/internal/x` package documentation for more information. (#7027, #6393, #7209) +- The `go.opentelemetry.io/otel/semconv/v1.36.0` package. + The package contains semantic conventions from the `v1.36.0` version of the OpenTelemetry Semantic Conventions. 
+ See the [migration documentation](./semconv/v1.36.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.34.0.`(#7032, #7041) +- Add support for configuring Prometheus name translation using `WithTranslationStrategy` option in `go.opentelemetry.io/otel/exporters/prometheus`. The current default translation strategy when UTF-8 mode is enabled is `NoUTF8EscapingWithSuffixes`, but a future release will change the default strategy to `UnderscoreEscapingWithSuffixes` for compliance with the specification. (#7111) +- Add experimental self-observability log metrics in `go.opentelemetry.io/otel/sdk/log`. + Check the `go.opentelemetry.io/otel/sdk/log/internal/x` package documentation for more information. (#7121) +- Add experimental self-observability trace exporter metrics in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`. + Check the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x` package documentation for more information. (#7133) +- Support testing of [Go 1.25]. (#7187) +- The `go.opentelemetry.io/otel/semconv/v1.37.0` package. + The package contains semantic conventions from the `v1.37.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.37.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.36.0.`(#7254) + +### Changed + +- Optimize `TraceIDFromHex` and `SpanIDFromHex` in `go.opentelemetry.io/otel/sdk/trace`. (#6791) +- Change `AssertEqual` in `go.opentelemetry.io/otel/log/logtest` to accept `TestingT` in order to support benchmarks and fuzz tests. (#6908) +- Change `DefaultExemplarReservoirProviderSelector` in `go.opentelemetry.io/otel/sdk/metric` to use `runtime.GOMAXPROCS(0)` instead of `runtime.NumCPU()` for the `FixedSizeReservoirProvider` default size. 
(#7094) + +### Fixed + +- `SetBody` method of `Record` in `go.opentelemetry.io/otel/sdk/log` now deduplicates key-value collections (`log.Value` of `log.KindMap` from `go.opentelemetry.io/otel/log`). (#7002) +- Fix `go.opentelemetry.io/otel/exporters/prometheus` to not append a suffix if it's already present in metric name. (#7088) +- Fix the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` self-observability component type and name. (#7195) +- Fix partial export count metric in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`. (#7199) + +### Deprecated + +- Deprecate `WithoutUnits` and `WithoutCounterSuffixes` options, preferring `WithTranslationStrategy` instead. (#7111) +- Deprecate support for `OTEL_GO_X_CARDINALITY_LIMIT` environment variable in `go.opentelemetry.io/otel/sdk/metric`. Use `WithCardinalityLimit` option instead. (#7166) + +## [0.59.1] 2025-07-21 + +### Changed + +- Retract `v0.59.0` release of `go.opentelemetry.io/otel/exporters/prometheus` module which appends incorrect unit suffixes. (#7046) +- Change `go.opentelemetry.io/otel/exporters/prometheus` to no longer deduplicate suffixes when UTF8 is enabled. + It is recommended to disable unit and counter suffixes in the exporter, and manually add suffixes if you rely on the existing behavior. (#7044) + +### Fixed + +- Fix `go.opentelemetry.io/otel/exporters/prometheus` to properly handle unit suffixes when the unit is in brackets. + E.g. `{spans}`. (#7044) + ## [1.37.0/0.59.0/0.13.0] 2025-06-25 ### Added @@ -3343,7 +3430,8 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. 
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.37.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.38.0...HEAD +[1.38.0/0.60.0/0.14.0/0.0.13]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.38.0 [1.37.0/0.59.0/0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.37.0 [0.12.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.2 [0.12.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.1 @@ -3439,6 +3527,7 @@ It contains api and sdk for trace and meter. +[Go 1.25]: https://go.dev/doc/go1.25 [Go 1.24]: https://go.dev/doc/go1.24 [Go 1.23]: https://go.dev/doc/go1.23 [Go 1.22]: https://go.dev/doc/go1.22 diff --git a/CODEOWNERS b/CODEOWNERS index 945a07d2b07..26a03aed1d2 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -12,6 +12,6 @@ # https://help.github.com/en/articles/about-code-owners # -* @MrAlias @XSAM @dashpole @pellared @dmathieu +* @MrAlias @XSAM @dashpole @pellared @dmathieu @flc1125 CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f9ddc281fc7..0b3ae855c19 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -192,6 +192,35 @@ should have `go test -bench` output in their description. should have [`benchstat`](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat) output in their description. +## Dependencies + +This project uses [Go Modules] for dependency management. All modules will use +`go.mod` to explicitly list all direct and indirect dependencies, ensuring a +clear dependency graph. The `go.sum` file for each module will be committed to +the repository and used to verify the integrity of downloaded modules, +preventing malicious tampering. + +This project uses automated dependency update tools (i.e. dependabot, +renovatebot) to manage updates to dependencies. 
This ensures that dependencies +are kept up-to-date with the latest security patches and features and are +reviewed before being merged. If you would like to propose a change to a +dependency it should be done through a pull request that updates the `go.mod` +file and includes a description of the change. + +See the [versioning and compatibility](./VERSIONING.md) policy for more details +about dependency compatibility. + +[Go Modules]: https://pkg.go.dev/cmd/go#hdr-Modules__module_versions__and_more + +### Environment Dependencies + +This project does not partition dependencies based on the environment (i.e. +`development`, `staging`, `production`). + +Only the dependencies explicitly included in the released modules have be +tested and verified to work with the released code. No other guarantee is made +about the compatibility of other dependencies. + ## Documentation Each (non-internal, non-test) package must be documented using @@ -233,6 +262,10 @@ For a non-comprehensive but foundational overview of these best practices the [Effective Go](https://golang.org/doc/effective_go.html) documentation is an excellent starting place. +We also recommend following the +[Go Code Review Comments](https://go.dev/wiki/CodeReviewComments) +that collects common comments made during reviews of Go code. + As a convenience for developers building this project the `make precommit` will format, lint, validate, and in some cases fix the changes you plan to submit. This check will need to pass for your changes to be able to be @@ -586,6 +619,10 @@ See also: ### Testing +We allow using [`testify`](https://github.com/stretchr/testify) even though +it is seen as non-idiomatic according to +the [Go Test Comments](https://go.dev/wiki/TestComments#assert-libraries) page. + The tests should never leak goroutines. Use the term `ConcurrentSafe` in the test name when it aims to verify the @@ -640,13 +677,6 @@ should be canceled. 
## Approvers and Maintainers -### Triagers - -- [Alex Kats](https://github.com/akats7), Capital One -- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent - -### Approvers - ### Maintainers - [Damien Mathieu](https://github.com/dmathieu), Elastic ([GPG](https://keys.openpgp.org/search?q=5A126B972A81A6CE443E5E1B408B8E44F0873832)) @@ -655,6 +685,21 @@ should be canceled. - [Sam Xie](https://github.com/XSAM), Splunk ([GPG](https://keys.openpgp.org/search?q=AEA033782371ABB18EE39188B8044925D6FEEBEA)) - [Tyler Yahn](https://github.com/MrAlias), Splunk ([GPG](https://keys.openpgp.org/search?q=0x46B0F3E1A8B1BA5A)) +For more information about the maintainer role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#maintainer). + +### Approvers + +- [Flc](https://github.com/flc1125), Independent + +For more information about the approver role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#approver). + +### Triagers + +- [Alex Kats](https://github.com/akats7), Capital One +- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent + +For more information about the triager role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#triager). + ### Emeritus - [Aaron Clawson](https://github.com/MadVikingGod) @@ -665,6 +710,8 @@ should be canceled. - [Josh MacDonald](https://github.com/jmacd) - [Liz Fong-Jones](https://github.com/lizthegrey) +For more information about the emeritus role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#emeritus-maintainerapprovertriager). 
+ ### Become an Approver or a Maintainer See the [community membership document in OpenTelemetry community diff --git a/LICENSE b/LICENSE index 261eeb9e9f8..f1aee0f1100 100644 --- a/LICENSE +++ b/LICENSE @@ -199,3 +199,33 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +-------------------------------------------------------------------------------- + +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/Makefile b/Makefile index 4fa423ca02d..bc0f1f92d1f 100644 --- a/Makefile +++ b/Makefile @@ -34,9 +34,6 @@ $(TOOLS)/%: $(TOOLS_MOD_DIR)/go.mod | $(TOOLS) MULTIMOD = $(TOOLS)/multimod $(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod -SEMCONVGEN = $(TOOLS)/semconvgen -$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen - CROSSLINK = $(TOOLS)/crosslink $(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink @@ -71,7 +68,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck $(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck .PHONY: tools -tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(VERIFYREADMES) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) +tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(VERIFYREADMES) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) # Virtualized python tools via docker @@ -284,7 +281,7 @@ semconv-generate: $(SEMCONVKIT) docker run --rm \ -u $(DOCKER_USER) \ --env HOME=/tmp/weaver \ - --mount 'type=bind,source=$(PWD)/semconv,target=/home/weaver/templates/registry/go,readonly' \ + --mount 'type=bind,source=$(PWD)/semconv/templates,target=/home/weaver/templates,readonly' \ --mount 'type=bind,source=$(PWD)/semconv/${TAG},target=/home/weaver/target' \ --mount 'type=bind,source=$(HOME)/.weaver,target=/tmp/weaver/.weaver' \ $(WEAVER_IMAGE) registry generate \ diff --git a/README.md b/README.md index 5fa1b75c60e..6b7ab5f2193 100644 --- a/README.md +++ b/README.md @@ -53,18 +53,25 @@ Currently, this project supports the following environments. 
| OS | Go Version | Architecture | |----------|------------|--------------| +| Ubuntu | 1.25 | amd64 | | Ubuntu | 1.24 | amd64 | | Ubuntu | 1.23 | amd64 | +| Ubuntu | 1.25 | 386 | | Ubuntu | 1.24 | 386 | | Ubuntu | 1.23 | 386 | +| Ubuntu | 1.25 | arm64 | | Ubuntu | 1.24 | arm64 | | Ubuntu | 1.23 | arm64 | +| macOS 13 | 1.25 | amd64 | | macOS 13 | 1.24 | amd64 | | macOS 13 | 1.23 | amd64 | +| macOS | 1.25 | arm64 | | macOS | 1.24 | arm64 | | macOS | 1.23 | arm64 | +| Windows | 1.25 | amd64 | | Windows | 1.24 | amd64 | | Windows | 1.23 | amd64 | +| Windows | 1.25 | 386 | | Windows | 1.24 | 386 | | Windows | 1.23 | 386 | diff --git a/SECURITY-INSIGHTS.yml b/SECURITY-INSIGHTS.yml new file mode 100644 index 00000000000..8041fc62e4a --- /dev/null +++ b/SECURITY-INSIGHTS.yml @@ -0,0 +1,203 @@ +header: + schema-version: "1.0.0" + expiration-date: "2026-08-04T00:00:00.000Z" + last-updated: "2025-08-04" + last-reviewed: "2025-08-04" + commit-hash: 69e81088ad40f45a0764597326722dea8f3f00a8 + project-url: https://github.com/open-telemetry/opentelemetry-go + project-release: "v1.37.0" + changelog: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CHANGELOG.md + license: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/LICENSE + +project-lifecycle: + status: active + bug-fixes-only: false + core-maintainers: + - https://github.com/dmathieu + - https://github.com/dashpole + - https://github.com/pellared + - https://github.com/XSAM + - https://github.com/MrAlias + release-process: | + See https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/RELEASING.md + +contribution-policy: + accepts-pull-requests: true + accepts-automated-pull-requests: true + automated-tools-list: + - automated-tool: dependabot + action: allowed + comment: Automated dependency updates are accepted. 
+ - automated-tool: renovatebot + action: allowed + comment: Automated dependency updates are accepted. + - automated-tool: opentelemetrybot + action: allowed + comment: Automated OpenTelemetry actions are accepted. + contributing-policy: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md + code-of-conduct: https://github.com/open-telemetry/.github/blob/ffa15f76b65ec7bcc41f6a0b277edbb74f832206/CODE_OF_CONDUCT.md + +documentation: + - https://pkg.go.dev/go.opentelemetry.io/otel + - https://opentelemetry.io/docs/instrumentation/go/ + +distribution-points: + - pkg:golang/go.opentelemetry.io/otel + - pkg:golang/go.opentelemetry.io/otel/bridge/opencensus + - pkg:golang/go.opentelemetry.io/otel/bridge/opencensus/test + - pkg:golang/go.opentelemetry.io/otel/bridge/opentracing + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp + - pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric + - pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdouttrace + - pkg:golang/go.opentelemetry.io/otel/exporters/zipkin + - pkg:golang/go.opentelemetry.io/otel/metric + - pkg:golang/go.opentelemetry.io/otel/sdk + - pkg:golang/go.opentelemetry.io/otel/sdk/metric + - pkg:golang/go.opentelemetry.io/otel/trace + - pkg:golang/go.opentelemetry.io/otel/exporters/prometheus + - pkg:golang/go.opentelemetry.io/otel/log + - pkg:golang/go.opentelemetry.io/otel/log/logtest + - pkg:golang/go.opentelemetry.io/otel/sdk/log + - pkg:golang/go.opentelemetry.io/otel/sdk/log/logtest + - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc + - 
pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp + - pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdoutlog + - pkg:golang/go.opentelemetry.io/otel/schema + +security-artifacts: + threat-model: + threat-model-created: false + comment: | + No formal threat model created yet. + self-assessment: + self-assessment-created: false + comment: | + No formal self-assessment yet. + +security-testing: + - tool-type: sca + tool-name: Dependabot + tool-version: latest + tool-url: https://github.com/dependabot + tool-rulesets: + - built-in + integration: + ad-hoc: false + ci: true + before-release: true + comment: | + Automated dependency updates. + - tool-type: sast + tool-name: golangci-lint + tool-version: latest + tool-url: https://github.com/golangci/golangci-lint + tool-rulesets: + - built-in + integration: + ad-hoc: false + ci: true + before-release: true + comment: | + Static analysis in CI. + - tool-type: fuzzing + tool-name: OSS-Fuzz + tool-version: latest + tool-url: https://github.com/google/oss-fuzz + tool-rulesets: + - default + integration: + ad-hoc: false + ci: false + before-release: false + comment: | + OpenTelemetry Go is integrated with OSS-Fuzz for continuous fuzz testing. See https://github.com/google/oss-fuzz/tree/f0f9b221190c6063a773bea606d192ebfc3d00cf/projects/opentelemetry-go for more details. + - tool-type: sast + tool-name: CodeQL + tool-version: latest + tool-url: https://github.com/github/codeql + tool-rulesets: + - default + integration: + ad-hoc: false + ci: true + before-release: true + comment: | + CodeQL static analysis is run in CI for all commits and pull requests to detect security vulnerabilities in the Go source code. See https://github.com/open-telemetry/opentelemetry-go/blob/d5b5b059849720144a03ca5c87561bfbdb940119/.github/workflows/codeql-analysis.yml for workflow details. 
+ - tool-type: sca + tool-name: govulncheck + tool-version: latest + tool-url: https://pkg.go.dev/golang.org/x/vuln/cmd/govulncheck + tool-rulesets: + - default + integration: + ad-hoc: false + ci: true + before-release: true + comment: | + govulncheck is run in CI to detect known vulnerabilities in Go modules and code paths. See https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/.github/workflows/ci.yml for workflow configuration. + +security-assessments: + - auditor-name: 7ASecurity + auditor-url: https://7asecurity.com + auditor-report: https://7asecurity.com/reports/pentest-report-opentelemetry.pdf + report-year: 2023 + comment: | + This independent penetration test by 7ASecurity covered OpenTelemetry repositories including opentelemetry-go. The assessment focused on codebase review, threat modeling, and vulnerability identification. See the report for details of findings and recommendations applicable to opentelemetry-go. No critical vulnerabilities were found for this repository. + +security-contacts: + - type: email + value: cncf-opentelemetry-security@lists.cncf.io + primary: true + - type: website + value: https://github.com/open-telemetry/opentelemetry-go/security/policy + primary: false + +vulnerability-reporting: + accepts-vulnerability-reports: true + email-contact: cncf-opentelemetry-security@lists.cncf.io + security-policy: https://github.com/open-telemetry/opentelemetry-go/security/policy + comment: | + Security issues should be reported via email or GitHub security policy page. 
+ +dependencies: + third-party-packages: true + dependencies-lists: + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opencensus/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opencensus/test/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opentracing/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlplog/otlploggrpc/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlplog/otlploghttp/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/otlptracegrpc/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/otlptracehttp/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/prometheus/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdoutlog/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdoutmetric/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdouttrace/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/zipkin/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/internal/tools/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/log/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/log/logtest/go.mod + - 
https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/metric/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/schema/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/log/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/log/logtest/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/metric/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/trace/go.mod + - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/trace/internal/telemetry/test/go.mod + dependencies-lifecycle: + policy-url: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md + comment: | + Dependency lifecycle managed via go.mod and renovatebot. + env-dependencies-policy: + policy-url: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md + comment: | + See contributing policy for environment usage. 
diff --git a/attribute/benchmark_test.go b/attribute/benchmark_test.go index d81bbe8292a..65eda55371e 100644 --- a/attribute/benchmark_test.go +++ b/attribute/benchmark_test.go @@ -271,3 +271,112 @@ func BenchmarkStringSlice(b *testing.B) { }) b.Run("Emit", benchmarkEmit(kv)) } + +func BenchmarkSetEquals(b *testing.B) { + b.Run("Empty", func(b *testing.B) { + benchmarkSetEquals(b, attribute.EmptySet()) + }) + b.Run("1 string attribute", func(b *testing.B) { + set := attribute.NewSet(attribute.String("string", "42")) + benchmarkSetEquals(b, &set) + }) + b.Run("10 string attributes", func(b *testing.B) { + set := attribute.NewSet( + attribute.String("a", "42"), + attribute.String("b", "42"), + attribute.String("c", "42"), + attribute.String("d", "42"), + attribute.String("e", "42"), + attribute.String("f", "42"), + attribute.String("g", "42"), + attribute.String("h", "42"), + attribute.String("i", "42"), + attribute.String("j", "42"), + ) + benchmarkSetEquals(b, &set) + }) + b.Run("1 int attribute", func(b *testing.B) { + set := attribute.NewSet(attribute.Int("string", 42)) + benchmarkSetEquals(b, &set) + }) + b.Run("10 int attributes", func(b *testing.B) { + set := attribute.NewSet( + attribute.Int("a", 42), + attribute.Int("b", 42), + attribute.Int("c", 42), + attribute.Int("d", 42), + attribute.Int("e", 42), + attribute.Int("f", 42), + attribute.Int("g", 42), + attribute.Int("h", 42), + attribute.Int("i", 42), + attribute.Int("j", 42), + ) + benchmarkSetEquals(b, &set) + }) +} + +func benchmarkSetEquals(b *testing.B, set *attribute.Set) { + b.ResetTimer() + for range b.N { + if !set.Equals(set) { + b.Fatal("not equal") + } + } +} + +// BenchmarkEquivalentMapAccess measures how expensive it is to use +// Equivalent() as a map key. This is on the hot path for making synchronous +// measurements on the metrics API/SDK. It will likely be on the hot path for +// the trace and logs API/SDK in the future. 
+func BenchmarkEquivalentMapAccess(b *testing.B) { + b.Run("Empty", func(b *testing.B) { + benchmarkEquivalentMapAccess(b, attribute.EmptySet()) + }) + b.Run("1 string attribute", func(b *testing.B) { + set := attribute.NewSet(attribute.String("string", "42")) + benchmarkEquivalentMapAccess(b, &set) + }) + b.Run("10 string attributes", func(b *testing.B) { + set := attribute.NewSet( + attribute.String("a", "42"), + attribute.String("b", "42"), + attribute.String("c", "42"), + attribute.String("d", "42"), + attribute.String("e", "42"), + attribute.String("f", "42"), + attribute.String("g", "42"), + attribute.String("h", "42"), + attribute.String("i", "42"), + attribute.String("j", "42"), + ) + benchmarkEquivalentMapAccess(b, &set) + }) + b.Run("1 int attribute", func(b *testing.B) { + set := attribute.NewSet(attribute.Int("string", 42)) + benchmarkEquivalentMapAccess(b, &set) + }) + b.Run("10 int attributes", func(b *testing.B) { + set := attribute.NewSet( + attribute.Int("a", 42), + attribute.Int("b", 42), + attribute.Int("c", 42), + attribute.Int("d", 42), + attribute.Int("e", 42), + attribute.Int("f", 42), + attribute.Int("g", 42), + attribute.Int("h", 42), + attribute.Int("i", 42), + attribute.Int("j", 42), + ) + benchmarkEquivalentMapAccess(b, &set) + }) +} + +func benchmarkEquivalentMapAccess(b *testing.B, set *attribute.Set) { + values := map[attribute.Distinct]int{} + b.ResetTimer() + for range b.N { + values[set.Equivalent()]++ + } +} diff --git a/attribute/encoder.go b/attribute/encoder.go index 318e42fcabe..6333d34b310 100644 --- a/attribute/encoder.go +++ b/attribute/encoder.go @@ -78,7 +78,7 @@ func DefaultEncoder() Encoder { defaultEncoderOnce.Do(func() { defaultEncoderInstance = &defaultAttrEncoder{ pool: sync.Pool{ - New: func() interface{} { + New: func() any { return &bytes.Buffer{} }, }, @@ -96,11 +96,11 @@ func (d *defaultAttrEncoder) Encode(iter Iterator) string { for iter.Next() { i, keyValue := iter.IndexedAttribute() if i > 0 { - _, _ = 
buf.WriteRune(',') + _ = buf.WriteByte(',') } copyAndEscape(buf, string(keyValue.Key)) - _, _ = buf.WriteRune('=') + _ = buf.WriteByte('=') if keyValue.Value.Type() == STRING { copyAndEscape(buf, keyValue.Value.AsString()) @@ -122,14 +122,14 @@ func copyAndEscape(buf *bytes.Buffer, val string) { for _, ch := range val { switch ch { case '=', ',', escapeChar: - _, _ = buf.WriteRune(escapeChar) + _ = buf.WriteByte(escapeChar) } _, _ = buf.WriteRune(ch) } } -// Valid returns true if this encoder ID was allocated by -// `NewEncoderID`. Invalid encoder IDs will not be cached. +// Valid reports whether this encoder ID was allocated by +// [NewEncoderID]. Invalid encoder IDs will not be cached. func (id EncoderID) Valid() bool { return id.value != 0 } diff --git a/attribute/filter.go b/attribute/filter.go index 3eeaa5d4426..624ebbe3811 100644 --- a/attribute/filter.go +++ b/attribute/filter.go @@ -15,8 +15,8 @@ type Filter func(KeyValue) bool // // If keys is empty a deny-all filter is returned. func NewAllowKeysFilter(keys ...Key) Filter { - if len(keys) <= 0 { - return func(kv KeyValue) bool { return false } + if len(keys) == 0 { + return func(KeyValue) bool { return false } } allowed := make(map[Key]struct{}, len(keys)) @@ -34,8 +34,8 @@ func NewAllowKeysFilter(keys ...Key) Filter { // // If keys is empty an allow-all filter is returned. func NewDenyKeysFilter(keys ...Key) Filter { - if len(keys) <= 0 { - return func(kv KeyValue) bool { return true } + if len(keys) == 0 { + return func(KeyValue) bool { return true } } forbid := make(map[Key]struct{}, len(keys)) diff --git a/attribute/internal/attribute.go b/attribute/internal/attribute.go index b76d2bbfdbd..0875504302c 100644 --- a/attribute/internal/attribute.go +++ b/attribute/internal/attribute.go @@ -12,7 +12,7 @@ import ( ) // BoolSliceValue converts a bool slice into an array with same elements as slice. 
-func BoolSliceValue(v []bool) interface{} { +func BoolSliceValue(v []bool) any { var zero bool cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() reflect.Copy(cp, reflect.ValueOf(v)) @@ -20,7 +20,7 @@ func BoolSliceValue(v []bool) interface{} { } // Int64SliceValue converts an int64 slice into an array with same elements as slice. -func Int64SliceValue(v []int64) interface{} { +func Int64SliceValue(v []int64) any { var zero int64 cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() reflect.Copy(cp, reflect.ValueOf(v)) @@ -28,7 +28,7 @@ func Int64SliceValue(v []int64) interface{} { } // Float64SliceValue converts a float64 slice into an array with same elements as slice. -func Float64SliceValue(v []float64) interface{} { +func Float64SliceValue(v []float64) any { var zero float64 cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() reflect.Copy(cp, reflect.ValueOf(v)) @@ -36,7 +36,7 @@ func Float64SliceValue(v []float64) interface{} { } // StringSliceValue converts a string slice into an array with same elements as slice. -func StringSliceValue(v []string) interface{} { +func StringSliceValue(v []string) any { var zero string cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() reflect.Copy(cp, reflect.ValueOf(v)) @@ -44,7 +44,7 @@ func StringSliceValue(v []string) interface{} { } // AsBoolSlice converts a bool array into a slice into with same elements as array. -func AsBoolSlice(v interface{}) []bool { +func AsBoolSlice(v any) []bool { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Array { return nil @@ -57,7 +57,7 @@ func AsBoolSlice(v interface{}) []bool { } // AsInt64Slice converts an int64 array into a slice into with same elements as array. 
-func AsInt64Slice(v interface{}) []int64 { +func AsInt64Slice(v any) []int64 { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Array { return nil @@ -70,7 +70,7 @@ func AsInt64Slice(v interface{}) []int64 { } // AsFloat64Slice converts a float64 array into a slice into with same elements as array. -func AsFloat64Slice(v interface{}) []float64 { +func AsFloat64Slice(v any) []float64 { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Array { return nil @@ -83,7 +83,7 @@ func AsFloat64Slice(v interface{}) []float64 { } // AsStringSlice converts a string array into a slice into with same elements as array. -func AsStringSlice(v interface{}) []string { +func AsStringSlice(v any) []string { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Array { return nil diff --git a/attribute/internal/attribute_test.go b/attribute/internal/attribute_test.go index e2c35d9706e..dd710aeaa2e 100644 --- a/attribute/internal/attribute_test.go +++ b/attribute/internal/attribute_test.go @@ -8,28 +8,28 @@ import ( "testing" ) -var wrapFloat64SliceValue = func(v interface{}) interface{} { +var wrapFloat64SliceValue = func(v any) any { if vi, ok := v.([]float64); ok { return Float64SliceValue(vi) } return nil } -var wrapInt64SliceValue = func(v interface{}) interface{} { +var wrapInt64SliceValue = func(v any) any { if vi, ok := v.([]int64); ok { return Int64SliceValue(vi) } return nil } -var wrapBoolSliceValue = func(v interface{}) interface{} { +var wrapBoolSliceValue = func(v any) any { if vi, ok := v.([]bool); ok { return BoolSliceValue(vi) } return nil } -var wrapStringSliceValue = func(v interface{}) interface{} { +var wrapStringSliceValue = func(v any) any { if vi, ok := v.([]string); ok { return StringSliceValue(vi) } @@ -37,21 +37,21 @@ var wrapStringSliceValue = func(v interface{}) interface{} { } var ( - wrapAsBoolSlice = func(v interface{}) interface{} { return AsBoolSlice(v) } - wrapAsInt64Slice = func(v interface{}) interface{} { return AsInt64Slice(v) } - 
wrapAsFloat64Slice = func(v interface{}) interface{} { return AsFloat64Slice(v) } - wrapAsStringSlice = func(v interface{}) interface{} { return AsStringSlice(v) } + wrapAsBoolSlice = func(v any) any { return AsBoolSlice(v) } + wrapAsInt64Slice = func(v any) any { return AsInt64Slice(v) } + wrapAsFloat64Slice = func(v any) any { return AsFloat64Slice(v) } + wrapAsStringSlice = func(v any) any { return AsStringSlice(v) } ) func TestSliceValue(t *testing.T) { type args struct { - v interface{} + v any } tests := []struct { name string args args - want interface{} - fn func(interface{}) interface{} + want any + fn func(any) any }{ { name: "Float64SliceValue() two items", @@ -136,7 +136,7 @@ func BenchmarkStringSliceValue(b *testing.B) { func BenchmarkAsFloat64Slice(b *testing.B) { b.ReportAllocs() - var in interface{} = [2]float64{1, 2.3} + var in any = [2]float64{1, 2.3} b.ResetTimer() for i := 0; i < b.N; i++ { diff --git a/attribute/iterator.go b/attribute/iterator.go index f2ba89ce4bc..8df6249f023 100644 --- a/attribute/iterator.go +++ b/attribute/iterator.go @@ -25,8 +25,8 @@ type oneIterator struct { attr KeyValue } -// Next moves the iterator to the next position. Returns false if there are no -// more attributes. +// Next moves the iterator to the next position. +// Next reports whether there are more attributes. func (i *Iterator) Next() bool { i.idx++ return i.idx < i.Len() @@ -106,7 +106,8 @@ func (oi *oneIterator) advance() { } } -// Next returns true if there is another attribute available. +// Next moves the iterator to the next position. +// Next reports whether there is another attribute available. func (m *MergeIterator) Next() bool { if m.one.done && m.two.done { return false diff --git a/attribute/key.go b/attribute/key.go index d9a22c65020..80a9e5643f6 100644 --- a/attribute/key.go +++ b/attribute/key.go @@ -117,7 +117,7 @@ func (k Key) StringSlice(v []string) KeyValue { } } -// Defined returns true for non-empty keys. 
+// Defined reports whether the key is not empty. func (k Key) Defined() bool { return len(k) != 0 } diff --git a/attribute/key_test.go b/attribute/key_test.go index ddb600d713c..33c9b6a164c 100644 --- a/attribute/key_test.go +++ b/attribute/key_test.go @@ -40,7 +40,7 @@ func TestDefined(t *testing.T) { } func TestJSONValue(t *testing.T) { - var kvs interface{} = [2]attribute.KeyValue{ + var kvs any = [2]attribute.KeyValue{ attribute.String("A", "B"), attribute.Int64("C", 1), } diff --git a/attribute/kv.go b/attribute/kv.go index 3028f9a40f8..8c6928ca79b 100644 --- a/attribute/kv.go +++ b/attribute/kv.go @@ -13,7 +13,7 @@ type KeyValue struct { Value Value } -// Valid returns if kv is a valid OpenTelemetry attribute. +// Valid reports whether kv is a valid OpenTelemetry attribute. func (kv KeyValue) Valid() bool { return kv.Key.Defined() && kv.Value.Type() != INVALID } diff --git a/attribute/set.go b/attribute/set.go index 6cbefceadfe..64735d382ea 100644 --- a/attribute/set.go +++ b/attribute/set.go @@ -31,11 +31,11 @@ type ( // Distinct is a unique identifier of a Set. // - // Distinct is designed to be ensures equivalence stability: comparisons - // will return the save value across versions. For this reason, Distinct - // should always be used as a map key instead of a Set. + // Distinct is designed to ensure equivalence stability: comparisons will + // return the same value across versions. For this reason, Distinct should + // always be used as a map key instead of a Set. Distinct struct { - iface interface{} + iface any } // Sortable implements sort.Interface, used for sorting KeyValue. @@ -70,7 +70,7 @@ func (d Distinct) reflectValue() reflect.Value { return reflect.ValueOf(d.iface) } -// Valid returns true if this value refers to a valid Set. +// Valid reports whether this value refers to a valid Set. 
func (d Distinct) Valid() bool { return d.iface != nil } @@ -120,7 +120,7 @@ func (l *Set) Value(k Key) (Value, bool) { return Value{}, false } -// HasValue tests whether a key is defined in this set. +// HasValue reports whether a key is defined in this set. func (l *Set) HasValue(k Key) bool { if l == nil { return false @@ -155,7 +155,7 @@ func (l *Set) Equivalent() Distinct { return l.equivalent } -// Equals returns true if the argument set is equivalent to this set. +// Equals reports whether the argument set is equivalent to this set. func (l *Set) Equals(o *Set) bool { return l.Equivalent() == o.Equivalent() } @@ -344,7 +344,7 @@ func computeDistinct(kvs []KeyValue) Distinct { // computeDistinctFixed computes a Distinct for small slices. It returns nil // if the input is too large for this code path. -func computeDistinctFixed(kvs []KeyValue) interface{} { +func computeDistinctFixed(kvs []KeyValue) any { switch len(kvs) { case 1: return [1]KeyValue(kvs) @@ -373,7 +373,7 @@ func computeDistinctFixed(kvs []KeyValue) interface{} { // computeDistinctReflect computes a Distinct using reflection, works for any // size input. -func computeDistinctReflect(kvs []KeyValue) interface{} { +func computeDistinctReflect(kvs []KeyValue) any { at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem() for i, keyValue := range kvs { *(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue @@ -387,7 +387,7 @@ func (l *Set) MarshalJSON() ([]byte, error) { } // MarshalLog is the marshaling function used by the logging system to represent this Set. 
-func (l Set) MarshalLog() interface{} { +func (l Set) MarshalLog() any { kvs := make(map[string]string) for _, kv := range l.ToSlice() { kvs[string(kv.Key)] = kv.Value.Emit() diff --git a/attribute/set_test.go b/attribute/set_test.go index c73f0212c39..1ba0a652782 100644 --- a/attribute/set_test.go +++ b/attribute/set_test.go @@ -214,21 +214,21 @@ func TestFiltering(t *testing.T) { { name: "None", in: []attribute.KeyValue{a, b, c}, - filter: func(kv attribute.KeyValue) bool { return false }, + filter: func(attribute.KeyValue) bool { return false }, kept: nil, drop: []attribute.KeyValue{a, b, c}, }, { name: "All", in: []attribute.KeyValue{a, b, c}, - filter: func(kv attribute.KeyValue) bool { return true }, + filter: func(attribute.KeyValue) bool { return true }, kept: []attribute.KeyValue{a, b, c}, drop: nil, }, { name: "Empty", in: []attribute.KeyValue{}, - filter: func(kv attribute.KeyValue) bool { return true }, + filter: func(attribute.KeyValue) bool { return true }, kept: nil, drop: nil, }, @@ -348,6 +348,133 @@ func args(m reflect.Method) []reflect.Value { return out } +func TestMarshalJSON(t *testing.T) { + for _, tc := range []struct { + desc string + kvs []attribute.KeyValue + wantJSON string + }{ + { + desc: "empty", + kvs: []attribute.KeyValue{}, + wantJSON: `[]`, + }, + { + desc: "single string attribute", + kvs: []attribute.KeyValue{attribute.String("A", "a")}, + wantJSON: `[{"Key":"A","Value":{"Type":"STRING","Value":"a"}}]`, + }, + { + desc: "many mixed attributes", + kvs: []attribute.KeyValue{ + attribute.Bool("A", true), + attribute.BoolSlice("B", []bool{true, false}), + attribute.Int("C", 1), + attribute.IntSlice("D", []int{2, 3}), + attribute.Int64("E", 22), + attribute.Int64Slice("F", []int64{33, 44}), + attribute.Float64("G", 1.1), + attribute.Float64Slice("H", []float64{2.2, 3.3}), + attribute.String("I", "Z"), + attribute.StringSlice("J", []string{"X", "Y"}), + attribute.Stringer("K", &simpleStringer{val: "foo"}), + }, + wantJSON: `[ + { + 
"Key": "A", + "Value": { + "Type": "BOOL", + "Value": true + } + }, + { + "Key": "B", + "Value": { + "Type": "BOOLSLICE", + "Value": [true, false] + } + }, + { + "Key": "C", + "Value": { + "Type": "INT64", + "Value": 1 + } + }, + { + "Key": "D", + "Value": { + "Type": "INT64SLICE", + "Value": [2, 3] + } + }, + { + "Key": "E", + "Value": { + "Type": "INT64", + "Value": 22 + } + }, + { + "Key": "F", + "Value": { + "Type": "INT64SLICE", + "Value": [33, 44] + } + }, + { + "Key": "G", + "Value": { + "Type": "FLOAT64", + "Value": 1.1 + } + }, + { + "Key": "H", + "Value": { + "Type": "FLOAT64SLICE", + "Value": [2.2, 3.3] + } + }, + { + "Key": "I", + "Value": { + "Type": "STRING", + "Value": "Z" + } + }, + { + "Key": "J", + "Value": { + "Type": "STRINGSLICE", + "Value": ["X", "Y"] + } + }, + { + "Key": "K", + "Value": { + "Type": "STRING", + "Value": "foo" + } + } + ]`, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + set := attribute.NewSet(tc.kvs...) + by, err := set.MarshalJSON() + require.NoError(t, err) + assert.JSONEq(t, tc.wantJSON, string(by)) + }) + } +} + +type simpleStringer struct { + val string +} + +func (s *simpleStringer) String() string { return s.val } + func BenchmarkFiltering(b *testing.B) { var kvs [26]attribute.KeyValue buf := [1]byte{'A' - 1} diff --git a/attribute/value.go b/attribute/value.go index 817eecacf11..653c33a8619 100644 --- a/attribute/value.go +++ b/attribute/value.go @@ -22,7 +22,7 @@ type Value struct { vtype Type numeric uint64 stringly string - slice interface{} + slice any } const ( @@ -199,8 +199,8 @@ func (v Value) asStringSlice() []string { type unknownValueType struct{} -// AsInterface returns Value's data as interface{}. -func (v Value) AsInterface() interface{} { +// AsInterface returns Value's data as any. 
+func (v Value) AsInterface() any { switch v.Type() { case BOOL: return v.AsBool() @@ -262,7 +262,7 @@ func (v Value) Emit() string { func (v Value) MarshalJSON() ([]byte, error) { var jsonVal struct { Type string - Value interface{} + Value any } jsonVal.Type = v.Type().String() jsonVal.Value = v.AsInterface() diff --git a/attribute/value_test.go b/attribute/value_test.go index d1c88702d25..2f2a9e6a757 100644 --- a/attribute/value_test.go +++ b/attribute/value_test.go @@ -18,7 +18,7 @@ func TestValue(t *testing.T) { name string value attribute.Value wantType attribute.Type - wantValue interface{} + wantValue any }{ { name: "Key.Bool() correctly returns keys's internal bool value", diff --git a/baggage/baggage.go b/baggage/baggage.go index 0e1fe242203..f83a448ec61 100644 --- a/baggage/baggage.go +++ b/baggage/baggage.go @@ -812,7 +812,7 @@ var safeKeyCharset = [utf8.RuneSelf]bool{ // validateBaggageName checks if the string is a valid OpenTelemetry Baggage name. // Baggage name is a valid, non-empty UTF-8 string. func validateBaggageName(s string) bool { - if len(s) == 0 { + if s == "" { return false } @@ -828,7 +828,7 @@ func validateBaggageValue(s string) bool { // validateKey checks if the string is a valid W3C Baggage key. 
func validateKey(s string) bool { - if len(s) == 0 { + if s == "" { return false } diff --git a/bridge/opencensus/go.mod b/bridge/opencensus/go.mod index 962171511bb..b5a9f5f12fb 100644 --- a/bridge/opencensus/go.mod +++ b/bridge/opencensus/go.mod @@ -3,12 +3,12 @@ module go.opentelemetry.io/otel/bridge/opencensus go 1.23.0 require ( - github.com/stretchr/testify v1.10.0 + github.com/stretchr/testify v1.11.1 go.opencensus.io v0.24.0 - go.opentelemetry.io/otel v1.37.0 - go.opentelemetry.io/otel/sdk v1.37.0 - go.opentelemetry.io/otel/sdk/metric v1.37.0 - go.opentelemetry.io/otel/trace v1.37.0 + go.opentelemetry.io/otel v1.38.0 + go.opentelemetry.io/otel/sdk v1.38.0 + go.opentelemetry.io/otel/sdk/metric v1.38.0 + go.opentelemetry.io/otel/trace v1.38.0 ) require ( @@ -19,8 +19,8 @@ require ( github.com/google/uuid v1.6.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/otel/metric v1.37.0 // indirect - golang.org/x/sys v0.33.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + golang.org/x/sys v0.35.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/bridge/opencensus/go.sum b/bridge/opencensus/go.sum index bece93b09c2..2f2c3cdcde4 100644 --- a/bridge/opencensus/go.sum +++ b/bridge/opencensus/go.sum @@ -55,8 +55,8 @@ github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpE github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify 
v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= @@ -83,8 +83,8 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/bridge/opencensus/internal/oc2otel/attributes.go b/bridge/opencensus/internal/oc2otel/attributes.go index 2a06e4acb4f..4179f5cfb74 100644 --- a/bridge/opencensus/internal/oc2otel/attributes.go +++ b/bridge/opencensus/internal/oc2otel/attributes.go @@ -21,7 +21,7 @@ func Attributes(attr []octrace.Attribute) []attribute.KeyValue { return otelAttr } -func AttributesFromMap(attr map[string]interface{}) []attribute.KeyValue { +func AttributesFromMap(attr map[string]any) []attribute.KeyValue { otelAttr := make([]attribute.KeyValue, 0, len(attr)) for k, v := range attr { otelAttr = append(otelAttr, attribute.KeyValue{ @@ -32,7 +32,7 @@ func AttributesFromMap(attr 
map[string]interface{}) []attribute.KeyValue { return otelAttr } -func AttributeValue(ocval interface{}) attribute.Value { +func AttributeValue(ocval any) attribute.Value { switch v := ocval.(type) { case bool: return attribute.BoolValue(v) diff --git a/bridge/opencensus/internal/oc2otel/attributes_test.go b/bridge/opencensus/internal/oc2otel/attributes_test.go index b0d8e203d02..661e8226823 100644 --- a/bridge/opencensus/internal/oc2otel/attributes_test.go +++ b/bridge/opencensus/internal/oc2otel/attributes_test.go @@ -38,7 +38,7 @@ func TestAttributes(t *testing.T) { } func TestAttributesFromMap(t *testing.T) { - in := map[string]interface{}{ + in := map[string]any{ "bool": true, "int64": int64(49), "float64": float64(1.618), diff --git a/bridge/opencensus/internal/oc2otel/span_context_test.go b/bridge/opencensus/internal/oc2otel/span_context_test.go index 2f04c056948..e5e6df7543f 100644 --- a/bridge/opencensus/internal/oc2otel/span_context_test.go +++ b/bridge/opencensus/internal/oc2otel/span_context_test.go @@ -8,12 +8,10 @@ import ( "github.com/stretchr/testify/assert" "go.opencensus.io/plugin/ochttp/propagation/tracecontext" - - "go.opentelemetry.io/otel/bridge/opencensus/internal/otel2oc" - octrace "go.opencensus.io/trace" "go.opencensus.io/trace/tracestate" + "go.opentelemetry.io/otel/bridge/opencensus/internal/otel2oc" "go.opentelemetry.io/otel/trace" ) diff --git a/bridge/opencensus/internal/ocmetric/metric_test.go b/bridge/opencensus/internal/ocmetric/metric_test.go index 5f1e3e75e0d..5ae61832d88 100644 --- a/bridge/opencensus/internal/ocmetric/metric_test.go +++ b/bridge/opencensus/internal/ocmetric/metric_test.go @@ -75,7 +75,7 @@ func TestConvertMetrics(t *testing.T) { Exemplar: &ocmetricdata.Exemplar{ Value: 0.8, Timestamp: exemplarTime, - Attachments: map[string]interface{}{ + Attachments: map[string]any{ ocmetricdata.AttachmentKeySpanContext: octrace.SpanContext{ TraceID: octrace.TraceID([16]byte{1}), SpanID: octrace.SpanID([8]byte{2}), @@ -89,7 
+89,7 @@ func TestConvertMetrics(t *testing.T) { Exemplar: &ocmetricdata.Exemplar{ Value: 1.5, Timestamp: exemplarTime, - Attachments: map[string]interface{}{ + Attachments: map[string]any{ ocmetricdata.AttachmentKeySpanContext: octrace.SpanContext{ TraceID: octrace.TraceID([16]byte{3}), SpanID: octrace.SpanID([8]byte{4}), @@ -102,7 +102,7 @@ func TestConvertMetrics(t *testing.T) { Exemplar: &ocmetricdata.Exemplar{ Value: 2.6, Timestamp: exemplarTime, - Attachments: map[string]interface{}{ + Attachments: map[string]any{ ocmetricdata.AttachmentKeySpanContext: octrace.SpanContext{ TraceID: octrace.TraceID([16]byte{5}), SpanID: octrace.SpanID([8]byte{6}), @@ -124,7 +124,7 @@ func TestConvertMetrics(t *testing.T) { Exemplar: &ocmetricdata.Exemplar{ Value: 0.9, Timestamp: exemplarTime, - Attachments: map[string]interface{}{ + Attachments: map[string]any{ ocmetricdata.AttachmentKeySpanContext: octrace.SpanContext{ TraceID: octrace.TraceID([16]byte{7}), SpanID: octrace.SpanID([8]byte{8}), @@ -137,7 +137,7 @@ func TestConvertMetrics(t *testing.T) { Exemplar: &ocmetricdata.Exemplar{ Value: 1.1, Timestamp: exemplarTime, - Attachments: map[string]interface{}{ + Attachments: map[string]any{ ocmetricdata.AttachmentKeySpanContext: octrace.SpanContext{ TraceID: octrace.TraceID([16]byte{9}), SpanID: octrace.SpanID([8]byte{10}), @@ -150,7 +150,7 @@ func TestConvertMetrics(t *testing.T) { Exemplar: &ocmetricdata.Exemplar{ Value: 2.7, Timestamp: exemplarTime, - Attachments: map[string]interface{}{ + Attachments: map[string]any{ ocmetricdata.AttachmentKeySpanContext: octrace.SpanContext{ TraceID: octrace.TraceID([16]byte{11}), SpanID: octrace.SpanID([8]byte{12}), @@ -836,7 +836,7 @@ func TestConvertMetrics(t *testing.T) { Exemplar: &ocmetricdata.Exemplar{ Value: 0.8, Timestamp: exemplarTime, - Attachments: map[string]interface{}{ + Attachments: map[string]any{ ocmetricdata.AttachmentKeySpanContext: "notaspancontext", }, }, @@ -1188,7 +1188,7 @@ func BenchmarkConvertExemplar(b 
*testing.B) { data := make([]*ocmetricdata.Exemplar, b.N) for i := range data { a := make(ocmetricdata.Attachments, attchmentsN) - for j := 0; j < attchmentsN; j++ { + for j := range attchmentsN { a[strconv.Itoa(j)] = rand.Int64() } data[i] = &ocmetricdata.Exemplar{ @@ -1214,7 +1214,7 @@ func BenchmarkConvertQuantiles(b *testing.B) { data := make([]ocmetricdata.Snapshot, b.N) for i := range data { p := make(map[float64]float64, percentileN) - for j := 0; j < percentileN; j++ { + for range percentileN { v := rand.Float64() for v == 0 { // Convert from [0, 1) interval to (0, 1). diff --git a/bridge/opencensus/internal/otel2oc/span_context_test.go b/bridge/opencensus/internal/otel2oc/span_context_test.go index 51e6945c9c6..c3c7cf964dd 100644 --- a/bridge/opencensus/internal/otel2oc/span_context_test.go +++ b/bridge/opencensus/internal/otel2oc/span_context_test.go @@ -6,16 +6,12 @@ package otel2oc import ( "testing" - "go.opencensus.io/plugin/ochttp/propagation/tracecontext" - - "go.opentelemetry.io/otel/bridge/opencensus/internal/oc2otel" - "github.com/stretchr/testify/assert" - - "go.opencensus.io/trace/tracestate" - + "go.opencensus.io/plugin/ochttp/propagation/tracecontext" octrace "go.opencensus.io/trace" + "go.opencensus.io/trace/tracestate" + "go.opentelemetry.io/otel/bridge/opencensus/internal/oc2otel" "go.opentelemetry.io/otel/trace" ) diff --git a/bridge/opencensus/internal/span.go b/bridge/opencensus/internal/span.go index 3ddef4d037e..4e7e1c35bd7 100644 --- a/bridge/opencensus/internal/span.go +++ b/bridge/opencensus/internal/span.go @@ -39,7 +39,7 @@ func NewSpan(s trace.Span) *octrace.Span { return octrace.NewSpan(&Span{otelSpan: s}) } -// IsRecordingEvents returns true if events are being recorded for this span. +// IsRecordingEvents reports whether events are being recorded for this span. 
func (s *Span) IsRecordingEvents() bool { return s.otelSpan.IsRecording() } @@ -75,12 +75,12 @@ func (s *Span) Annotate(attributes []octrace.Attribute, str string) { } // Annotatef adds a formatted annotation with attributes to this span. -func (s *Span) Annotatef(attributes []octrace.Attribute, format string, a ...interface{}) { +func (s *Span) Annotatef(attributes []octrace.Attribute, format string, a ...any) { s.Annotate(attributes, fmt.Sprintf(format, a...)) } // AddMessageSendEvent adds a message send event to this span. -func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) { +func (s *Span) AddMessageSendEvent(_, uncompressedByteSize, compressedByteSize int64) { s.otelSpan.AddEvent(MessageSendEvent, trace.WithAttributes( attribute.KeyValue{ @@ -95,7 +95,7 @@ func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedBy } // AddMessageReceiveEvent adds a message receive event to this span. -func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) { +func (s *Span) AddMessageReceiveEvent(_, uncompressedByteSize, compressedByteSize int64) { s.otelSpan.AddEvent(MessageReceiveEvent, trace.WithAttributes( attribute.KeyValue{ diff --git a/bridge/opencensus/internal/span_test.go b/bridge/opencensus/internal/span_test.go index de06a86a39f..54893486468 100644 --- a/bridge/opencensus/internal/span_test.go +++ b/bridge/opencensus/internal/span_test.go @@ -272,7 +272,7 @@ func TestSpanAddLinkFails(t *testing.T) { ocS.AddLink(octrace.Link{ TraceID: octrace.TraceID([16]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}), SpanID: octrace.SpanID([8]byte{2, 0, 0, 0, 0, 0, 0, 0}), - Attributes: map[string]interface{}{ + Attributes: map[string]any{ "foo": "bar", "number": int64(3), }, diff --git a/bridge/opencensus/internal/tracer.go b/bridge/opencensus/internal/tracer.go index bb1b0839c24..db301976bb1 100644 --- a/bridge/opencensus/internal/tracer.go +++ 
b/bridge/opencensus/internal/tracer.go @@ -53,12 +53,12 @@ func (o *Tracer) StartSpanWithRemoteParent( } // FromContext returns the Span stored in a context. -func (o *Tracer) FromContext(ctx context.Context) *octrace.Span { +func (*Tracer) FromContext(ctx context.Context) *octrace.Span { return NewSpan(trace.SpanFromContext(ctx)) } // NewContext returns a new context with the given Span attached. -func (o *Tracer) NewContext(parent context.Context, s *octrace.Span) context.Context { +func (*Tracer) NewContext(parent context.Context, s *octrace.Span) context.Context { if otSpan, ok := s.Internal().(*Span); ok { return trace.ContextWithSpan(parent, otSpan.otelSpan) } diff --git a/bridge/opencensus/internal/tracer_test.go b/bridge/opencensus/internal/tracer_test.go index d1516fd362f..395735f6ec2 100644 --- a/bridge/opencensus/internal/tracer_test.go +++ b/bridge/opencensus/internal/tracer_test.go @@ -139,7 +139,7 @@ type differentSpan struct { octrace.SpanInterface } -func (s *differentSpan) String() string { return "testing span" } +func (*differentSpan) String() string { return "testing span" } func TestTracerNewContextErrors(t *testing.T) { h, restore := withHandler() diff --git a/bridge/opencensus/metric.go b/bridge/opencensus/metric.go index f4c8408db5b..5bf343b9165 100644 --- a/bridge/opencensus/metric.go +++ b/bridge/opencensus/metric.go @@ -23,7 +23,7 @@ type MetricProducer struct { // NewMetricProducer returns a metric.Producer that fetches metrics from // OpenCensus. 
-func NewMetricProducer(opts ...MetricOption) *MetricProducer { +func NewMetricProducer(...MetricOption) *MetricProducer { return &MetricProducer{ manager: metricproducer.GlobalManager(), } diff --git a/bridge/opencensus/test/go.mod b/bridge/opencensus/test/go.mod index c63e130e953..06ac93bc9b6 100644 --- a/bridge/opencensus/test/go.mod +++ b/bridge/opencensus/test/go.mod @@ -4,10 +4,10 @@ go 1.23.0 require ( go.opencensus.io v0.24.0 - go.opentelemetry.io/otel v1.37.0 - go.opentelemetry.io/otel/bridge/opencensus v1.37.0 - go.opentelemetry.io/otel/sdk v1.37.0 - go.opentelemetry.io/otel/trace v1.37.0 + go.opentelemetry.io/otel v1.38.0 + go.opentelemetry.io/otel/bridge/opencensus v1.38.0 + go.opentelemetry.io/otel/sdk v1.38.0 + go.opentelemetry.io/otel/trace v1.38.0 ) require ( @@ -16,9 +16,9 @@ require ( github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/google/uuid v1.6.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/otel/metric v1.37.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.37.0 // indirect - golang.org/x/sys v0.33.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect + golang.org/x/sys v0.35.0 // indirect ) replace go.opentelemetry.io/otel => ../../.. 
diff --git a/bridge/opencensus/test/go.sum b/bridge/opencensus/test/go.sum index 06cc67d1b4c..307bca40fa7 100644 --- a/bridge/opencensus/test/go.sum +++ b/bridge/opencensus/test/go.sum @@ -49,8 +49,8 @@ github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpE github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= @@ -77,8 +77,8 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/bridge/opencensus/trace_test.go b/bridge/opencensus/trace_test.go index b4e7dbdd71a..ddadc1ca53d 100644 --- a/bridge/opencensus/trace_test.go +++ b/bridge/opencensus/trace_test.go @@ -10,7 +10,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - octrace "go.opencensus.io/trace" "go.opentelemetry.io/otel/sdk/trace" @@ -134,12 +133,10 @@ func TestOTelSpanContextToOC(t *testing.T) { gotTraceState = strings.Join(gotTraceStateEntries, ",") } assert.Equal(t, expectedTraceState, gotTraceState, "Tracestate should preserve entries") - } else { + } else if got.Tracestate != nil { // For empty tracestate cases, ensure the field is properly handled - if got.Tracestate != nil { - entries := got.Tracestate.Entries() - assert.Empty(t, entries, "Empty tracestate should result in empty entries") - } + entries := got.Tracestate.Entries() + assert.Empty(t, entries, "Empty tracestate should result in empty entries") } }) } diff --git a/bridge/opencensus/version.go b/bridge/opencensus/version.go index b693961f1e2..4c5b03698ec 100644 --- a/bridge/opencensus/version.go +++ b/bridge/opencensus/version.go @@ -5,5 +5,5 @@ package opencensus // import "go.opentelemetry.io/otel/bridge/opencensus" // Version is the current release version of the opencensus bridge. 
func Version() string { - return "1.37.0" + return "1.38.0" } diff --git a/bridge/opentracing/bridge.go b/bridge/opentracing/bridge.go index c26a7d7d8dd..d9952fa5977 100644 --- a/bridge/opentracing/bridge.go +++ b/bridge/opentracing/bridge.go @@ -6,6 +6,7 @@ package opentracing // import "go.opentelemetry.io/otel/bridge/opentracing" import ( "context" "fmt" + "maps" "strconv" "strings" "sync" @@ -139,7 +140,7 @@ func (s *bridgeSpan) SetOperationName(operationName string) ot.Span { // - uint32 -> int64 // - uint64 -> string // - float32 -> float64 -func (s *bridgeSpan) SetTag(key string, value interface{}) ot.Span { +func (s *bridgeSpan) SetTag(key string, value any) ot.Span { switch key { case string(otext.SpanKind): // TODO: Should we ignore it? @@ -202,7 +203,7 @@ func (e *bridgeFieldEncoder) EmitFloat64(key string, value float64) { e.emitCommon(key, value) } -func (e *bridgeFieldEncoder) EmitObject(key string, value interface{}) { +func (e *bridgeFieldEncoder) EmitObject(key string, value any) { e.emitCommon(key, value) } @@ -210,7 +211,7 @@ func (e *bridgeFieldEncoder) EmitLazyLogger(value otlog.LazyLogger) { value(e) } -func (e *bridgeFieldEncoder) emitCommon(key string, value interface{}) { +func (e *bridgeFieldEncoder) emitCommon(key string, value any) { e.pairs = append(e.pairs, otTagToOTelAttr(key, value)) } @@ -222,7 +223,7 @@ func otLogFieldsToOTelAttrs(fields []otlog.Field) []attribute.KeyValue { return encoder.pairs } -func (s *bridgeSpan) LogKV(alternatingKeyValues ...interface{}) { +func (s *bridgeSpan) LogKV(alternatingKeyValues ...any) { fields, err := otlog.InterleavedKVToFields(alternatingKeyValues...) 
if err != nil { return @@ -259,7 +260,7 @@ func (s *bridgeSpan) LogEvent(event string) { s.LogEventWithPayload(event, nil) } -func (s *bridgeSpan) LogEventWithPayload(event string, payload interface{}) { +func (s *bridgeSpan) LogEventWithPayload(event string, payload any) { data := ot.LogData{ Event: event, Payload: payload, @@ -320,10 +321,10 @@ var ( func NewBridgeTracer() *BridgeTracer { return &BridgeTracer{ setTracer: bridgeSetTracer{ - warningHandler: func(msg string) {}, + warningHandler: func(string) {}, otelTracer: noopTracer, }, - warningHandler: func(msg string) {}, + warningHandler: func(string) {}, propagator: nil, } } @@ -400,9 +401,7 @@ func (t *BridgeTracer) baggageGetHook(ctx context.Context, list iBaggage.List) i // need to return a copy to ensure this. merged := make(iBaggage.List, len(list)) - for k, v := range list { - merged[k] = v - } + maps.Copy(merged, list) for k, v := range items { // Overwrite according to OpenTelemetry specification. @@ -497,7 +496,7 @@ func (t *BridgeTracer) ContextWithSpanHook(ctx context.Context, span ot.Span) co return ctx } -func otTagsToOTelAttributesKindAndError(tags map[string]interface{}) ([]attribute.KeyValue, trace.SpanKind, bool) { +func otTagsToOTelAttributesKindAndError(tags map[string]any) ([]attribute.KeyValue, trace.SpanKind, bool) { kind := trace.SpanKindInternal err := false var pairs []attribute.KeyValue @@ -537,7 +536,7 @@ func otTagsToOTelAttributesKindAndError(tags map[string]interface{}) ([]attribut // - uint32 -> int64 // - uint64 -> string // - float32 -> float64 -func otTagToOTelAttr(k string, v interface{}) attribute.KeyValue { +func otTagToOTelAttr(k string, v any) attribute.KeyValue { key := otTagToOTelAttrKey(k) switch val := v.(type) { case bool: @@ -648,7 +647,7 @@ func (s fakeSpan) SpanContext() trace.SpanContext { // interface. // // Currently only the HTTPHeaders and TextMap formats are supported. 
-func (t *BridgeTracer) Inject(sm ot.SpanContext, format interface{}, carrier interface{}) error { +func (t *BridgeTracer) Inject(sm ot.SpanContext, format, carrier any) error { bridgeSC, ok := sm.(*bridgeSpanContext) if !ok { return ot.ErrInvalidSpanContext @@ -697,7 +696,7 @@ func (t *BridgeTracer) Inject(sm ot.SpanContext, format interface{}, carrier int // interface. // // Currently only the HTTPHeaders and TextMap formats are supported. -func (t *BridgeTracer) Extract(format interface{}, carrier interface{}) (ot.SpanContext, error) { +func (t *BridgeTracer) Extract(format, carrier any) (ot.SpanContext, error) { builtinFormat, ok := format.(ot.BuiltinFormat) if !ok { return nil, ot.ErrUnsupportedFormat @@ -764,7 +763,7 @@ func (t *textMapWrapper) Get(key string) string { return t.readerMap[key] } -func (t *textMapWrapper) Set(key string, value string) { +func (t *textMapWrapper) Set(key, value string) { t.TextMapWriter.Set(key, value) } @@ -791,7 +790,7 @@ func (t *textMapWrapper) loadMap() { }) } -func newTextMapWrapperForExtract(carrier interface{}) (*textMapWrapper, error) { +func newTextMapWrapperForExtract(carrier any) (*textMapWrapper, error) { t := &textMapWrapper{} reader, ok := carrier.(ot.TextMapReader) @@ -811,7 +810,7 @@ func newTextMapWrapperForExtract(carrier interface{}) (*textMapWrapper, error) { return t, nil } -func newTextMapWrapperForInject(carrier interface{}) (*textMapWrapper, error) { +func newTextMapWrapperForInject(carrier any) (*textMapWrapper, error) { t := &textMapWrapper{} writer, ok := carrier.(ot.TextMapWriter) @@ -833,12 +832,12 @@ func newTextMapWrapperForInject(carrier interface{}) (*textMapWrapper, error) { type textMapWriter struct{} -func (t *textMapWriter) Set(key string, value string) { +func (*textMapWriter) Set(string, string) { // maybe print a warning log. 
} type textMapReader struct{} -func (t *textMapReader) ForeachKey(handler func(key, val string) error) error { +func (*textMapReader) ForeachKey(func(string, string) error) error { return nil // maybe print a warning log. } diff --git a/bridge/opentracing/bridge_grpc_test.go b/bridge/opentracing/bridge_grpc_test.go index d1b234b7f0d..7b3d6be8406 100644 --- a/bridge/opentracing/bridge_grpc_test.go +++ b/bridge/opentracing/bridge_grpc_test.go @@ -22,7 +22,7 @@ import ( type testGRPCServer struct{} -func (*testGRPCServer) UnaryCall(ctx context.Context, r *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { +func (*testGRPCServer) UnaryCall(_ context.Context, r *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { return &testpb.SimpleResponse{Payload: r.Payload * 2}, nil } diff --git a/bridge/opentracing/bridge_test.go b/bridge/opentracing/bridge_test.go index 675af317013..c7c7bcea6d0 100644 --- a/bridge/opentracing/bridge_test.go +++ b/bridge/opentracing/bridge_test.go @@ -30,7 +30,7 @@ func newTestOnlyTextMapReader() *testOnlyTextMapReader { return &testOnlyTextMapReader{} } -func (t *testOnlyTextMapReader) ForeachKey(handler func(key string, val string) error) error { +func (*testOnlyTextMapReader) ForeachKey(handler func(key, val string) error) error { _ = handler("key1", "val1") _ = handler("key2", "val2") @@ -134,15 +134,15 @@ var ( type testTextMapPropagator struct{} -func (t testTextMapPropagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) { - carrier.Set(testHeader, strings.Join([]string{traceID.String(), spanID.String()}, ":")) +func (testTextMapPropagator) Inject(_ context.Context, carrier propagation.TextMapCarrier) { + carrier.Set(testHeader, traceID.String()+":"+spanID.String()) // Test for panic _ = carrier.Get("test") _ = carrier.Keys() } -func (t testTextMapPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context { +func (testTextMapPropagator) Extract(ctx context.Context, carrier 
propagation.TextMapCarrier) context.Context { traces := carrier.Get(testHeader) str := strings.Split(traces, ":") @@ -179,7 +179,7 @@ func (t testTextMapPropagator) Extract(ctx context.Context, carrier propagation. return trace.ContextWithRemoteSpanContext(ctx, sc) } -func (t testTextMapPropagator) Fields() []string { +func (testTextMapPropagator) Fields() []string { return []string{"test"} } @@ -198,7 +198,7 @@ func (t *textMapCarrier) Get(key string) string { return t.m[key] } -func (t *textMapCarrier) Set(key string, value string) { +func (t *textMapCarrier) Set(key, value string) { t.m[key] = value } @@ -214,15 +214,15 @@ func (t *textMapCarrier) Keys() []string { // testTextMapReader only implemented opentracing.TextMapReader interface. type testTextMapReader struct { - m *map[string]string + m map[string]string } -func newTestTextMapReader(m *map[string]string) *testTextMapReader { +func newTestTextMapReader(m map[string]string) *testTextMapReader { return &testTextMapReader{m: m} } -func (t *testTextMapReader) ForeachKey(handler func(key string, val string) error) error { - for key, val := range *t.m { +func (t *testTextMapReader) ForeachKey(handler func(key, val string) error) error { + for key, val := range t.m { if err := handler(key, val); err != nil { return err } @@ -233,15 +233,15 @@ func (t *testTextMapReader) ForeachKey(handler func(key string, val string) erro // testTextMapWriter only implemented opentracing.TextMapWriter interface. 
type testTextMapWriter struct { - m *map[string]string + m map[string]string } -func newTestTextMapWriter(m *map[string]string) *testTextMapWriter { +func newTestTextMapWriter(m map[string]string) *testTextMapWriter { return &testTextMapWriter{m: m} } func (t *testTextMapWriter) Set(key, val string) { - (*t.m)[key] = val + t.m[key] = val } type samplable interface { @@ -261,8 +261,8 @@ func TestBridgeTracer_ExtractAndInject(t *testing.T) { name string injectCarrierType ot.BuiltinFormat extractCarrierType ot.BuiltinFormat - extractCarrier interface{} - injectCarrier interface{} + extractCarrier any + injectCarrier any extractErr error injectErr error }{ @@ -290,9 +290,9 @@ func TestBridgeTracer_ExtractAndInject(t *testing.T) { { name: "support for opentracing.TextMapReader and opentracing.TextMapWriter,non-same instance", injectCarrierType: ot.TextMap, - injectCarrier: newTestTextMapWriter(&shareMap), + injectCarrier: newTestTextMapWriter(shareMap), extractCarrierType: ot.TextMap, - extractCarrier: newTestTextMapReader(&shareMap), + extractCarrier: newTestTextMapReader(shareMap), }, { name: "inject: format type is HTTPHeaders, but carrier is not HTTPHeadersCarrier", @@ -370,7 +370,7 @@ type nonDeferWrapperTracer struct { } func (t *nonDeferWrapperTracer) Start( - ctx context.Context, + _ context.Context, name string, opts ...trace.SpanStartOption, ) (context.Context, trace.Span) { @@ -393,7 +393,7 @@ func TestBridgeTracer_StartSpan(t *testing.T) { }, { name: "with wrapper tracer set", - before: func(t *testing.T, bridge *BridgeTracer) { + before: func(_ *testing.T, bridge *BridgeTracer) { wTracer := NewWrapperTracer(bridge, otel.Tracer("test")) bridge.SetOpenTelemetryTracer(wTracer) }, @@ -401,7 +401,7 @@ func TestBridgeTracer_StartSpan(t *testing.T) { }, { name: "with a non-deferred wrapper tracer", - before: func(t *testing.T, bridge *BridgeTracer) { + before: func(_ *testing.T, bridge *BridgeTracer) { wTracer := &nonDeferWrapperTracer{ NewWrapperTracer(bridge, 
otel.Tracer("test")), } @@ -436,7 +436,7 @@ func TestBridgeTracer_StartSpan(t *testing.T) { func Test_otTagToOTelAttr(t *testing.T) { key := attribute.Key("test") testCases := []struct { - value interface{} + value any expected attribute.KeyValue }{ { @@ -628,17 +628,17 @@ func TestBridgeSpanContextPromotedMethods(t *testing.T) { func TestBridgeCarrierBaggagePropagation(t *testing.T) { carriers := []struct { name string - factory func() interface{} + factory func() any format ot.BuiltinFormat }{ { name: "TextMapCarrier", - factory: func() interface{} { return ot.TextMapCarrier{} }, + factory: func() any { return ot.TextMapCarrier{} }, format: ot.TextMap, }, { name: "HTTPHeadersCarrier", - factory: func() interface{} { return ot.HTTPHeadersCarrier{} }, + factory: func() any { return ot.HTTPHeadersCarrier{} }, format: ot.HTTPHeaders, }, } @@ -895,87 +895,87 @@ func TestBridgeSpan_LogFields(t *testing.T) { func TestBridgeSpan_LogKV(t *testing.T) { testCases := []struct { name string - kv [2]interface{} + kv [2]any expected attribute.KeyValue }{ { name: "string", - kv: [2]interface{}{"string", "value"}, + kv: [2]any{"string", "value"}, expected: attribute.String("string", "value"), }, { name: "bool", - kv: [2]interface{}{"boolKey", true}, + kv: [2]any{"boolKey", true}, expected: attribute.Bool("boolKey", true), }, { name: "int", - kv: [2]interface{}{"intKey", int(12)}, + kv: [2]any{"intKey", int(12)}, expected: attribute.Int("intKey", 12), }, { name: "int8", - kv: [2]interface{}{"int8Key", int8(12)}, + kv: [2]any{"int8Key", int8(12)}, expected: attribute.Int64("int8Key", 12), }, { name: "int16", - kv: [2]interface{}{"int16Key", int16(12)}, + kv: [2]any{"int16Key", int16(12)}, expected: attribute.Int64("int16Key", 12), }, { name: "int32", - kv: [2]interface{}{"int32", int32(12)}, + kv: [2]any{"int32", int32(12)}, expected: attribute.Int64("int32", 12), }, { name: "int64", - kv: [2]interface{}{"int64Key", int64(12)}, + kv: [2]any{"int64Key", int64(12)}, expected: 
attribute.Int64("int64Key", 12), }, { name: "uint", - kv: [2]interface{}{"uintKey", uint(12)}, + kv: [2]any{"uintKey", uint(12)}, expected: attribute.String("uintKey", strconv.FormatUint(12, 10)), }, { name: "uint8", - kv: [2]interface{}{"uint8Key", uint8(12)}, + kv: [2]any{"uint8Key", uint8(12)}, expected: attribute.Int64("uint8Key", 12), }, { name: "uint16", - kv: [2]interface{}{"uint16Key", uint16(12)}, + kv: [2]any{"uint16Key", uint16(12)}, expected: attribute.Int64("uint16Key", 12), }, { name: "uint32", - kv: [2]interface{}{"uint32Key", uint32(12)}, + kv: [2]any{"uint32Key", uint32(12)}, expected: attribute.Int64("uint32Key", 12), }, { name: "uint64", - kv: [2]interface{}{"uint64Key", uint64(12)}, + kv: [2]any{"uint64Key", uint64(12)}, expected: attribute.String("uint64Key", strconv.FormatUint(12, 10)), }, { name: "float32", - kv: [2]interface{}{"float32Key", float32(12)}, + kv: [2]any{"float32Key", float32(12)}, expected: attribute.Float64("float32Key", float64(12)), }, { name: "float64", - kv: [2]interface{}{"float64Key", 1.1}, + kv: [2]any{"float64Key", 1.1}, expected: attribute.Float64("float64Key", 1.1), }, { name: "error", - kv: [2]interface{}{"errorKey", fmt.Errorf("error")}, + kv: [2]any{"errorKey", fmt.Errorf("error")}, expected: attribute.String("errorKey", "error"), }, { name: "objectKey", - kv: [2]interface{}{"objectKey", struct{}{}}, + kv: [2]any{"objectKey", struct{}{}}, expected: attribute.String("objectKey", "{}"), }, } diff --git a/bridge/opentracing/go.mod b/bridge/opentracing/go.mod index 0c05d6469bf..3beeacad668 100644 --- a/bridge/opentracing/go.mod +++ b/bridge/opentracing/go.mod @@ -10,10 +10,10 @@ require ( github.com/opentracing-contrib/go-grpc v0.1.1 github.com/opentracing-contrib/go-grpc/test v0.0.0-20250122020132-2f9c7e3db032 github.com/opentracing/opentracing-go v1.2.0 - github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/otel v1.37.0 - go.opentelemetry.io/otel/trace v1.37.0 - google.golang.org/grpc v1.73.0 + 
github.com/stretchr/testify v1.11.1 + go.opentelemetry.io/otel v1.38.0 + go.opentelemetry.io/otel/trace v1.38.0 + google.golang.org/grpc v1.75.0 ) require ( @@ -23,12 +23,12 @@ require ( github.com/golang/protobuf v1.5.4 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/otel/metric v1.37.0 // indirect - golang.org/x/net v0.41.0 // indirect - golang.org/x/sys v0.33.0 // indirect - golang.org/x/text v0.26.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect - google.golang.org/protobuf v1.36.6 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/sys v0.35.0 // indirect + golang.org/x/text v0.28.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/protobuf v1.36.8 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/bridge/opentracing/go.sum b/bridge/opentracing/go.sum index c468181de0a..27d3e20ddac 100644 --- a/bridge/opentracing/go.sum +++ b/bridge/opentracing/go.sum @@ -28,26 +28,28 @@ github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= 
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= -go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= -go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= -go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= -golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= -golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= -golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= -google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= -google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= +go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= +go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net 
v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc= +google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= +google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/bridge/opentracing/mix_test.go b/bridge/opentracing/mix_test.go index b484a1864b8..8d502d44ea3 100644 --- a/bridge/opentracing/mix_test.go +++ b/bridge/opentracing/mix_test.go @@ -6,6 +6,7 @@ package opentracing import ( "context" "fmt" + "maps" "testing" ot "github.com/opentracing/opentracing-go" @@ -127,7 +128,7 @@ func newSimpleTest() *simpleTest { } } -func (st *simpleTest) setup(t *testing.T, tracer *mockTracer) { +func (st *simpleTest) setup(_ *testing.T, tracer *mockTracer) { tracer.SpareTraceIDs = 
append(tracer.SpareTraceIDs, st.traceID) tracer.SpareSpanIDs = append(tracer.SpareSpanIDs, st.spanIDs...) } @@ -144,7 +145,7 @@ func (st *simpleTest) runOTOtelOT(t *testing.T, ctx context.Context) { runOTOtelOT(t, ctx, "simple", st.noop) } -func (st *simpleTest) noop(t *testing.T, ctx context.Context) context.Context { +func (*simpleTest) noop(_ *testing.T, ctx context.Context) context.Context { return ctx } @@ -165,7 +166,7 @@ func newCurrentActiveSpanTest() *currentActiveSpanTest { } } -func (cast *currentActiveSpanTest) setup(t *testing.T, tracer *mockTracer) { +func (cast *currentActiveSpanTest) setup(_ *testing.T, tracer *mockTracer) { tracer.SpareTraceIDs = append(tracer.SpareTraceIDs, cast.traceID) tracer.SpareSpanIDs = append(tracer.SpareSpanIDs, cast.spanIDs...) @@ -220,7 +221,7 @@ func (cast *currentActiveSpanTest) runOTOtelOT(t *testing.T, ctx context.Context runOTOtelOT(t, ctx, "cast", cast.recordSpans) } -func (cast *currentActiveSpanTest) recordSpans(t *testing.T, ctx context.Context) context.Context { +func (cast *currentActiveSpanTest) recordSpans(_ *testing.T, ctx context.Context) context.Context { spanID := trace.SpanContextFromContext(ctx).SpanID() cast.recordedCurrentOtelSpanIDs = append(cast.recordedCurrentOtelSpanIDs, spanID) @@ -237,7 +238,7 @@ func (cast *currentActiveSpanTest) recordSpans(t *testing.T, ctx context.Context type contextIntactTest struct { contextKeyValues []mockContextKeyValue - recordedContextValues []interface{} + recordedContextValues []any recordIdx int } @@ -272,14 +273,14 @@ func newContextIntactTest() *contextIntactTest { } } -func (coin *contextIntactTest) setup(t *testing.T, tracer *mockTracer) { +func (coin *contextIntactTest) setup(_ *testing.T, tracer *mockTracer) { tracer.SpareContextKeyValues = append(tracer.SpareContextKeyValues, coin.contextKeyValues...) 
coin.recordedContextValues = nil coin.recordIdx = 0 } -func (coin *contextIntactTest) check(t *testing.T, tracer *mockTracer) { +func (coin *contextIntactTest) check(t *testing.T, _ *mockTracer) { if len(coin.recordedContextValues) != len(coin.contextKeyValues) { t.Errorf( "Expected to have %d recorded context values, got %d", @@ -289,7 +290,7 @@ func (coin *contextIntactTest) check(t *testing.T, tracer *mockTracer) { } minLen := min(len(coin.recordedContextValues), len(coin.contextKeyValues)) - for i := 0; i < minLen; i++ { + for i := range minLen { key := coin.contextKeyValues[i].Key value := coin.contextKeyValues[i].Value gotValue := coin.recordedContextValues[i] @@ -351,18 +352,18 @@ func newBaggageItemsPreservationTest() *baggageItemsPreservationTest { } } -func (bip *baggageItemsPreservationTest) setup(t *testing.T, tracer *mockTracer) { +func (bip *baggageItemsPreservationTest) setup(*testing.T, *mockTracer) { bip.step = 0 bip.recordedBaggage = nil } -func (bip *baggageItemsPreservationTest) check(t *testing.T, tracer *mockTracer) { +func (bip *baggageItemsPreservationTest) check(t *testing.T, _ *mockTracer) { if len(bip.recordedBaggage) != len(bip.baggageItems) { t.Errorf("Expected %d recordings, got %d", len(bip.baggageItems), len(bip.recordedBaggage)) } minLen := min(len(bip.recordedBaggage), len(bip.baggageItems)) - for i := 0; i < minLen; i++ { + for i := range minLen { recordedItems := bip.recordedBaggage[i] if len(recordedItems) != i+1 { t.Errorf( @@ -373,7 +374,7 @@ func (bip *baggageItemsPreservationTest) check(t *testing.T, tracer *mockTracer) ) } minItemLen := min(len(bip.baggageItems), i+1) - for j := 0; j < minItemLen; j++ { + for j := range minItemLen { expectedItem := bip.baggageItems[j] if gotValue, ok := recordedItems[expectedItem.key]; !ok { t.Errorf("Missing baggage item %q in recording %d", expectedItem.key, i+1) @@ -449,13 +450,13 @@ func newBaggageInteroperationTest() *baggageInteroperationTest { } } -func (bio 
*baggageInteroperationTest) setup(t *testing.T, tracer *mockTracer) { +func (bio *baggageInteroperationTest) setup(*testing.T, *mockTracer) { bio.step = 0 bio.recordedOTBaggage = nil bio.recordedOtelBaggage = nil } -func (bio *baggageInteroperationTest) check(t *testing.T, tracer *mockTracer) { +func (bio *baggageInteroperationTest) check(t *testing.T, _ *mockTracer) { checkBIORecording(t, "OT", bio.baggageItems, bio.recordedOTBaggage) checkBIORecording(t, "Otel", bio.baggageItems, bio.recordedOtelBaggage) } @@ -474,7 +475,7 @@ func checkBIORecording(t *testing.T, apiDesc string, initialItems []bipBaggage, t.Errorf("Expected %d recordings from %s, got %d", len(initialItems), apiDesc, len(recordings)) } minRecLen := min(len(initialItems), len(recordings)) - for i := 0; i < minRecLen; i++ { + for i := range minRecLen { recordedItems := recordings[i] expectedItemsInStep := (i + 1) * 2 if expectedItemsInStep != len(recordedItems) { @@ -487,9 +488,7 @@ func checkBIORecording(t *testing.T, apiDesc string, initialItems []bipBaggage, ) } recordedItemsCopy := make(map[string]string, len(recordedItems)) - for k, v := range recordedItems { - recordedItemsCopy[k] = v - } + maps.Copy(recordedItemsCopy, recordedItems) for j := 0; j < i+1; j++ { otKey, otelKey := generateBaggageKeys(initialItems[j].key) value := initialItems[j].value @@ -721,7 +720,7 @@ func runOTOtelOT( func TestOtTagToOTelAttrCheckTypeConversions(t *testing.T) { tableTest := []struct { key string - value interface{} + value any expectedValueType attribute.Type }{ { diff --git a/bridge/opentracing/mock.go b/bridge/opentracing/mock.go index 6ac7c7bd3d0..51ace87acf4 100644 --- a/bridge/opentracing/mock.go +++ b/bridge/opentracing/mock.go @@ -13,7 +13,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/bridge/opentracing/migration" "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" 
"go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" "go.opentelemetry.io/otel/trace/noop" @@ -28,8 +28,8 @@ var ( ) type mockContextKeyValue struct { - Key interface{} - Value interface{} + Key any + Value any } type mockTracer struct { @@ -66,7 +66,7 @@ func newMockTracer() *mockTracer { // Start returns a new trace span with the given name and options. func (t *mockTracer) Start( ctx context.Context, - name string, + _ string, opts ...trace.SpanStartOption, ) (context.Context, trace.Span) { config := trace.NewSpanStartConfig(opts...) @@ -132,7 +132,7 @@ func (t *mockTracer) getParentSpanID(ctx context.Context, config *trace.SpanConf return trace.SpanID{} } -func (t *mockTracer) getParentSpanContext(ctx context.Context, config *trace.SpanConfig) trace.SpanContext { +func (*mockTracer) getParentSpanContext(ctx context.Context, config *trace.SpanConfig) trace.SpanContext { if !config.NewRoot() { return trace.SpanContextFromContext(ctx) } @@ -172,7 +172,7 @@ func (t *mockTracer) getRandTraceID() trace.TraceID { } // DeferredContextSetupHook implements the DeferredContextSetupTracerExtension interface. 
-func (t *mockTracer) DeferredContextSetupHook(ctx context.Context, span trace.Span) context.Context { +func (t *mockTracer) DeferredContextSetupHook(ctx context.Context, _ trace.Span) context.Context { return t.addSpareContextValue(ctx) } @@ -309,4 +309,4 @@ func (s *mockSpan) OverrideTracer(tracer trace.Tracer) { s.officialTracer = tracer } -func (s *mockSpan) TracerProvider() trace.TracerProvider { return noop.NewTracerProvider() } +func (*mockSpan) TracerProvider() trace.TracerProvider { return noop.NewTracerProvider() } diff --git a/bridge/opentracing/provider_test.go b/bridge/opentracing/provider_test.go index 091916141a4..31a4c9ebb3a 100644 --- a/bridge/opentracing/provider_test.go +++ b/bridge/opentracing/provider_test.go @@ -21,7 +21,7 @@ type namedMockTracerProvider struct{ embedded.TracerProvider } var _ trace.TracerProvider = (*namedMockTracerProvider)(nil) // Tracer returns the WrapperTracer associated with the WrapperTracerProvider. -func (p *namedMockTracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { +func (*namedMockTracerProvider) Tracer(name string, _ ...trace.TracerOption) trace.Tracer { return &namedMockTracer{ name: name, mockTracer: newMockTracer(), diff --git a/bridge/opentracing/wrapper.go b/bridge/opentracing/wrapper.go index 79be509b8e6..24a32f681dc 100644 --- a/bridge/opentracing/wrapper.go +++ b/bridge/opentracing/wrapper.go @@ -24,7 +24,7 @@ type WrapperTracerProvider struct { var _ trace.TracerProvider = (*WrapperTracerProvider)(nil) // Tracer returns the WrapperTracer associated with the WrapperTracerProvider. 
-func (p *WrapperTracerProvider) Tracer(_ string, _ ...trace.TracerOption) trace.Tracer { +func (p *WrapperTracerProvider) Tracer(string, ...trace.TracerOption) trace.Tracer { return p.wTracer } diff --git a/codes/codes.go b/codes/codes.go index 49a35b12255..d48847ed86c 100644 --- a/codes/codes.go +++ b/codes/codes.go @@ -67,7 +67,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { return errors.New("nil receiver passed to UnmarshalJSON") } - var x interface{} + var x any if err := json.Unmarshal(b, &x); err != nil { return err } @@ -102,5 +102,5 @@ func (c *Code) MarshalJSON() ([]byte, error) { if !ok { return nil, fmt.Errorf("invalid code: %d", *c) } - return []byte(fmt.Sprintf("%q", str)), nil + return fmt.Appendf(nil, "%q", str), nil } diff --git a/dependencies.Dockerfile b/dependencies.Dockerfile index 935bd487631..a311fbb4835 100644 --- a/dependencies.Dockerfile +++ b/dependencies.Dockerfile @@ -1,4 +1,4 @@ # This is a renovate-friendly source of Docker images. -FROM python:3.13.5-slim-bullseye@sha256:5b9fc0d8ef79cfb5f300e61cb516e0c668067bbf77646762c38c94107e230dbc AS python -FROM otel/weaver:v0.15.2@sha256:b13acea09f721774daba36344861f689ac4bb8d6ecd94c4600b4d590c8fb34b9 AS weaver +FROM python:3.13.6-slim-bullseye@sha256:e98b521460ee75bca92175c16247bdf7275637a8faaeb2bcfa19d879ae5c4b9a AS python +FROM otel/weaver:v0.17.1@sha256:32523b5e44fb44418786347e9f7dde187d8797adb6d57a2ee99c245346c3cdfe AS weaver FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown diff --git a/exporters/otlp/otlplog/otlploggrpc/client.go b/exporters/otlp/otlplog/otlploggrpc/client.go index 1add3f33330..d1b31ef2aa6 100644 --- a/exporters/otlp/otlplog/otlploggrpc/client.go +++ b/exporters/otlp/otlplog/otlploggrpc/client.go @@ -9,6 +9,8 @@ import ( "fmt" "time" + collogpb "go.opentelemetry.io/proto/otlp/collector/logs/v1" + logpb "go.opentelemetry.io/proto/otlp/logs/v1" "google.golang.org/genproto/googleapis/rpc/errdetails" 
"google.golang.org/grpc" "google.golang.org/grpc/backoff" @@ -21,8 +23,6 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry" - collogpb "go.opentelemetry.io/proto/otlp/collector/logs/v1" - logpb "go.opentelemetry.io/proto/otlp/logs/v1" ) // The methods of this type are not expected to be called concurrently. @@ -86,11 +86,12 @@ func newGRPCDialOptions(cfg config) []grpc.DialOption { dialOpts = append(dialOpts, grpc.WithDefaultServiceConfig(cfg.serviceConfig.Value)) } // Prioritize GRPCCredentials over Insecure (passing both is an error). - if cfg.gRPCCredentials.Value != nil { + switch { + case cfg.gRPCCredentials.Value != nil: dialOpts = append(dialOpts, grpc.WithTransportCredentials(cfg.gRPCCredentials.Value)) - } else if cfg.insecure.Value { + case cfg.insecure.Value: dialOpts = append(dialOpts, grpc.WithTransportCredentials(insecure.NewCredentials())) - } else { + default: // Default to using the host's root CA. dialOpts = append(dialOpts, grpc.WithTransportCredentials( credentials.NewTLS(nil), @@ -216,9 +217,9 @@ func newNoopClient() *noopClient { return &noopClient{} } -func (c *noopClient) UploadLogs(context.Context, []*logpb.ResourceLogs) error { return nil } +func (*noopClient) UploadLogs(context.Context, []*logpb.ResourceLogs) error { return nil } -func (c *noopClient) Shutdown(context.Context) error { return nil } +func (*noopClient) Shutdown(context.Context) error { return nil } // retryable returns if err identifies a request that can be retried and a // duration to wait for if an explicit throttle time is included in err. 
diff --git a/exporters/otlp/otlplog/otlploggrpc/client_test.go b/exporters/otlp/otlplog/otlploggrpc/client_test.go index 2b5cec5d850..e1526f735ab 100644 --- a/exporters/otlp/otlplog/otlploggrpc/client_test.go +++ b/exporters/otlp/otlplog/otlploggrpc/client_test.go @@ -14,6 +14,10 @@ import ( "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + collogpb "go.opentelemetry.io/proto/otlp/collector/logs/v1" + cpb "go.opentelemetry.io/proto/otlp/common/v1" + lpb "go.opentelemetry.io/proto/otlp/logs/v1" + rpb "go.opentelemetry.io/proto/otlp/resource/v1" "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -26,11 +30,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/sdk/log" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" - collogpb "go.opentelemetry.io/proto/otlp/collector/logs/v1" - cpb "go.opentelemetry.io/proto/otlp/common/v1" - lpb "go.opentelemetry.io/proto/otlp/logs/v1" - rpb "go.opentelemetry.io/proto/otlp/resource/v1" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) var ( @@ -268,7 +268,7 @@ func TestNewClient(t *testing.T) { // The gRPC connection created by newClient. 
conn, err := grpc.NewClient("test", grpc.WithTransportCredentials(insecure.NewCredentials())) require.NoError(t, err) - newGRPCClientFn = func(target string, opts ...grpc.DialOption) (*grpc.ClientConn, error) { + newGRPCClientFn = func(string, ...grpc.DialOption) (*grpc.ClientConn, error) { return conn, nil } diff --git a/exporters/otlp/otlplog/otlploggrpc/config.go b/exporters/otlp/otlplog/otlploggrpc/config.go index d0cc79d54ec..3fda9fcb0b8 100644 --- a/exporters/otlp/otlplog/otlploggrpc/config.go +++ b/exporters/otlp/otlplog/otlploggrpc/config.go @@ -563,7 +563,7 @@ func loadCertificates(certPath, keyPath string) ([]tls.Certificate, error) { func insecureFromScheme(prev setting[bool], scheme string) setting[bool] { if scheme == "https" { return newSetting(false) - } else if len(scheme) > 0 { + } else if scheme != "" { return newSetting(true) } diff --git a/exporters/otlp/otlplog/otlploggrpc/config_test.go b/exporters/otlp/otlplog/otlploggrpc/config_test.go index e7a0cf1c9b4..5005d9cca0e 100644 --- a/exporters/otlp/otlplog/otlploggrpc/config_test.go +++ b/exporters/otlp/otlplog/otlploggrpc/config_test.go @@ -525,10 +525,8 @@ func assertTLSConfig(t *testing.T, want, got setting[*tls.Config]) { if want.Value.RootCAs == nil { assert.Nil(t, got.Value.RootCAs, "*tls.Config.RootCAs") - } else { - if assert.NotNil(t, got.Value.RootCAs, "RootCAs") { - assert.True(t, want.Value.RootCAs.Equal(got.Value.RootCAs), "RootCAs equal") - } + } else if assert.NotNil(t, got.Value.RootCAs, "RootCAs") { + assert.True(t, want.Value.RootCAs.Equal(got.Value.RootCAs), "RootCAs equal") } assert.Equal(t, want.Value.Certificates, got.Value.Certificates, "Certificates") } diff --git a/exporters/otlp/otlplog/otlploggrpc/exporter.go b/exporters/otlp/otlplog/otlploggrpc/exporter.go index 66895c3a1a0..898eecf7737 100644 --- a/exporters/otlp/otlplog/otlploggrpc/exporter.go +++ b/exporters/otlp/otlplog/otlploggrpc/exporter.go @@ -8,9 +8,10 @@ import ( "sync" "sync/atomic" + logpb 
"go.opentelemetry.io/proto/otlp/logs/v1" + "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform" "go.opentelemetry.io/otel/sdk/log" - logpb "go.opentelemetry.io/proto/otlp/logs/v1" ) type logClient interface { @@ -88,6 +89,6 @@ func (e *Exporter) Shutdown(ctx context.Context) error { } // ForceFlush does nothing. The Exporter holds no state. -func (e *Exporter) ForceFlush(ctx context.Context) error { +func (*Exporter) ForceFlush(context.Context) error { return nil } diff --git a/exporters/otlp/otlplog/otlploggrpc/exporter_test.go b/exporters/otlp/otlplog/otlploggrpc/exporter_test.go index 88bb5704fe8..65a9c49be08 100644 --- a/exporters/otlp/otlplog/otlploggrpc/exporter_test.go +++ b/exporters/otlp/otlplog/otlploggrpc/exporter_test.go @@ -14,12 +14,12 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + collogpb "go.opentelemetry.io/proto/otlp/collector/logs/v1" + logpb "go.opentelemetry.io/proto/otlp/logs/v1" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/log" sdklog "go.opentelemetry.io/otel/sdk/log" - collogpb "go.opentelemetry.io/proto/otlp/collector/logs/v1" - logpb "go.opentelemetry.io/proto/otlp/logs/v1" ) var records []sdklog.Record @@ -117,7 +117,7 @@ func TestExporterForceFlush(t *testing.T) { assert.NoError(t, e.ForceFlush(ctx), "ForceFlush") } -func TestExporterConcurrentSafe(t *testing.T) { +func TestExporterConcurrentSafe(*testing.T) { e := newExporter(&mockClient{}) const goroutines = 10 @@ -125,7 +125,7 @@ func TestExporterConcurrentSafe(t *testing.T) { var wg sync.WaitGroup ctx, cancel := context.WithCancel(context.Background()) runs := new(uint64) - for i := 0; i < goroutines; i++ { + for range goroutines { wg.Add(1) go func() { defer wg.Done() diff --git a/exporters/otlp/otlplog/otlploggrpc/go.mod b/exporters/otlp/otlplog/otlploggrpc/go.mod index f851f0aa4fc..bce53da35fe 100644 --- a/exporters/otlp/otlplog/otlploggrpc/go.mod +++ b/exporters/otlp/otlplog/otlploggrpc/go.mod @@ 
-6,19 +6,19 @@ go 1.23.0 retract v0.12.0 require ( - github.com/cenkalti/backoff/v5 v5.0.2 + github.com/cenkalti/backoff/v5 v5.0.3 github.com/google/go-cmp v0.7.0 - github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/otel v1.37.0 - go.opentelemetry.io/otel/log v0.13.0 - go.opentelemetry.io/otel/sdk v1.37.0 - go.opentelemetry.io/otel/sdk/log v0.13.0 - go.opentelemetry.io/otel/sdk/log/logtest v0.13.0 - go.opentelemetry.io/otel/trace v1.37.0 - go.opentelemetry.io/proto/otlp v1.7.0 - google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 - google.golang.org/grpc v1.73.0 - google.golang.org/protobuf v1.36.6 + github.com/stretchr/testify v1.11.1 + go.opentelemetry.io/otel v1.38.0 + go.opentelemetry.io/otel/log v0.14.0 + go.opentelemetry.io/otel/sdk v1.38.0 + go.opentelemetry.io/otel/sdk/log v0.14.0 + go.opentelemetry.io/otel/sdk/log/logtest v0.14.0 + go.opentelemetry.io/otel/trace v1.38.0 + go.opentelemetry.io/proto/otlp v1.7.1 + google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 + google.golang.org/grpc v1.75.0 + google.golang.org/protobuf v1.36.8 ) require ( @@ -26,14 +26,14 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/otel/metric v1.37.0 // indirect - golang.org/x/net v0.41.0 // indirect - golang.org/x/sys v0.33.0 // indirect - golang.org/x/text v0.26.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/sys v0.35.0 // indirect + golang.org/x/text v0.28.0 // indirect + google.golang.org/genproto/googleapis/api 
v0.0.0-20250825161204-c5933d9347a5 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) @@ -50,3 +50,5 @@ replace go.opentelemetry.io/otel/trace => ../../../../trace replace go.opentelemetry.io/otel/metric => ../../../../metric replace go.opentelemetry.io/otel/sdk/log/logtest => ../../../../sdk/log/logtest + +replace go.opentelemetry.io/otel/sdk/metric => ../../../../sdk/metric diff --git a/exporters/otlp/otlplog/otlploggrpc/go.sum b/exporters/otlp/otlplog/otlploggrpc/go.sum index 5be49ad71ed..dd1fde55b4a 100644 --- a/exporters/otlp/otlplog/otlploggrpc/go.sum +++ b/exporters/otlp/otlplog/otlploggrpc/go.sum @@ -1,5 +1,5 @@ -github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8= -github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= +github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= +github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -13,8 +13,8 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 h1:X5VWvz21y3gzm9Nw/kaUeku/1+uBhcekkmy4IkffJww= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod 
h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -23,28 +23,28 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= -go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= -go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os= -go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo= -golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= -golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= -golang.org/x/text v0.26.0/go.mod 
h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= -google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY= -google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= -google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= -google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4= +go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE= 
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc= +google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= +google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/exporters/otlp/otlplog/otlploggrpc/internal/transform/log_test.go b/exporters/otlp/otlplog/otlploggrpc/internal/transform/log_test.go index c3212d19758..a9c71c72712 100644 --- a/exporters/otlp/otlplog/otlploggrpc/internal/transform/log_test.go +++ b/exporters/otlp/otlplog/otlploggrpc/internal/transform/log_test.go @@ -22,7 +22,7 @@ import ( "go.opentelemetry.io/otel/sdk/log" "go.opentelemetry.io/otel/sdk/log/logtest" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" "go.opentelemetry.io/otel/trace" ) diff --git a/exporters/otlp/otlplog/otlploggrpc/version.go b/exporters/otlp/otlplog/otlploggrpc/version.go index 42d186f31b3..818ecf9e9df 100644 --- a/exporters/otlp/otlplog/otlploggrpc/version.go +++ b/exporters/otlp/otlplog/otlploggrpc/version.go @@ -5,5 +5,5 @@ package otlploggrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/o // Version is the current release version of the OpenTelemetry OTLP over gRPC logs exporter in use. 
func Version() string { - return "0.13.0" + return "0.14.0" } diff --git a/exporters/otlp/otlplog/otlploghttp/client.go b/exporters/otlp/otlplog/otlploghttp/client.go index 3af60258a65..59be105dbe6 100644 --- a/exporters/otlp/otlplog/otlploghttp/client.go +++ b/exporters/otlp/otlplog/otlploghttp/client.go @@ -18,12 +18,11 @@ import ( "sync" "time" - "google.golang.org/protobuf/proto" - - "go.opentelemetry.io/otel" collogpb "go.opentelemetry.io/proto/otlp/collector/logs/v1" logpb "go.opentelemetry.io/proto/otlp/logs/v1" + "google.golang.org/protobuf/proto" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry" ) @@ -200,7 +199,7 @@ func (c *httpClient) uploadLogs(ctx context.Context, data []*logpb.ResourceLogs) return err } respStr := strings.TrimSpace(respData.String()) - if len(respStr) == 0 { + if respStr == "" { respStr = "(empty)" } bodyErr := fmt.Errorf("body: %s", respStr) @@ -220,7 +219,7 @@ func (c *httpClient) uploadLogs(ctx context.Context, data []*logpb.ResourceLogs) } var gzPool = sync.Pool{ - New: func() interface{} { + New: func() any { w := gzip.NewWriter(io.Discard) return w }, @@ -232,7 +231,7 @@ func (c *httpClient) newRequest(ctx context.Context, body []byte) (request, erro switch c.compression { case NoCompression: - r.ContentLength = (int64)(len(body)) + r.ContentLength = int64(len(body)) req.bodyReader = bodyReader(body) case GzipCompression: // Ensure the content length is not used. 
@@ -313,7 +312,7 @@ func (e retryableError) Unwrap() error { return e.err } -func (e retryableError) As(target interface{}) bool { +func (e retryableError) As(target any) bool { if e.err == nil { return false } diff --git a/exporters/otlp/otlplog/otlploghttp/client_test.go b/exporters/otlp/otlplog/otlploghttp/client_test.go index a7cc316e325..fdcbbf5e0aa 100644 --- a/exporters/otlp/otlplog/otlploghttp/client_test.go +++ b/exporters/otlp/otlplog/otlploghttp/client_test.go @@ -29,16 +29,15 @@ import ( "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "google.golang.org/protobuf/proto" - - "go.opentelemetry.io/otel" collogpb "go.opentelemetry.io/proto/otlp/collector/logs/v1" cpb "go.opentelemetry.io/proto/otlp/common/v1" lpb "go.opentelemetry.io/proto/otlp/logs/v1" rpb "go.opentelemetry.io/proto/otlp/resource/v1" + "google.golang.org/protobuf/proto" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/sdk/log" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) var ( @@ -354,7 +353,7 @@ func (c *httpCollector) record(r *http.Request) exportResult { return exportResult{Err: err} } -func (c *httpCollector) readBody(r *http.Request) (body []byte, err error) { +func (*httpCollector) readBody(r *http.Request) (body []byte, err error) { var reader io.ReadCloser switch r.Header.Get("Content-Encoding") { case "gzip": diff --git a/exporters/otlp/otlplog/otlploghttp/config_test.go b/exporters/otlp/otlplog/otlploghttp/config_test.go index a6863e3805a..e9a8a66fe76 100644 --- a/exporters/otlp/otlplog/otlploghttp/config_test.go +++ b/exporters/otlp/otlplog/otlploghttp/config_test.go @@ -460,10 +460,8 @@ func assertTLSConfig(t *testing.T, want, got setting[*tls.Config]) { if want.Value.RootCAs == nil { assert.Nil(t, got.Value.RootCAs, "*tls.Config.RootCAs") - } else { - if assert.NotNil(t, got.Value.RootCAs, "RootCAs") { - assert.True(t, 
want.Value.RootCAs.Equal(got.Value.RootCAs), "RootCAs equal") - } + } else if assert.NotNil(t, got.Value.RootCAs, "RootCAs") { + assert.True(t, want.Value.RootCAs.Equal(got.Value.RootCAs), "RootCAs equal") } assert.Equal(t, want.Value.Certificates, got.Value.Certificates, "Certificates") } diff --git a/exporters/otlp/otlplog/otlploghttp/exporter.go b/exporters/otlp/otlplog/otlploghttp/exporter.go index f1c8d3ae0a7..4436d0cd864 100644 --- a/exporters/otlp/otlplog/otlploghttp/exporter.go +++ b/exporters/otlp/otlplog/otlploghttp/exporter.go @@ -58,7 +58,7 @@ func (e *Exporter) Export(ctx context.Context, records []log.Record) error { // Shutdown shuts down the Exporter. Calls to Export or ForceFlush will perform // no operation after this is called. -func (e *Exporter) Shutdown(ctx context.Context) error { +func (e *Exporter) Shutdown(context.Context) error { if e.stopped.Swap(true) { return nil } @@ -68,6 +68,6 @@ func (e *Exporter) Shutdown(ctx context.Context) error { } // ForceFlush does nothing. The Exporter holds no state. 
-func (e *Exporter) ForceFlush(ctx context.Context) error { +func (*Exporter) ForceFlush(context.Context) error { return nil } diff --git a/exporters/otlp/otlplog/otlploghttp/exporter_test.go b/exporters/otlp/otlplog/otlploghttp/exporter_test.go index 418a3148df1..79fea41d622 100644 --- a/exporters/otlp/otlplog/otlploghttp/exporter_test.go +++ b/exporters/otlp/otlplog/otlploghttp/exporter_test.go @@ -13,9 +13,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + logpb "go.opentelemetry.io/proto/otlp/logs/v1" "go.opentelemetry.io/otel/sdk/log" - logpb "go.opentelemetry.io/proto/otlp/logs/v1" ) func TestExporterExportErrors(t *testing.T) { @@ -93,7 +93,7 @@ func TestExporterConcurrentSafe(t *testing.T) { var wg sync.WaitGroup ctx, cancel := context.WithCancel(context.Background()) runs := new(uint64) - for i := 0; i < goroutines; i++ { + for range goroutines { wg.Add(1) go func() { defer wg.Done() diff --git a/exporters/otlp/otlplog/otlploghttp/go.mod b/exporters/otlp/otlplog/otlploghttp/go.mod index a913abb25aa..c7afa225aec 100644 --- a/exporters/otlp/otlplog/otlploghttp/go.mod +++ b/exporters/otlp/otlplog/otlploghttp/go.mod @@ -6,17 +6,17 @@ go 1.23.0 retract v0.12.0 require ( - github.com/cenkalti/backoff/v5 v5.0.2 + github.com/cenkalti/backoff/v5 v5.0.3 github.com/google/go-cmp v0.7.0 - github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/otel v1.37.0 - go.opentelemetry.io/otel/log v0.13.0 - go.opentelemetry.io/otel/sdk v1.37.0 - go.opentelemetry.io/otel/sdk/log v0.13.0 - go.opentelemetry.io/otel/sdk/log/logtest v0.13.0 - go.opentelemetry.io/otel/trace v1.37.0 - go.opentelemetry.io/proto/otlp v1.7.0 - google.golang.org/protobuf v1.36.6 + github.com/stretchr/testify v1.11.1 + go.opentelemetry.io/otel v1.38.0 + go.opentelemetry.io/otel/log v0.14.0 + go.opentelemetry.io/otel/sdk v1.38.0 + go.opentelemetry.io/otel/sdk/log v0.14.0 + go.opentelemetry.io/otel/sdk/log/logtest v0.14.0 + go.opentelemetry.io/otel/trace v1.38.0 + 
go.opentelemetry.io/proto/otlp v1.7.1 + google.golang.org/protobuf v1.36.8 ) require ( @@ -24,16 +24,16 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/otel/metric v1.37.0 // indirect - golang.org/x/net v0.41.0 // indirect - golang.org/x/sys v0.33.0 // indirect - golang.org/x/text v0.26.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect - google.golang.org/grpc v1.73.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/sys v0.35.0 // indirect + golang.org/x/text v0.28.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/grpc v1.75.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) @@ -50,3 +50,5 @@ replace go.opentelemetry.io/otel/sdk => ../../../../sdk replace go.opentelemetry.io/otel/metric => ../../../../metric replace go.opentelemetry.io/otel/log => ../../../../log + +replace go.opentelemetry.io/otel/sdk/metric => ../../../../sdk/metric diff --git a/exporters/otlp/otlplog/otlploghttp/go.sum b/exporters/otlp/otlplog/otlploghttp/go.sum index 5be49ad71ed..dd1fde55b4a 100644 --- a/exporters/otlp/otlplog/otlploghttp/go.sum +++ b/exporters/otlp/otlplog/otlploghttp/go.sum @@ -1,5 +1,5 @@ -github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8= -github.com/cenkalti/backoff/v5 v5.0.2/go.mod 
h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= +github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= +github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -13,8 +13,8 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 h1:X5VWvz21y3gzm9Nw/kaUeku/1+uBhcekkmy4IkffJww= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -23,28 +23,28 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod 
h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= -go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= -go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os= -go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo= -golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= -golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= -golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= -google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY= -google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= -google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= -google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= -google.golang.org/protobuf v1.36.6 
h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4= +go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc= +google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= +google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c 
h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/exporters/otlp/otlplog/otlploghttp/internal/transform/log_test.go b/exporters/otlp/otlplog/otlploghttp/internal/transform/log_test.go index c3212d19758..a9c71c72712 100644 --- a/exporters/otlp/otlplog/otlploghttp/internal/transform/log_test.go +++ b/exporters/otlp/otlplog/otlploghttp/internal/transform/log_test.go @@ -22,7 +22,7 @@ import ( "go.opentelemetry.io/otel/sdk/log" "go.opentelemetry.io/otel/sdk/log/logtest" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" "go.opentelemetry.io/otel/trace" ) diff --git a/exporters/otlp/otlplog/otlploghttp/version.go b/exporters/otlp/otlplog/otlploghttp/version.go index c8e9c886796..a482321af3e 100644 --- a/exporters/otlp/otlplog/otlploghttp/version.go +++ b/exporters/otlp/otlplog/otlploghttp/version.go @@ -5,5 +5,5 @@ package otlploghttp // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/o // Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf logs exporter in use. 
func Version() string { - return "0.13.0" + return "0.14.0" } diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go index 82a4c2c2a1e..492480f8c9a 100644 --- a/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go +++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go @@ -8,6 +8,8 @@ import ( "errors" "time" + colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1" + metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -18,8 +20,6 @@ import ( "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry" - colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1" - metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" ) type client struct { diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go index 3977c1f8a6c..35cdf466127 100644 --- a/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go +++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go @@ -9,12 +9,13 @@ import ( "fmt" "sync" + metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform" "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" - metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" ) // Exporter is a OpenTelemetry metric Exporter using gRPC. @@ -91,7 +92,7 @@ func (e *Exporter) Export(ctx context.Context, rm *metricdata.ResourceMetrics) e // This method returns an error if the method is canceled by the passed context. 
// // This method is safe to call concurrently. -func (e *Exporter) ForceFlush(ctx context.Context) error { +func (*Exporter) ForceFlush(ctx context.Context) error { // The exporter and client hold no state, nothing to flush. return ctx.Err() } @@ -119,7 +120,7 @@ var errShutdown = errors.New("gRPC exporter is shutdown") type shutdownClient struct{} -func (c shutdownClient) err(ctx context.Context) error { +func (shutdownClient) err(ctx context.Context) error { if err := ctx.Err(); err != nil { return err } @@ -135,7 +136,7 @@ func (c shutdownClient) Shutdown(ctx context.Context) error { } // MarshalLog returns logging data about the Exporter. -func (e *Exporter) MarshalLog() interface{} { +func (*Exporter) MarshalLog() any { return struct{ Type string }{Type: "OTLP/gRPC"} } diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter_test.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter_test.go index 59f72ea284c..3039fc63b17 100644 --- a/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter_test.go +++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter_test.go @@ -37,7 +37,7 @@ func TestExporterClientConcurrentSafe(t *testing.T) { done := make(chan struct{}) var wg, someWork sync.WaitGroup - for i := 0; i < goroutines; i++ { + for range goroutines { wg.Add(1) someWork.Add(1) go func() { diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod b/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod index 75ae35f4fc7..fff9ba46a69 100644 --- a/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod +++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod @@ -5,16 +5,16 @@ go 1.23.0 retract v0.32.2 // Contains unresolvable dependencies. 
require ( - github.com/cenkalti/backoff/v5 v5.0.2 + github.com/cenkalti/backoff/v5 v5.0.3 github.com/google/go-cmp v0.7.0 - github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/otel v1.37.0 - go.opentelemetry.io/otel/sdk v1.37.0 - go.opentelemetry.io/otel/sdk/metric v1.37.0 - go.opentelemetry.io/proto/otlp v1.7.0 - google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 - google.golang.org/grpc v1.73.0 - google.golang.org/protobuf v1.36.6 + github.com/stretchr/testify v1.11.1 + go.opentelemetry.io/otel v1.38.0 + go.opentelemetry.io/otel/sdk v1.38.0 + go.opentelemetry.io/otel/sdk/metric v1.38.0 + go.opentelemetry.io/proto/otlp v1.7.1 + google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 + google.golang.org/grpc v1.75.0 + google.golang.org/protobuf v1.36.8 ) require ( @@ -22,15 +22,15 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/otel/metric v1.37.0 // indirect - go.opentelemetry.io/otel/trace v1.37.0 // indirect - golang.org/x/net v0.41.0 // indirect - golang.org/x/sys v0.33.0 // indirect - golang.org/x/text v0.26.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/sys v0.35.0 // indirect + golang.org/x/text v0.28.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/go.sum b/exporters/otlp/otlpmetric/otlpmetricgrpc/go.sum index 
6ee0c70b7a4..dd1fde55b4a 100644 --- a/exporters/otlp/otlpmetric/otlpmetricgrpc/go.sum +++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/go.sum @@ -1,5 +1,5 @@ -github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8= -github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= +github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= +github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -13,8 +13,8 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 h1:X5VWvz21y3gzm9Nw/kaUeku/1+uBhcekkmy4IkffJww= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -23,26 +23,28 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.13.1 
h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os= -go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo= -golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= -golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= -golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= -google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY= -google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= -google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= -google.golang.org/grpc v1.73.0/go.mod 
h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4= +go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc= +google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= +google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/otest/client.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/otest/client.go index 32717d2326d..8fe43420677 100644 --- a/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/otest/client.go +++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/otest/client.go @@ -19,7 +19,7 @@ import ( "google.golang.org/protobuf/proto" "go.opentelemetry.io/otel" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" collpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1" cpb "go.opentelemetry.io/proto/otlp/common/v1" mpb "go.opentelemetry.io/proto/otlp/metrics/v1" diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata_test.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata_test.go index 028e2c94f28..b2a9b03300e 100644 --- a/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata_test.go +++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata_test.go @@ -17,7 +17,7 @@ import ( "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" cpb "go.opentelemetry.io/proto/otlp/common/v1" mpb "go.opentelemetry.io/proto/otlp/metrics/v1" rpb "go.opentelemetry.io/proto/otlp/resource/v1" diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go index b34d35b0b8b..7909cac56d9 100644 --- a/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go +++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go @@ -5,5 +5,5 @@ package otlpmetricgrpc // 
import "go.opentelemetry.io/otel/exporters/otlp/otlpme // Version is the current release version of the OpenTelemetry OTLP over gRPC metrics exporter in use. func Version() string { - return "1.37.0" + return "1.38.0" } diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/client.go b/exporters/otlp/otlpmetric/otlpmetrichttp/client.go index 23f1f003171..26af47e621b 100644 --- a/exporters/otlp/otlpmetric/otlpmetrichttp/client.go +++ b/exporters/otlp/otlpmetric/otlpmetrichttp/client.go @@ -18,14 +18,14 @@ import ( "sync" "time" + colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1" + metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" "google.golang.org/protobuf/proto" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry" - colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1" - metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" ) type client struct { @@ -203,7 +203,7 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou return err } respStr := strings.TrimSpace(respData.String()) - if len(respStr) == 0 { + if respStr == "" { respStr = "(empty)" } bodyErr := fmt.Errorf("body: %s", respStr) @@ -223,7 +223,7 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou } var gzPool = sync.Pool{ - New: func() interface{} { + New: func() any { w := gzip.NewWriter(io.Discard) return w }, @@ -235,7 +235,7 @@ func (c *client) newRequest(ctx context.Context, body []byte) (request, error) { switch c.compression { case NoCompression: - r.ContentLength = (int64)(len(body)) + r.ContentLength = int64(len(body)) req.bodyReader = bodyReader(body) case GzipCompression: // Ensure the content length is not used. 
@@ -316,7 +316,7 @@ func (e retryableError) Unwrap() error { return e.err } -func (e retryableError) As(target interface{}) bool { +func (e retryableError) As(target any) bool { if e.err == nil { return false } diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/client_test.go b/exporters/otlp/otlpmetric/otlpmetrichttp/client_test.go index ef5c6774a04..d8384067ae4 100644 --- a/exporters/otlp/otlpmetric/otlpmetrichttp/client_test.go +++ b/exporters/otlp/otlpmetric/otlpmetrichttp/client_test.go @@ -16,12 +16,12 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + mpb "go.opentelemetry.io/proto/otlp/metrics/v1" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/otest" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" - mpb "go.opentelemetry.io/proto/otlp/metrics/v1" ) type clientShim struct { diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go b/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go index 50ac8f86ea3..292645a38cc 100644 --- a/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go +++ b/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go @@ -9,12 +9,13 @@ import ( "fmt" "sync" + metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform" "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" - metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" ) // Exporter is a OpenTelemetry metric Exporter using protobufs over HTTP. @@ -91,7 +92,7 @@ func (e *Exporter) Export(ctx context.Context, rm *metricdata.ResourceMetrics) e // This method returns an error if the method is canceled by the passed context. 
// // This method is safe to call concurrently. -func (e *Exporter) ForceFlush(ctx context.Context) error { +func (*Exporter) ForceFlush(ctx context.Context) error { // The exporter and client hold no state, nothing to flush. return ctx.Err() } @@ -119,7 +120,7 @@ var errShutdown = errors.New("HTTP exporter is shutdown") type shutdownClient struct{} -func (c shutdownClient) err(ctx context.Context) error { +func (shutdownClient) err(ctx context.Context) error { if err := ctx.Err(); err != nil { return err } @@ -135,7 +136,7 @@ func (c shutdownClient) Shutdown(ctx context.Context) error { } // MarshalLog returns logging data about the Exporter. -func (e *Exporter) MarshalLog() interface{} { +func (*Exporter) MarshalLog() any { return struct{ Type string }{Type: "OTLP/HTTP"} } diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/exporter_test.go b/exporters/otlp/otlpmetric/otlpmetrichttp/exporter_test.go index 8234bf0e22a..1c5fa9d8203 100644 --- a/exporters/otlp/otlpmetric/otlpmetrichttp/exporter_test.go +++ b/exporters/otlp/otlpmetric/otlpmetrichttp/exporter_test.go @@ -37,7 +37,7 @@ func TestExporterClientConcurrentSafe(t *testing.T) { done := make(chan struct{}) var wg, someWork sync.WaitGroup - for i := 0; i < goroutines; i++ { + for range goroutines { wg.Add(1) someWork.Add(1) go func() { diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod b/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod index 55aaff886d4..5a4b67ea24d 100644 --- a/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod +++ b/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod @@ -5,15 +5,15 @@ go 1.23.0 retract v0.32.2 // Contains unresolvable dependencies. 
require ( - github.com/cenkalti/backoff/v5 v5.0.2 + github.com/cenkalti/backoff/v5 v5.0.3 github.com/google/go-cmp v0.7.0 - github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/otel v1.37.0 - go.opentelemetry.io/otel/sdk v1.37.0 - go.opentelemetry.io/otel/sdk/metric v1.37.0 - go.opentelemetry.io/proto/otlp v1.7.0 - google.golang.org/grpc v1.73.0 - google.golang.org/protobuf v1.36.6 + github.com/stretchr/testify v1.11.1 + go.opentelemetry.io/otel v1.38.0 + go.opentelemetry.io/otel/sdk v1.38.0 + go.opentelemetry.io/otel/sdk/metric v1.38.0 + go.opentelemetry.io/proto/otlp v1.7.1 + google.golang.org/grpc v1.75.0 + google.golang.org/protobuf v1.36.8 ) require ( @@ -21,16 +21,16 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/otel/metric v1.37.0 // indirect - go.opentelemetry.io/otel/trace v1.37.0 // indirect - golang.org/x/net v0.41.0 // indirect - golang.org/x/sys v0.33.0 // indirect - golang.org/x/text v0.26.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/sys v0.35.0 // indirect + golang.org/x/text v0.28.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/go.sum 
b/exporters/otlp/otlpmetric/otlpmetrichttp/go.sum index 6ee0c70b7a4..dd1fde55b4a 100644 --- a/exporters/otlp/otlpmetric/otlpmetrichttp/go.sum +++ b/exporters/otlp/otlpmetric/otlpmetrichttp/go.sum @@ -1,5 +1,5 @@ -github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8= -github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= +github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= +github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -13,8 +13,8 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 h1:X5VWvz21y3gzm9Nw/kaUeku/1+uBhcekkmy4IkffJww= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -23,26 +23,28 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os= -go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo= -golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= -golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= -golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= -google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY= -google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= -google.golang.org/grpc v1.73.0 
h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= -google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4= +go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc= +google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= +google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/internal/otest/client.go b/exporters/otlp/otlpmetric/otlpmetrichttp/internal/otest/client.go index ca3ed751a0f..3db72c7f2d3 100644 --- a/exporters/otlp/otlpmetric/otlpmetrichttp/internal/otest/client.go +++ b/exporters/otlp/otlpmetric/otlpmetrichttp/internal/otest/client.go @@ -19,7 +19,7 @@ import ( "google.golang.org/protobuf/proto" "go.opentelemetry.io/otel" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" collpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1" cpb "go.opentelemetry.io/proto/otlp/common/v1" mpb "go.opentelemetry.io/proto/otlp/metrics/v1" diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata_test.go b/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata_test.go index 028e2c94f28..b2a9b03300e 100644 --- a/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata_test.go +++ b/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata_test.go @@ -17,7 +17,7 @@ import ( "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" cpb "go.opentelemetry.io/proto/otlp/common/v1" mpb "go.opentelemetry.io/proto/otlp/metrics/v1" rpb "go.opentelemetry.io/proto/otlp/resource/v1" diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/version.go b/exporters/otlp/otlpmetric/otlpmetrichttp/version.go index 1175a65755b..b8fe7cb2901 100644 --- a/exporters/otlp/otlpmetric/otlpmetrichttp/version.go +++ 
b/exporters/otlp/otlpmetric/otlpmetrichttp/version.go @@ -5,5 +5,5 @@ package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpme // Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf metrics exporter in use. func Version() string { - return "1.37.0" + return "1.38.0" } diff --git a/exporters/otlp/otlptrace/exporter.go b/exporters/otlp/otlptrace/exporter.go index 3f0a518ae0f..30446bd28b6 100644 --- a/exporters/otlp/otlptrace/exporter.go +++ b/exporters/otlp/otlptrace/exporter.go @@ -94,7 +94,7 @@ func NewUnstarted(client Client) *Exporter { } // MarshalLog is the marshaling function used by the logging system to represent this Exporter. -func (e *Exporter) MarshalLog() interface{} { +func (e *Exporter) MarshalLog() any { return struct { Type string Client Client diff --git a/exporters/otlp/otlptrace/exporter_test.go b/exporters/otlp/otlptrace/exporter_test.go index 5bb4ec413cc..7fcc1bde0c4 100644 --- a/exporters/otlp/otlptrace/exporter_test.go +++ b/exporters/otlp/otlptrace/exporter_test.go @@ -9,10 +9,10 @@ import ( "testing" "github.com/stretchr/testify/assert" + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" "go.opentelemetry.io/otel/sdk/trace/tracetest" - tracepb "go.opentelemetry.io/proto/otlp/trace/v1" ) type client struct { @@ -21,15 +21,15 @@ type client struct { var _ otlptrace.Client = &client{} -func (c *client) Start(ctx context.Context) error { +func (*client) Start(context.Context) error { return nil } -func (c *client) Stop(ctx context.Context) error { +func (*client) Stop(context.Context) error { return nil } -func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error { +func (c *client) UploadTraces(context.Context, []*tracepb.ResourceSpans) error { return c.uploadErr } diff --git a/exporters/otlp/otlptrace/go.mod b/exporters/otlp/otlptrace/go.mod index 542029f7a19..df71337dded 100644 --- 
a/exporters/otlp/otlptrace/go.mod +++ b/exporters/otlp/otlptrace/go.mod @@ -4,12 +4,12 @@ go 1.23.0 require ( github.com/google/go-cmp v0.7.0 - github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/otel v1.37.0 - go.opentelemetry.io/otel/sdk v1.37.0 - go.opentelemetry.io/otel/trace v1.37.0 - go.opentelemetry.io/proto/otlp v1.7.0 - google.golang.org/protobuf v1.36.6 + github.com/stretchr/testify v1.11.1 + go.opentelemetry.io/otel v1.38.0 + go.opentelemetry.io/otel/sdk v1.38.0 + go.opentelemetry.io/otel/trace v1.38.0 + go.opentelemetry.io/proto/otlp v1.7.1 + google.golang.org/protobuf v1.36.8 ) require ( @@ -19,8 +19,8 @@ require ( github.com/google/uuid v1.6.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/otel/metric v1.37.0 // indirect - golang.org/x/sys v0.33.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + golang.org/x/sys v0.35.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) @@ -31,3 +31,5 @@ replace go.opentelemetry.io/otel/sdk => ../../../sdk replace go.opentelemetry.io/otel/trace => ../../../trace replace go.opentelemetry.io/otel/metric => ../../../metric + +replace go.opentelemetry.io/otel/sdk/metric => ../../../sdk/metric diff --git a/exporters/otlp/otlptrace/go.sum b/exporters/otlp/otlptrace/go.sum index cd7fb890d51..0916adff580 100644 --- a/exporters/otlp/otlptrace/go.sum +++ b/exporters/otlp/otlptrace/go.sum @@ -17,18 +17,18 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod 
h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os= -go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo= +go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4= +go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/exporters/otlp/otlptrace/internal/tracetransform/attribute.go 
b/exporters/otlp/otlptrace/internal/tracetransform/attribute.go index ca4544f0dae..d9bfd6e1765 100644 --- a/exporters/otlp/otlptrace/internal/tracetransform/attribute.go +++ b/exporters/otlp/otlptrace/internal/tracetransform/attribute.go @@ -6,9 +6,10 @@ package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" import ( + commonpb "go.opentelemetry.io/proto/otlp/common/v1" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/resource" - commonpb "go.opentelemetry.io/proto/otlp/common/v1" ) // KeyValues transforms a slice of attribute KeyValues into OTLP key-values. diff --git a/exporters/otlp/otlptrace/internal/tracetransform/attribute_test.go b/exporters/otlp/otlptrace/internal/tracetransform/attribute_test.go index d67e3f0ec4d..03f26868e3c 100644 --- a/exporters/otlp/otlptrace/internal/tracetransform/attribute_test.go +++ b/exporters/otlp/otlptrace/internal/tracetransform/attribute_test.go @@ -7,9 +7,9 @@ import ( "testing" "github.com/stretchr/testify/assert" + commonpb "go.opentelemetry.io/proto/otlp/common/v1" "go.opentelemetry.io/otel/attribute" - commonpb "go.opentelemetry.io/proto/otlp/common/v1" ) type attributeTest struct { diff --git a/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go b/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go index 2e7690e43a2..43359c89449 100644 --- a/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go +++ b/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go @@ -4,8 +4,9 @@ package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" import ( - "go.opentelemetry.io/otel/sdk/instrumentation" commonpb "go.opentelemetry.io/proto/otlp/common/v1" + + "go.opentelemetry.io/otel/sdk/instrumentation" ) func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationScope { diff --git 
a/exporters/otlp/otlptrace/internal/tracetransform/instrumentation_test.go b/exporters/otlp/otlptrace/internal/tracetransform/instrumentation_test.go index 3e5f03b3b6d..e08abcaf9b6 100644 --- a/exporters/otlp/otlptrace/internal/tracetransform/instrumentation_test.go +++ b/exporters/otlp/otlptrace/internal/tracetransform/instrumentation_test.go @@ -7,10 +7,10 @@ import ( "testing" "github.com/stretchr/testify/assert" + commonpb "go.opentelemetry.io/proto/otlp/common/v1" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/instrumentation" - commonpb "go.opentelemetry.io/proto/otlp/common/v1" ) func TestInstrumentationScope(t *testing.T) { diff --git a/exporters/otlp/otlptrace/internal/tracetransform/resource.go b/exporters/otlp/otlptrace/internal/tracetransform/resource.go index db7b698a566..526bb5e070b 100644 --- a/exporters/otlp/otlptrace/internal/tracetransform/resource.go +++ b/exporters/otlp/otlptrace/internal/tracetransform/resource.go @@ -4,8 +4,9 @@ package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" import ( - "go.opentelemetry.io/otel/sdk/resource" resourcepb "go.opentelemetry.io/proto/otlp/resource/v1" + + "go.opentelemetry.io/otel/sdk/resource" ) // Resource transforms a Resource into an OTLP Resource. 
diff --git a/exporters/otlp/otlptrace/internal/tracetransform/span.go b/exporters/otlp/otlptrace/internal/tracetransform/span.go index bf27ef0220e..379bc8170df 100644 --- a/exporters/otlp/otlptrace/internal/tracetransform/span.go +++ b/exporters/otlp/otlptrace/internal/tracetransform/span.go @@ -6,12 +6,13 @@ package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptr import ( "math" + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/sdk/instrumentation" tracesdk "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/trace" - tracepb "go.opentelemetry.io/proto/otlp/trace/v1" ) // Spans transforms a slice of OpenTelemetry spans into a slice of OTLP @@ -154,7 +155,6 @@ func links(links []tracesdk.Link) []*tracepb.Span_Link { for _, otLink := range links { // This redefinition is necessary to prevent otLink.*ID[:] copies // being reused -- in short we need a new otLink per iteration. - otLink := otLink tid := otLink.SpanContext.TraceID() sid := otLink.SpanContext.SpanID() @@ -189,7 +189,7 @@ func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event { events := make([]*tracepb.Span_Event, len(es)) // Transform message events - for i := 0; i < len(es); i++ { + for i := range es { events[i] = &tracepb.Span_Event{ Name: es[i].Name, TimeUnixNano: uint64(max(0, es[i].Time.UnixNano())), // nolint:gosec // Overflow checked. 
diff --git a/exporters/otlp/otlptrace/internal/tracetransform/span_test.go b/exporters/otlp/otlptrace/internal/tracetransform/span_test.go index 563e60815f9..d420925fd8b 100644 --- a/exporters/otlp/otlptrace/internal/tracetransform/span_test.go +++ b/exporters/otlp/otlptrace/internal/tracetransform/span_test.go @@ -10,6 +10,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" "google.golang.org/protobuf/proto" "go.opentelemetry.io/otel/attribute" @@ -18,9 +19,8 @@ import ( "go.opentelemetry.io/otel/sdk/resource" tracesdk "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/sdk/trace/tracetest" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" "go.opentelemetry.io/otel/trace" - tracepb "go.opentelemetry.io/proto/otlp/trace/v1" ) func TestSpanKind(t *testing.T) { diff --git a/exporters/otlp/otlptrace/otlptracegrpc/client.go b/exporters/otlp/otlptrace/otlptracegrpc/client.go index 8236c995a9c..4b4cc76f4a9 100644 --- a/exporters/otlp/otlptrace/otlptracegrpc/client.go +++ b/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -9,6 +9,8 @@ import ( "sync" "time" + coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -20,8 +22,6 @@ import ( "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry" - coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" - tracepb "go.opentelemetry.io/proto/otlp/trace/v1" ) type client struct { @@ -289,7 +289,7 @@ func throttleDelay(s *status.Status) (bool, time.Duration) { } // MarshalLog is the 
marshaling function used by the logging system to represent this Client. -func (c *client) MarshalLog() interface{} { +func (c *client) MarshalLog() any { return struct { Type string Endpoint string diff --git a/exporters/otlp/otlptrace/otlptracegrpc/client_test.go b/exporters/otlp/otlptrace/otlptracegrpc/client_test.go index 6ea62c34607..1aed9c31d8e 100644 --- a/exporters/otlp/otlptrace/otlptracegrpc/client_test.go +++ b/exporters/otlp/otlptrace/otlptracegrpc/client_test.go @@ -14,6 +14,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" + commonpb "go.opentelemetry.io/proto/otlp/common/v1" "go.uber.org/goleak" "google.golang.org/grpc" "google.golang.org/grpc/backoff" @@ -29,8 +31,6 @@ import ( "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlptracetest" sdktrace "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/sdk/trace/tracetest" - coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" - commonpb "go.opentelemetry.io/proto/otlp/common/v1" ) func TestMain(m *testing.M) { @@ -173,7 +173,7 @@ func TestNewInvokeStartThenStopManyTimes(t *testing.T) { t.Cleanup(func() { require.NoError(t, exp.Shutdown(ctx)) }) // Invoke Start numerous times, should return errAlreadyStarted - for i := 0; i < 10; i++ { + for i := range 10 { if err := exp.Start(ctx); err == nil || !strings.Contains(err.Error(), "already started") { t.Fatalf("#%d unexpected Start error: %v", i, err) } @@ -183,7 +183,7 @@ func TestNewInvokeStartThenStopManyTimes(t *testing.T) { t.Fatalf("failed to Shutdown the exporter: %v", err) } // Invoke Shutdown numerous times - for i := 0; i < 10; i++ { + for i := range 10 { if err := exp.Shutdown(ctx); err != nil { t.Fatalf(`#%d got error (%v) expected none`, i, err) } diff --git a/exporters/otlp/otlptrace/otlptracegrpc/go.mod b/exporters/otlp/otlptrace/otlptracegrpc/go.mod index 3b07e893886..671f375ccfe 100644 --- 
a/exporters/otlp/otlptrace/otlptracegrpc/go.mod +++ b/exporters/otlp/otlptrace/otlptracegrpc/go.mod @@ -3,17 +3,17 @@ module go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc go 1.23.0 require ( - github.com/cenkalti/backoff/v5 v5.0.2 - github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/otel v1.37.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 - go.opentelemetry.io/otel/sdk v1.37.0 - go.opentelemetry.io/otel/trace v1.37.0 - go.opentelemetry.io/proto/otlp v1.7.0 + github.com/cenkalti/backoff/v5 v5.0.3 + github.com/stretchr/testify v1.11.1 + go.opentelemetry.io/otel v1.38.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 + go.opentelemetry.io/otel/sdk v1.38.0 + go.opentelemetry.io/otel/trace v1.38.0 + go.opentelemetry.io/proto/otlp v1.7.1 go.uber.org/goleak v1.3.0 - google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 - google.golang.org/grpc v1.73.0 - google.golang.org/protobuf v1.36.6 + google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 + google.golang.org/grpc v1.75.0 + google.golang.org/protobuf v1.36.8 ) require ( @@ -21,14 +21,14 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/otel/metric v1.37.0 // indirect - golang.org/x/net v0.41.0 // indirect - golang.org/x/sys v0.33.0 // indirect - golang.org/x/text v0.26.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/sys v0.35.0 // indirect + golang.org/x/text v0.28.0 // indirect + google.golang.org/genproto/googleapis/api 
v0.0.0-20250825161204-c5933d9347a5 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) @@ -41,3 +41,5 @@ replace go.opentelemetry.io/otel/exporters/otlp/otlptrace => ../ replace go.opentelemetry.io/otel/trace => ../../../../trace replace go.opentelemetry.io/otel/metric => ../../../../metric + +replace go.opentelemetry.io/otel/sdk/metric => ../../../../sdk/metric diff --git a/exporters/otlp/otlptrace/otlptracegrpc/go.sum b/exporters/otlp/otlptrace/otlptracegrpc/go.sum index e93143096a3..d94909d3d9f 100644 --- a/exporters/otlp/otlptrace/otlptracegrpc/go.sum +++ b/exporters/otlp/otlptrace/otlptracegrpc/go.sum @@ -1,5 +1,5 @@ -github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8= -github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= +github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= +github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -13,8 +13,8 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 h1:X5VWvz21y3gzm9Nw/kaUeku/1+uBhcekkmy4IkffJww= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod 
h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -23,30 +23,30 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= -go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= -go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os= -go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo= +go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4= +go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= -golang.org/x/net 
v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= -golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= -google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY= -google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= -google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= -google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY= 
+google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc= +google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= +google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/exporters/otlp/otlptrace/otlptracegrpc/internal/otlptracetest/client.go b/exporters/otlp/otlptrace/otlptracegrpc/internal/otlptracetest/client.go index b5189d062eb..f3a35d75694 100644 --- a/exporters/otlp/otlptrace/otlptracegrpc/internal/otlptracetest/client.go +++ b/exporters/otlp/otlptrace/otlptracegrpc/internal/otlptracetest/client.go @@ -107,7 +107,7 @@ func testClientStopManyTimes(t *testing.T, client otlptrace.Client) { const num int = 20 wg.Add(num) errs := make([]error, num) - for i := 0; i < num; i++ { + for i := range num { go func(idx int) { defer wg.Done() <-ch diff --git a/exporters/otlp/otlptrace/otlptracegrpc/internal/otlptracetest/otlptest.go b/exporters/otlp/otlptrace/otlptracegrpc/internal/otlptracetest/otlptest.go index 060c289e175..b3b00c86172 100644 --- a/exporters/otlp/otlptrace/otlptracegrpc/internal/otlptracetest/otlptest.go +++ 
b/exporters/otlp/otlptrace/otlptracegrpc/internal/otlptracetest/otlptest.go @@ -48,7 +48,7 @@ func RunEndToEndTest(ctx context.Context, t *testing.T, exp *otlptrace.Exporter, tr2 := tp2.Tracer("test-tracer2") // Now create few spans m := 4 - for i := 0; i < m; i++ { + for i := range m { _, span := tr1.Start(ctx, "AlwaysSample") span.SetAttributes(attribute.Int64("i", int64(i))) span.End() @@ -109,7 +109,7 @@ func RunEndToEndTest(ctx context.Context, t *testing.T, exp *otlptrace.Exporter, if got, want := len(attrMap), m; got != want { t.Fatalf("span attribute unique values: got %d want %d", got, want) } - for i := 0; i < m; i++ { + for i := range m { _, ok := attrMap[int64(i)] if !ok { t.Fatalf("span with attribute %d missing", i) diff --git a/exporters/otlp/otlptrace/otlptracegrpc/mock_collector_test.go b/exporters/otlp/otlptrace/otlptracegrpc/mock_collector_test.go index 2dda930447d..d22f272114c 100644 --- a/exporters/otlp/otlptrace/otlptracegrpc/mock_collector_test.go +++ b/exporters/otlp/otlptrace/otlptracegrpc/mock_collector_test.go @@ -11,12 +11,12 @@ import ( "testing" "github.com/stretchr/testify/require" + collectortracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" "google.golang.org/grpc" "google.golang.org/grpc/metadata" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlptracetest" - collectortracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" - tracepb "go.opentelemetry.io/proto/otlp/trace/v1" ) func makeMockCollector(t *testing.T, mockConfig *mockConfig) *mockCollector { diff --git a/exporters/otlp/otlptrace/otlptracehttp/client.go b/exporters/otlp/otlptrace/otlptracehttp/client.go index 583a8f86757..c7b1a551498 100644 --- a/exporters/otlp/otlptrace/otlptracehttp/client.go +++ b/exporters/otlp/otlptrace/otlptracehttp/client.go @@ -18,6 +18,8 @@ import ( "sync" "time" + coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" + tracepb 
"go.opentelemetry.io/proto/otlp/trace/v1" "google.golang.org/protobuf/proto" "go.opentelemetry.io/otel" @@ -25,14 +27,12 @@ import ( "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry" - coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" - tracepb "go.opentelemetry.io/proto/otlp/trace/v1" ) const contentTypeProto = "application/x-protobuf" var gzPool = sync.Pool{ - New: func() interface{} { + New: func() any { w := gzip.NewWriter(io.Discard) return w }, @@ -104,7 +104,7 @@ func NewClient(opts ...Option) otlptrace.Client { } // Start does nothing in a HTTP client. -func (d *client) Start(ctx context.Context) error { +func (*client) Start(ctx context.Context) error { // nothing to do select { case <-ctx.Done(): @@ -209,7 +209,7 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc return err } respStr := strings.TrimSpace(respData.String()) - if len(respStr) == 0 { + if respStr == "" { respStr = "(empty)" } bodyErr := fmt.Errorf("body: %s", respStr) @@ -230,7 +230,7 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc func (d *client) newRequest(body []byte) (request, error) { u := url.URL{Scheme: d.getScheme(), Host: d.cfg.Endpoint, Path: d.cfg.URLPath} - r, err := http.NewRequest(http.MethodPost, u.String(), nil) + r, err := http.NewRequest(http.MethodPost, u.String(), http.NoBody) if err != nil { return request{Request: r}, err } @@ -246,7 +246,7 @@ func (d *client) newRequest(body []byte) (request, error) { req := request{Request: r} switch Compression(d.cfg.Compression) { case NoCompression: - r.ContentLength = (int64)(len(body)) + r.ContentLength = int64(len(body)) req.bodyReader = bodyReader(body) case GzipCompression: // Ensure the content length is not used. 
@@ -274,7 +274,7 @@ func (d *client) newRequest(body []byte) (request, error) { } // MarshalLog is the marshaling function used by the logging system to represent this Client. -func (d *client) MarshalLog() interface{} { +func (d *client) MarshalLog() any { return struct { Type string Endpoint string @@ -340,7 +340,7 @@ func (e retryableError) Unwrap() error { return e.err } -func (e retryableError) As(target interface{}) bool { +func (e retryableError) As(target any) bool { if e.err == nil { return false } diff --git a/exporters/otlp/otlptrace/otlptracehttp/client_test.go b/exporters/otlp/otlptrace/otlptracehttp/client_test.go index 7f7ad55f674..8b33b4791a3 100644 --- a/exporters/otlp/otlptrace/otlptracehttp/client_test.go +++ b/exporters/otlp/otlptrace/otlptracehttp/client_test.go @@ -15,12 +15,12 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlptracetest" - coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" ) const ( diff --git a/exporters/otlp/otlptrace/otlptracehttp/go.mod b/exporters/otlp/otlptrace/otlptracehttp/go.mod index 9703b818342..6879c27f4ee 100644 --- a/exporters/otlp/otlptrace/otlptracehttp/go.mod +++ b/exporters/otlp/otlptrace/otlptracehttp/go.mod @@ -3,15 +3,15 @@ module go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp go 1.23.0 require ( - github.com/cenkalti/backoff/v5 v5.0.2 - github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/otel v1.37.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 - go.opentelemetry.io/otel/sdk v1.37.0 - go.opentelemetry.io/otel/trace v1.37.0 - go.opentelemetry.io/proto/otlp v1.7.0 - google.golang.org/grpc v1.73.0 - google.golang.org/protobuf v1.36.6 + 
github.com/cenkalti/backoff/v5 v5.0.3 + github.com/stretchr/testify v1.11.1 + go.opentelemetry.io/otel v1.38.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 + go.opentelemetry.io/otel/sdk v1.38.0 + go.opentelemetry.io/otel/trace v1.38.0 + go.opentelemetry.io/proto/otlp v1.7.1 + google.golang.org/grpc v1.75.0 + google.golang.org/protobuf v1.36.8 ) require ( @@ -19,15 +19,15 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/otel/metric v1.37.0 // indirect - golang.org/x/net v0.41.0 // indirect - golang.org/x/sys v0.33.0 // indirect - golang.org/x/text v0.26.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/sys v0.35.0 // indirect + golang.org/x/text v0.28.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) @@ -40,3 +40,5 @@ replace go.opentelemetry.io/otel/sdk => ../../../../sdk replace go.opentelemetry.io/otel/trace => ../../../../trace replace go.opentelemetry.io/otel/metric => ../../../../metric + +replace go.opentelemetry.io/otel/sdk/metric => ../../../../sdk/metric diff --git a/exporters/otlp/otlptrace/otlptracehttp/go.sum b/exporters/otlp/otlptrace/otlptracehttp/go.sum index e93143096a3..d94909d3d9f 100644 --- a/exporters/otlp/otlptrace/otlptracehttp/go.sum +++ 
b/exporters/otlp/otlptrace/otlptracehttp/go.sum @@ -1,5 +1,5 @@ -github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8= -github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= +github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= +github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -13,8 +13,8 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 h1:X5VWvz21y3gzm9Nw/kaUeku/1+uBhcekkmy4IkffJww= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -23,30 +23,30 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal 
v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= -go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= -go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os= -go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo= +go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4= +go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= -golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= -golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= -google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY= -google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod 
h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= -google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= -google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc= +google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= +google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= +google.golang.org/protobuf v1.36.8 
h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/exporters/otlp/otlptrace/otlptracehttp/internal/otlptracetest/client.go b/exporters/otlp/otlptrace/otlptracehttp/internal/otlptracetest/client.go index 715f15342dc..a6a435bc57e 100644 --- a/exporters/otlp/otlptrace/otlptracehttp/internal/otlptracetest/client.go +++ b/exporters/otlp/otlptrace/otlptracehttp/internal/otlptracetest/client.go @@ -107,7 +107,7 @@ func testClientStopManyTimes(t *testing.T, client otlptrace.Client) { const num int = 20 wg.Add(num) errs := make([]error, num) - for i := 0; i < num; i++ { + for i := range num { go func(idx int) { defer wg.Done() <-ch diff --git a/exporters/otlp/otlptrace/otlptracehttp/internal/otlptracetest/otlptest.go b/exporters/otlp/otlptrace/otlptracehttp/internal/otlptracetest/otlptest.go index 87d34b774d2..1662e0cb8de 100644 --- a/exporters/otlp/otlptrace/otlptracehttp/internal/otlptracetest/otlptest.go +++ b/exporters/otlp/otlptrace/otlptracehttp/internal/otlptracetest/otlptest.go @@ -48,7 +48,7 @@ func RunEndToEndTest(ctx context.Context, t *testing.T, exp *otlptrace.Exporter, tr2 := tp2.Tracer("test-tracer2") // Now create few spans m := 4 - for i := 0; i < m; i++ { + for i := range m { _, span := tr1.Start(ctx, "AlwaysSample") span.SetAttributes(attribute.Int64("i", int64(i))) span.End() @@ -109,7 +109,7 @@ func RunEndToEndTest(ctx context.Context, t *testing.T, exp *otlptrace.Exporter, if got, want := len(attrMap), m; got != want { t.Fatalf("span attribute unique values: got %d want %d", got, want) } - for i := 0; i < m; i++ { + for i := range m { _, ok 
:= attrMap[int64(i)] if !ok { t.Fatalf("span with attribute %d missing", i) diff --git a/exporters/otlp/otlptrace/otlptracehttp/mock_collector_test.go b/exporters/otlp/otlptrace/otlptracehttp/mock_collector_test.go index bf1140e5d50..30454c16729 100644 --- a/exporters/otlp/otlptrace/otlptracehttp/mock_collector_test.go +++ b/exporters/otlp/otlptrace/otlptracehttp/mock_collector_test.go @@ -18,12 +18,12 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + collectortracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" "google.golang.org/protobuf/proto" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlptracetest" - collectortracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" - tracepb "go.opentelemetry.io/proto/otlp/trace/v1" ) type mockCollector struct { diff --git a/exporters/otlp/otlptrace/version.go b/exporters/otlp/otlptrace/version.go index ed2ddce718b..3b79c1a0b5c 100644 --- a/exporters/otlp/otlptrace/version.go +++ b/exporters/otlp/otlptrace/version.go @@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. 
func Version() string { - return "1.37.0" + return "1.38.0" } diff --git a/exporters/prometheus/benchmark_test.go b/exporters/prometheus/benchmark_test.go index 27e63309032..47b7bf68765 100644 --- a/exporters/prometheus/benchmark_test.go +++ b/exporters/prometheus/benchmark_test.go @@ -22,7 +22,7 @@ func benchmarkCollect(b *testing.B, n int) { provider := metric.NewMeterProvider(metric.WithReader(exporter)) meter := provider.Meter("testmeter") - for i := 0; i < n; i++ { + for i := range n { counter, err := meter.Float64Counter(fmt.Sprintf("foo_%d", i)) require.NoError(b, err) counter.Add(ctx, float64(i)) diff --git a/exporters/prometheus/config.go b/exporters/prometheus/config.go index 52183884029..dc3542637be 100644 --- a/exporters/prometheus/config.go +++ b/exporters/prometheus/config.go @@ -4,11 +4,11 @@ package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus" import ( - "strings" "sync" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/otlptranslator" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/internal/global" @@ -19,6 +19,7 @@ import ( type config struct { registerer prometheus.Registerer disableTargetInfo bool + translationStrategy otlptranslator.TranslationStrategyOption withoutUnits bool withoutCounterSuffixes bool readerOpts []metric.ManualReaderOption @@ -27,9 +28,9 @@ type config struct { resourceAttributesFilter attribute.Filter } -var logDeprecatedLegacyScheme = sync.OnceFunc(func() { +var logTemporaryDefault = sync.OnceFunc(func() { global.Warn( - "prometheus exporter legacy scheme deprecated: support for the legacy NameValidationScheme will be removed in a future release", + "The default Prometheus naming translation strategy is planned to be changed from otlptranslator.NoUTF8EscapingWithSuffixes to otlptranslator.UnderscoreEscapingWithSuffixes in a future release. 
Add prometheus.WithTranslationStrategy(otlptranslator.NoUTF8EscapingWithSuffixes) to preserve the existing behavior, or prometheus.WithTranslationStrategy(otlptranslator.UnderscoreEscapingWithSuffixes) to opt into the future default behavior.", ) }) @@ -40,6 +41,30 @@ func newConfig(opts ...Option) config { cfg = opt.apply(cfg) } + if cfg.translationStrategy == "" { + // If no translation strategy was specified, deduce one based on the global + // NameValidationScheme. NOTE: this logic will change in the future, always + // defaulting to UnderscoreEscapingWithSuffixes + + //nolint:staticcheck // NameValidationScheme is deprecated but we still need it for now. + if model.NameValidationScheme == model.UTF8Validation { + logTemporaryDefault() + cfg.translationStrategy = otlptranslator.NoUTF8EscapingWithSuffixes + } else { + cfg.translationStrategy = otlptranslator.UnderscoreEscapingWithSuffixes + } + } else { + // Note, if the translation strategy implies that suffixes should be added, + // the user can still use WithoutUnits and WithoutCounterSuffixes to + // explicitly disable specific suffixes. We do not override their preference + // in this case. However if the chosen strategy disables suffixes, we should + // forcibly disable all of them. + if !cfg.translationStrategy.ShouldAddSuffixes() { + cfg.withoutCounterSuffixes = true + cfg.withoutUnits = true + } + } + if cfg.registerer == nil { cfg.registerer = prometheus.DefaultRegisterer } @@ -97,6 +122,30 @@ func WithoutTargetInfo() Option { }) } +// WithTranslationStrategy provides a standardized way to define how metric and +// label names should be handled during translation to Prometheus format. See: +// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.48.0/specification/metrics/sdk_exporters/prometheus.md#configuration. 
+// The recommended approach is to use either +// [otlptranslator.UnderscoreEscapingWithSuffixes] for full Prometheus-style +// compatibility or [otlptranslator.NoTranslation] for OpenTelemetry-style names. +// +// By default, if the NameValidationScheme variable in +// [github.com/prometheus/common/model] is "legacy", the default strategy is +// [otlptranslator.UnderscoreEscapingWithSuffixes]. If the validation scheme is +// "utf8", then currently the default Strategy is +// [otlptranslator.NoUTF8EscapingWithSuffixes]. +// +// Notice: It is planned that a future release of this SDK will change the +// default to always be [otlptranslator.UnderscoreEscapingWithSuffixes] in all +// circumstances. Users wanting a different translation strategy should specify +// it explicitly. +func WithTranslationStrategy(strategy otlptranslator.TranslationStrategyOption) Option { + return optionFunc(func(cfg config) config { + cfg.translationStrategy = strategy + return cfg + }) +} + // WithoutUnits disables exporter's addition of unit suffixes to metric names, // and will also prevent unit comments from being added in OpenMetrics once // unit comments are supported. @@ -105,6 +154,12 @@ func WithoutTargetInfo() Option { // conventions. For example, the counter metric request.duration, with unit // milliseconds would become request_duration_milliseconds_total. // With this option set, the name would instead be request_duration_total. +// +// Can be used in conjunction with [WithTranslationStrategy] to disable unit +// suffixes in strategies that would otherwise add suffixes, but this behavior +// is not recommended and may be removed in a future release. +// +// Deprecated: Use [WithTranslationStrategy] instead. func WithoutUnits() Option { return optionFunc(func(cfg config) config { cfg.withoutUnits = true @@ -112,12 +167,19 @@ func WithoutUnits() Option { }) } -// WithoutCounterSuffixes disables exporter's addition _total suffixes on counters. 
+// WithoutCounterSuffixes disables exporter's addition _total suffixes on +// counters. // // By default, metric names include a _total suffix to follow Prometheus naming // conventions. For example, the counter metric happy.people would become // happy_people_total. With this option set, the name would instead be // happy_people. +// +// Can be used in conjunction with [WithTranslationStrategy] to disable counter +// suffixes in strategies that would otherwise add suffixes, but this behavior +// is not recommended and may be removed in a future release. +// +// Deprecated: Use [WithTranslationStrategy] instead. func WithoutCounterSuffixes() Option { return optionFunc(func(cfg config) config { cfg.withoutCounterSuffixes = true @@ -134,22 +196,13 @@ func WithoutScopeInfo() Option { }) } -// WithNamespace configures the Exporter to prefix metric with the given namespace. -// Metadata metrics such as target_info are not prefixed since these -// have special behavior based on their name. +// WithNamespace configures the Exporter to prefix metric with the given +// namespace. Metadata metrics such as target_info are not prefixed since these +// have special behavior based on their name. Namespaces will be prepended even +// if [otlptranslator.NoTranslation] is set as a translation strategy. If the provided namespace +// is empty, nothing will be prepended to metric names. func WithNamespace(ns string) Option { return optionFunc(func(cfg config) config { - if model.NameValidationScheme != model.UTF8Validation { // nolint:staticcheck // We need this check to keep supporting the legacy scheme. - logDeprecatedLegacyScheme() - // Only sanitize if prometheus does not support UTF-8. - ns = model.EscapeName(ns, model.NameEscapingScheme) - } - if !strings.HasSuffix(ns, "_") { - // namespace and metric names should be separated with an underscore, - // adds a trailing underscore if there is not one already. 
- ns = ns + "_" - } - cfg.namespace = ns return cfg }) diff --git a/exporters/prometheus/config_test.go b/exporters/prometheus/config_test.go index c24ccd72e6d..80c1f367aa0 100644 --- a/exporters/prometheus/config_test.go +++ b/exporters/prometheus/config_test.go @@ -8,6 +8,8 @@ import ( "testing" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" + "github.com/prometheus/otlptranslator" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/sdk/metric" @@ -21,15 +23,17 @@ func TestNewConfig(t *testing.T) { producer := &noopProducer{} testCases := []struct { - name string - options []Option - wantConfig config + name string + options []Option + wantConfig config + legacyValidation bool }{ { name: "Default", options: nil, wantConfig: config{ - registerer: prometheus.DefaultRegisterer, + translationStrategy: otlptranslator.NoUTF8EscapingWithSuffixes, + registerer: prometheus.DefaultRegisterer, }, }, { @@ -38,7 +42,8 @@ func TestNewConfig(t *testing.T) { WithRegisterer(registry), }, wantConfig: config{ - registerer: registry, + translationStrategy: otlptranslator.NoUTF8EscapingWithSuffixes, + registerer: registry, }, }, { @@ -47,8 +52,9 @@ func TestNewConfig(t *testing.T) { WithAggregationSelector(aggregationSelector), }, wantConfig: config{ - registerer: prometheus.DefaultRegisterer, - readerOpts: []metric.ManualReaderOption{metric.WithAggregationSelector(aggregationSelector)}, + translationStrategy: otlptranslator.NoUTF8EscapingWithSuffixes, + registerer: prometheus.DefaultRegisterer, + readerOpts: []metric.ManualReaderOption{metric.WithAggregationSelector(aggregationSelector)}, }, }, { @@ -57,8 +63,9 @@ func TestNewConfig(t *testing.T) { WithProducer(producer), }, wantConfig: config{ - registerer: prometheus.DefaultRegisterer, - readerOpts: []metric.ManualReaderOption{metric.WithProducer(producer)}, + translationStrategy: otlptranslator.NoUTF8EscapingWithSuffixes, + registerer: prometheus.DefaultRegisterer, + readerOpts: 
[]metric.ManualReaderOption{metric.WithProducer(producer)}, }, }, { @@ -70,7 +77,8 @@ func TestNewConfig(t *testing.T) { }, wantConfig: config{ - registerer: registry, + translationStrategy: otlptranslator.NoUTF8EscapingWithSuffixes, + registerer: registry, readerOpts: []metric.ManualReaderOption{ metric.WithAggregationSelector(aggregationSelector), metric.WithProducer(producer), @@ -83,7 +91,8 @@ func TestNewConfig(t *testing.T) { WithRegisterer(nil), }, wantConfig: config{ - registerer: prometheus.DefaultRegisterer, + translationStrategy: otlptranslator.NoUTF8EscapingWithSuffixes, + registerer: prometheus.DefaultRegisterer, }, }, { @@ -92,8 +101,42 @@ func TestNewConfig(t *testing.T) { WithoutTargetInfo(), }, wantConfig: config{ - registerer: prometheus.DefaultRegisterer, - disableTargetInfo: true, + translationStrategy: otlptranslator.NoUTF8EscapingWithSuffixes, + registerer: prometheus.DefaultRegisterer, + disableTargetInfo: true, + }, + }, + { + name: "legacy validation mode default", + options: []Option{}, + legacyValidation: true, + wantConfig: config{ + translationStrategy: otlptranslator.UnderscoreEscapingWithSuffixes, + registerer: prometheus.DefaultRegisterer, + }, + }, + { + name: "legacy validation mode, unit suffixes disabled", + options: []Option{ + WithoutUnits(), + }, + legacyValidation: true, + wantConfig: config{ + translationStrategy: otlptranslator.UnderscoreEscapingWithSuffixes, + registerer: prometheus.DefaultRegisterer, + withoutUnits: true, + }, + }, + { + name: "legacy validation mode, counter suffixes disabled", + options: []Option{ + WithoutCounterSuffixes(), + }, + legacyValidation: true, + wantConfig: config{ + translationStrategy: otlptranslator.UnderscoreEscapingWithSuffixes, + registerer: prometheus.DefaultRegisterer, + withoutCounterSuffixes: true, }, }, { @@ -102,8 +145,45 @@ func TestNewConfig(t *testing.T) { WithoutUnits(), }, wantConfig: config{ - registerer: prometheus.DefaultRegisterer, - withoutUnits: true, + 
translationStrategy: otlptranslator.NoUTF8EscapingWithSuffixes, + registerer: prometheus.DefaultRegisterer, + withoutUnits: true, + }, + }, + { + name: "NoTranslation implies no suffixes", + options: []Option{ + WithTranslationStrategy(otlptranslator.NoTranslation), + }, + wantConfig: config{ + translationStrategy: otlptranslator.NoTranslation, + withoutUnits: true, + withoutCounterSuffixes: true, + registerer: prometheus.DefaultRegisterer, + }, + }, + { + name: "translation strategy does not override unit suffixes disabled", + options: []Option{ + WithTranslationStrategy(otlptranslator.UnderscoreEscapingWithSuffixes), + WithoutUnits(), + }, + wantConfig: config{ + translationStrategy: otlptranslator.UnderscoreEscapingWithSuffixes, + registerer: prometheus.DefaultRegisterer, + withoutUnits: true, + }, + }, + { + name: "translation strategy does not override counter suffixes disabled", + options: []Option{ + WithTranslationStrategy(otlptranslator.UnderscoreEscapingWithSuffixes), + WithoutCounterSuffixes(), + }, + wantConfig: config{ + translationStrategy: otlptranslator.UnderscoreEscapingWithSuffixes, + registerer: prometheus.DefaultRegisterer, + withoutCounterSuffixes: true, }, }, { @@ -112,18 +192,20 @@ func TestNewConfig(t *testing.T) { WithNamespace("test"), }, wantConfig: config{ - registerer: prometheus.DefaultRegisterer, - namespace: "test_", + translationStrategy: otlptranslator.NoUTF8EscapingWithSuffixes, + registerer: prometheus.DefaultRegisterer, + namespace: "test", }, }, { name: "with namespace with trailing underscore", options: []Option{ - WithNamespace("test_"), + WithNamespace("test"), }, wantConfig: config{ - registerer: prometheus.DefaultRegisterer, - namespace: "test_", + translationStrategy: otlptranslator.NoUTF8EscapingWithSuffixes, + registerer: prometheus.DefaultRegisterer, + namespace: "test", }, }, { @@ -132,13 +214,21 @@ func TestNewConfig(t *testing.T) { WithNamespace("test/"), }, wantConfig: config{ - registerer: 
prometheus.DefaultRegisterer, - namespace: "test/_", + translationStrategy: otlptranslator.NoUTF8EscapingWithSuffixes, + registerer: prometheus.DefaultRegisterer, + namespace: "test/", }, }, } for _, tt := range testCases { t.Run(tt.name, func(t *testing.T) { + if tt.legacyValidation { + //nolint:staticcheck + model.NameValidationScheme = model.LegacyValidation + } else { + //nolint:staticcheck + model.NameValidationScheme = model.UTF8Validation + } cfg := newConfig(tt.options...) // only check the length of readerOpts, since they are not comparable assert.Len(t, cfg.readerOpts, len(tt.wantConfig.readerOpts)) @@ -152,6 +242,6 @@ func TestNewConfig(t *testing.T) { type noopProducer struct{} -func (*noopProducer) Produce(ctx context.Context) ([]metricdata.ScopeMetrics, error) { +func (*noopProducer) Produce(context.Context) ([]metricdata.ScopeMetrics, error) { return nil, nil } diff --git a/exporters/prometheus/exporter.go b/exporters/prometheus/exporter.go index 7b44c12c541..0f29c0abbde 100644 --- a/exporters/prometheus/exporter.go +++ b/exporters/prometheus/exporter.go @@ -15,7 +15,7 @@ import ( "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/model" + "github.com/prometheus/otlptranslator" "google.golang.org/protobuf/proto" "go.opentelemetry.io/otel" @@ -27,20 +27,16 @@ import ( ) const ( - targetInfoMetricName = "target_info" targetInfoDescription = "Target metadata" scopeLabelPrefix = "otel_scope_" scopeNameLabel = scopeLabelPrefix + "name" scopeVersionLabel = scopeLabelPrefix + "version" scopeSchemaLabel = scopeLabelPrefix + "schema_url" - - traceIDExemplarKey = "trace_id" - spanIDExemplarKey = "span_id" ) var metricsPool = sync.Pool{ - New: func() interface{} { + New: func() any { return &metricdata.ResourceMetrics{} }, } @@ -52,7 +48,7 @@ type Exporter struct { } // MarshalLog returns logging data about the Exporter. 
-func (e *Exporter) MarshalLog() interface{} { +func (e *Exporter) MarshalLog() any { const t = "Prometheus exporter" if r, ok := e.Reader.(*metric.ManualReader); ok { @@ -93,12 +89,11 @@ type collector struct { targetInfo prometheus.Metric metricFamilies map[string]*dto.MetricFamily resourceKeyVals keyVals + metricNamer otlptranslator.MetricNamer + labelNamer otlptranslator.LabelNamer + unitNamer otlptranslator.UnitNamer } -// prometheus counters MUST have a _total suffix by default: -// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/compatibility/prometheus_and_openmetrics.md -const counterSuffix = "total" - // New returns a Prometheus Exporter. func New(opts ...Option) (*Exporter, error) { cfg := newConfig(opts...) @@ -108,6 +103,18 @@ func New(opts ...Option) (*Exporter, error) { // TODO (#3244): Enable some way to configure the reader, but not change temporality. reader := metric.NewManualReader(cfg.readerOpts...) + labelNamer := otlptranslator.LabelNamer{UTF8Allowed: !cfg.translationStrategy.ShouldEscape()} + escapedNamespace := cfg.namespace + if escapedNamespace != "" { + var err error + // If the namespace needs to be escaped, do that now when creating the new + // Collector object. The escaping is not persisted in the Config itself. 
+ escapedNamespace, err = labelNamer.Build(escapedNamespace) + if err != nil { + return nil, err + } + } + collector := &collector{ reader: reader, disableTargetInfo: cfg.disableTargetInfo, @@ -115,8 +122,11 @@ func New(opts ...Option) (*Exporter, error) { withoutCounterSuffixes: cfg.withoutCounterSuffixes, disableScopeInfo: cfg.disableScopeInfo, metricFamilies: make(map[string]*dto.MetricFamily), - namespace: cfg.namespace, + namespace: escapedNamespace, resourceAttributesFilter: cfg.resourceAttributesFilter, + metricNamer: otlptranslator.NewMetricNamer(escapedNamespace, cfg.translationStrategy), + unitNamer: otlptranslator.UnitNamer{UTF8Allowed: !cfg.translationStrategy.ShouldEscape()}, + labelNamer: labelNamer, } if err := cfg.registerer.Register(collector); err != nil { @@ -131,7 +141,7 @@ func New(opts ...Option) (*Exporter, error) { } // Describe implements prometheus.Collector. -func (c *collector) Describe(ch chan<- *prometheus.Desc) { +func (*collector) Describe(chan<- *prometheus.Desc) { // The Opentelemetry SDK doesn't have information on which will exist when the collector // is registered. By returning nothing we are an "unchecked" collector in Prometheus, // and assume responsibility for consistency of the metrics produced. @@ -164,7 +174,11 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { defer c.mu.Unlock() if c.targetInfo == nil && !c.disableTargetInfo { - targetInfo, err := createInfoMetric(targetInfoMetricName, targetInfoDescription, metrics.Resource) + targetInfo, err := c.createInfoMetric( + otlptranslator.TargetInfoMetricName, + targetInfoDescription, + metrics.Resource, + ) if err != nil { // If the target info metric is invalid, disable sending it. 
c.disableTargetInfo = true @@ -181,7 +195,11 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { } if c.resourceAttributesFilter != nil && len(c.resourceKeyVals.keys) == 0 { - c.createResourceAttributes(metrics.Resource) + err := c.createResourceAttributes(metrics.Resource) + if err != nil { + otel.Handle(err) + return + } } for _, scopeMetrics := range metrics.ScopeMetrics { @@ -195,7 +213,11 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { kv.keys = append(kv.keys, scopeNameLabel, scopeVersionLabel, scopeSchemaLabel) kv.vals = append(kv.vals, scopeMetrics.Scope.Name, scopeMetrics.Scope.Version, scopeMetrics.Scope.SchemaURL) - attrKeys, attrVals := getAttrs(scopeMetrics.Scope.Attributes) + attrKeys, attrVals, err := getAttrs(scopeMetrics.Scope.Attributes, c.labelNamer) + if err != nil { + otel.Handle(err) + continue + } for i := range attrKeys { attrKeys[i] = scopeLabelPrefix + attrKeys[i] } @@ -211,7 +233,13 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { if typ == nil { continue } - name := c.getName(m, typ) + name, err := c.getName(m) + if err != nil { + // TODO(#7066): Handle this error better. It's not clear this can be + // reached, bad metric names should / will be caught at creation time. 
+ otel.Handle(err) + continue + } drop, help := c.validateMetrics(name, m.Description, typ) if drop { @@ -224,21 +252,21 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { switch v := m.Data.(type) { case metricdata.Histogram[int64]: - addHistogramMetric(ch, v, m, name, kv) + addHistogramMetric(ch, v, m, name, kv, c.labelNamer) case metricdata.Histogram[float64]: - addHistogramMetric(ch, v, m, name, kv) + addHistogramMetric(ch, v, m, name, kv, c.labelNamer) case metricdata.ExponentialHistogram[int64]: - addExponentialHistogramMetric(ch, v, m, name, kv) + addExponentialHistogramMetric(ch, v, m, name, kv, c.labelNamer) case metricdata.ExponentialHistogram[float64]: - addExponentialHistogramMetric(ch, v, m, name, kv) + addExponentialHistogramMetric(ch, v, m, name, kv, c.labelNamer) case metricdata.Sum[int64]: - addSumMetric(ch, v, m, name, kv) + addSumMetric(ch, v, m, name, kv, c.labelNamer) case metricdata.Sum[float64]: - addSumMetric(ch, v, m, name, kv) + addSumMetric(ch, v, m, name, kv, c.labelNamer) case metricdata.Gauge[int64]: - addGaugeMetric(ch, v, m, name, kv) + addGaugeMetric(ch, v, m, name, kv, c.labelNamer) case metricdata.Gauge[float64]: - addGaugeMetric(ch, v, m, name, kv) + addGaugeMetric(ch, v, m, name, kv, c.labelNamer) } } } @@ -303,9 +331,14 @@ func addExponentialHistogramMetric[N int64 | float64]( m metricdata.Metrics, name string, kv keyVals, + labelNamer otlptranslator.LabelNamer, ) { for _, dp := range histogram.DataPoints { - keys, values := getAttrs(dp.Attributes) + keys, values, err := getAttrs(dp.Attributes, labelNamer) + if err != nil { + otel.Handle(err) + continue + } keys = append(keys, kv.keys...) values = append(values, kv.vals...) @@ -365,8 +398,7 @@ func addExponentialHistogramMetric[N int64 | float64]( otel.Handle(err) continue } - - // TODO(GiedriusS): add exemplars here after https://github.com/prometheus/client_golang/pull/1654#pullrequestreview-2434669425 is done. 
+ m = addExemplars(m, dp.Exemplars, labelNamer) ch <- m } } @@ -377,9 +409,14 @@ func addHistogramMetric[N int64 | float64]( m metricdata.Metrics, name string, kv keyVals, + labelNamer otlptranslator.LabelNamer, ) { for _, dp := range histogram.DataPoints { - keys, values := getAttrs(dp.Attributes) + keys, values, err := getAttrs(dp.Attributes, labelNamer) + if err != nil { + otel.Handle(err) + continue + } keys = append(keys, kv.keys...) values = append(values, kv.vals...) @@ -396,7 +433,7 @@ func addHistogramMetric[N int64 | float64]( otel.Handle(err) continue } - m = addExemplars(m, dp.Exemplars) + m = addExemplars(m, dp.Exemplars, labelNamer) ch <- m } } @@ -407,6 +444,7 @@ func addSumMetric[N int64 | float64]( m metricdata.Metrics, name string, kv keyVals, + labelNamer otlptranslator.LabelNamer, ) { valueType := prometheus.CounterValue if !sum.IsMonotonic { @@ -414,7 +452,11 @@ func addSumMetric[N int64 | float64]( } for _, dp := range sum.DataPoints { - keys, values := getAttrs(dp.Attributes) + keys, values, err := getAttrs(dp.Attributes, labelNamer) + if err != nil { + otel.Handle(err) + continue + } keys = append(keys, kv.keys...) values = append(values, kv.vals...) @@ -427,7 +469,7 @@ func addSumMetric[N int64 | float64]( // GaugeValues don't support Exemplars at this time // https://github.com/prometheus/client_golang/blob/aef8aedb4b6e1fb8ac1c90790645169125594096/prometheus/metric.go#L199 if valueType != prometheus.GaugeValue { - m = addExemplars(m, dp.Exemplars) + m = addExemplars(m, dp.Exemplars, labelNamer) } ch <- m } @@ -439,9 +481,14 @@ func addGaugeMetric[N int64 | float64]( m metricdata.Metrics, name string, kv keyVals, + labelNamer otlptranslator.LabelNamer, ) { for _, dp := range gauge.DataPoints { - keys, values := getAttrs(dp.Attributes) + keys, values, err := getAttrs(dp.Attributes, labelNamer) + if err != nil { + otel.Handle(err) + continue + } keys = append(keys, kv.keys...) values = append(values, kv.vals...) 
@@ -457,12 +504,12 @@ func addGaugeMetric[N int64 | float64]( // getAttrs converts the attribute.Set to two lists of matching Prometheus-style // keys and values. -func getAttrs(attrs attribute.Set) ([]string, []string) { +func getAttrs(attrs attribute.Set, labelNamer otlptranslator.LabelNamer) ([]string, []string, error) { keys := make([]string, 0, attrs.Len()) values := make([]string, 0, attrs.Len()) itr := attrs.Iter() - if model.NameValidationScheme == model.UTF8Validation { // nolint:staticcheck // We need this check to keep supporting the legacy scheme. + if labelNamer.UTF8Allowed { // Do not perform sanitization if prometheus supports UTF-8. for itr.Next() { kv := itr.Attribute() @@ -475,7 +522,11 @@ func getAttrs(attrs attribute.Set) ([]string, []string) { keysMap := make(map[string][]string) for itr.Next() { kv := itr.Attribute() - key := model.EscapeName(string(kv.Key), model.NameEscapingScheme) + key, err := labelNamer.Build(string(kv.Key)) + if err != nil { + // TODO(#7066) Handle this error better. + return nil, nil, err + } if _, ok := keysMap[key]; !ok { keysMap[key] = []string{kv.Value.Emit()} } else { @@ -489,97 +540,32 @@ func getAttrs(attrs attribute.Set) ([]string, []string) { values = append(values, strings.Join(vals, ";")) } } - return keys, values + return keys, values, nil } -func createInfoMetric(name, description string, res *resource.Resource) (prometheus.Metric, error) { - keys, values := getAttrs(*res.Set()) +func (c *collector) createInfoMetric(name, description string, res *resource.Resource) (prometheus.Metric, error) { + keys, values, err := getAttrs(*res.Set(), c.labelNamer) + if err != nil { + return nil, err + } desc := prometheus.NewDesc(name, description, keys, nil) return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), values...) 
} -func unitMapGetOrDefault(unit string) string { - if promUnit, ok := unitSuffixes[unit]; ok { - return promUnit - } - return unit -} - -var unitSuffixes = map[string]string{ - // Time - "d": "days", - "h": "hours", - "min": "minutes", - "s": "seconds", - "ms": "milliseconds", - "us": "microseconds", - "ns": "nanoseconds", - - // Bytes - "By": "bytes", - "KiBy": "kibibytes", - "MiBy": "mebibytes", - "GiBy": "gibibytes", - "TiBy": "tibibytes", - "KBy": "kilobytes", - "MBy": "megabytes", - "GBy": "gigabytes", - "TBy": "terabytes", - - // SI - "m": "meters", - "V": "volts", - "A": "amperes", - "J": "joules", - "W": "watts", - "g": "grams", - - // Misc - "Cel": "celsius", - "Hz": "hertz", - "1": "ratio", - "%": "percent", -} - -// getName returns the sanitized name, prefixed with the namespace and suffixed with unit. -func (c *collector) getName(m metricdata.Metrics, typ *dto.MetricType) string { - name := m.Name - if model.NameValidationScheme != model.UTF8Validation { // nolint:staticcheck // We need this check to keep supporting the legacy scheme. - // Only sanitize if prometheus does not support UTF-8. - logDeprecatedLegacyScheme() - name = model.EscapeName(name, model.NameEscapingScheme) +// getName returns the sanitized name, translated according to the selected +// TranslationStrategy and namespace option. +func (c *collector) getName(m metricdata.Metrics) (string, error) { + translatorMetric := otlptranslator.Metric{ + Name: m.Name, + Type: c.namingMetricType(m), } - addCounterSuffix := !c.withoutCounterSuffixes && *typ == dto.MetricType_COUNTER - if addCounterSuffix { - // Remove the _total suffix here, as we will re-add the total suffix - // later, and it needs to come after the unit suffix. - name = strings.TrimSuffix(name, counterSuffix) - // If the last character is an underscore, or would be converted to an underscore, trim it from the name. - // an underscore will be added back in later. 
- if convertsToUnderscore(rune(name[len(name)-1])) { - name = name[:len(name)-1] - } - } - if c.namespace != "" { - name = c.namespace + name - } - if suffix := unitMapGetOrDefault(m.Unit); suffix != "" && !c.withoutUnits && !strings.HasSuffix(name, suffix) { - name += "_" + suffix + if !c.withoutUnits { + translatorMetric.Unit = m.Unit } - if addCounterSuffix { - name += "_" + counterSuffix - } - return name + return c.metricNamer.Build(translatorMetric) } -// convertsToUnderscore returns true if the character would be converted to an -// underscore when the escaping scheme is underscore escaping. This is meant to -// capture any character that should be considered a "delimiter". -func convertsToUnderscore(b rune) bool { - return (b < 'a' || b > 'z') && (b < 'A' || b > 'Z') && b != ':' && (b < '0' || b > '9') -} - -func (c *collector) metricType(m metricdata.Metrics) *dto.MetricType { +func (*collector) metricType(m metricdata.Metrics) *dto.MetricType { switch v := m.Data.(type) { case metricdata.ExponentialHistogram[int64], metricdata.ExponentialHistogram[float64]: return dto.MetricType_HISTOGRAM.Enum() @@ -601,13 +587,47 @@ func (c *collector) metricType(m metricdata.Metrics) *dto.MetricType { return nil } -func (c *collector) createResourceAttributes(res *resource.Resource) { +// namingMetricType provides the metric type for naming purposes. +func (c *collector) namingMetricType(m metricdata.Metrics) otlptranslator.MetricType { + switch v := m.Data.(type) { + case metricdata.ExponentialHistogram[int64], metricdata.ExponentialHistogram[float64]: + return otlptranslator.MetricTypeHistogram + case metricdata.Histogram[int64], metricdata.Histogram[float64]: + return otlptranslator.MetricTypeHistogram + case metricdata.Sum[float64]: + // If counter suffixes are disabled, treat them like non-monotonic + // counters for the purposes of naming.
+ if v.IsMonotonic && !c.withoutCounterSuffixes { + return otlptranslator.MetricTypeMonotonicCounter + } + return otlptranslator.MetricTypeNonMonotonicCounter + case metricdata.Sum[int64]: + // If counter suffixes are disabled, treat them like non-monotonic + // counters for the purposes of naming. + if v.IsMonotonic && !c.withoutCounterSuffixes { + return otlptranslator.MetricTypeMonotonicCounter + } + return otlptranslator.MetricTypeNonMonotonicCounter + case metricdata.Gauge[int64], metricdata.Gauge[float64]: + return otlptranslator.MetricTypeGauge + case metricdata.Summary: + return otlptranslator.MetricTypeSummary + } + return otlptranslator.MetricTypeUnknown +} + +func (c *collector) createResourceAttributes(res *resource.Resource) error { c.mu.Lock() defer c.mu.Unlock() resourceAttrs, _ := res.Set().Filter(c.resourceAttributesFilter) - resourceKeys, resourceValues := getAttrs(resourceAttrs) + resourceKeys, resourceValues, err := getAttrs(resourceAttrs, c.labelNamer) + if err != nil { + return err + } + c.resourceKeyVals = keyVals{keys: resourceKeys, vals: resourceValues} + return nil } func (c *collector) validateMetrics(name, description string, metricType *dto.MetricType) (drop bool, help string) { @@ -648,16 +668,24 @@ func (c *collector) validateMetrics(name, description string, metricType *dto.Me return false, "" } -func addExemplars[N int64 | float64](m prometheus.Metric, exemplars []metricdata.Exemplar[N]) prometheus.Metric { +func addExemplars[N int64 | float64]( + m prometheus.Metric, + exemplars []metricdata.Exemplar[N], + labelNamer otlptranslator.LabelNamer, +) prometheus.Metric { if len(exemplars) == 0 { return m } promExemplars := make([]prometheus.Exemplar, len(exemplars)) for i, exemplar := range exemplars { - labels := attributesToLabels(exemplar.FilteredAttributes) + labels, err := attributesToLabels(exemplar.FilteredAttributes, labelNamer) + if err != nil { + otel.Handle(err) + return m + } // Overwrite any existing trace ID or span ID
attributes - labels[traceIDExemplarKey] = hex.EncodeToString(exemplar.TraceID[:]) - labels[spanIDExemplarKey] = hex.EncodeToString(exemplar.SpanID[:]) + labels[otlptranslator.ExemplarTraceIDKey] = hex.EncodeToString(exemplar.TraceID) + labels[otlptranslator.ExemplarSpanIDKey] = hex.EncodeToString(exemplar.SpanID) promExemplars[i] = prometheus.Exemplar{ Value: float64(exemplar.Value), Timestamp: exemplar.Time, @@ -674,11 +702,14 @@ func addExemplars[N int64 | float64](m prometheus.Metric, exemplars []metricdata return metricWithExemplar } -func attributesToLabels(attrs []attribute.KeyValue) prometheus.Labels { +func attributesToLabels(attrs []attribute.KeyValue, labelNamer otlptranslator.LabelNamer) (prometheus.Labels, error) { labels := make(map[string]string) for _, attr := range attrs { - key := model.EscapeName(string(attr.Key), model.NameEscapingScheme) - labels[key] = attr.Value.Emit() + name, err := labelNamer.Build(string(attr.Key)) + if err != nil { + return nil, err + } + labels[name] = attr.Value.Emit() } - return labels + return labels, nil } diff --git a/exporters/prometheus/exporter_test.go b/exporters/prometheus/exporter_test.go index be993fd892f..08f5c5a46d9 100644 --- a/exporters/prometheus/exporter_test.go +++ b/exporters/prometheus/exporter_test.go @@ -6,6 +6,7 @@ package prometheus import ( "context" "errors" + "fmt" "math" "os" "sync" @@ -15,7 +16,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/testutil" dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/model" + "github.com/prometheus/otlptranslator" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -25,7 +26,7 @@ import ( "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" 
"go.opentelemetry.io/otel/trace" ) @@ -37,12 +38,13 @@ func TestPrometheusExporter(t *testing.T) { recordMetrics func(ctx context.Context, meter otelmetric.Meter) options []Option expectedFile string - disableUTF8 bool + strategy otlptranslator.TranslationStrategyOption checkMetricFamilies func(t testing.TB, dtos []*dto.MetricFamily) }{ { name: "counter", expectedFile: "testdata/counter.txt", + strategy: otlptranslator.UnderscoreEscapingWithSuffixes, recordMetrics: func(ctx context.Context, meter otelmetric.Meter) { opt := otelmetric.WithAttributes( attribute.Key("A").String("B"), @@ -68,10 +70,15 @@ func TestPrometheusExporter(t *testing.T) { ) counter.Add(ctx, 5, otelmetric.WithAttributeSet(attrs2)) }, + options: []Option{ + WithNamespace("my.dotted.namespace"), + WithTranslationStrategy(otlptranslator.UnderscoreEscapingWithSuffixes), + }, }, { name: "counter that already has the unit suffix", - expectedFile: "testdata/counter_with_unit_suffix.txt", + expectedFile: "testdata/counter_noutf8_with_unit_suffix.txt", + strategy: otlptranslator.UnderscoreEscapingWithSuffixes, recordMetrics: func(ctx context.Context, meter otelmetric.Meter) { opt := otelmetric.WithAttributes( attribute.Key("A").String("B"), @@ -109,7 +116,7 @@ func TestPrometheusExporter(t *testing.T) { attribute.Key("F").Int(42), ) counter, err := meter.Float64Counter( - "foo", + "foo.dotted", otelmetric.WithDescription("a simple counter"), otelmetric.WithUnit("madeup"), ) @@ -127,9 +134,39 @@ func TestPrometheusExporter(t *testing.T) { counter.Add(ctx, 5, otelmetric.WithAttributeSet(attrs2)) }, }, + { + name: "counter with bracketed unit", + expectedFile: "testdata/counter_no_unit.txt", + recordMetrics: func(ctx context.Context, meter otelmetric.Meter) { + opt := otelmetric.WithAttributes( + attribute.Key("A").String("B"), + attribute.Key("C").String("D"), + attribute.Key("E").Bool(true), + attribute.Key("F").Int(42), + ) + counter, err := meter.Float64Counter( + "foo", + otelmetric.WithDescription("a 
simple counter"), + otelmetric.WithUnit("{spans}"), + ) + require.NoError(t, err) + counter.Add(ctx, 5, opt) + counter.Add(ctx, 10.3, opt) + counter.Add(ctx, 9, opt) + + attrs2 := attribute.NewSet( + attribute.Key("A").String("D"), + attribute.Key("C").String("B"), + attribute.Key("E").Bool(true), + attribute.Key("F").Int(42), + ) + counter.Add(ctx, 5, otelmetric.WithAttributeSet(attrs2)) + }, + }, { name: "counter that already has a total suffix", expectedFile: "testdata/counter.txt", + strategy: otlptranslator.UnderscoreEscapingWithSuffixes, recordMetrics: func(ctx context.Context, meter otelmetric.Meter) { opt := otelmetric.WithAttributes( attribute.Key("A").String("B"), @@ -155,6 +192,10 @@ func TestPrometheusExporter(t *testing.T) { ) counter.Add(ctx, 5, otelmetric.WithAttributeSet(attrs2)) }, + options: []Option{ + WithNamespace("my.dotted.namespace"), + WithTranslationStrategy(otlptranslator.UnderscoreEscapingWithSuffixes), + }, }, { name: "counter with suffixes disabled", @@ -194,14 +235,13 @@ func TestPrometheusExporter(t *testing.T) { attribute.Key("A").String("B"), attribute.Key("C").String("D"), ) - gauge, err := meter.Float64UpDownCounter( + gauge, err := meter.Float64Gauge( "bar", otelmetric.WithDescription("a fun little gauge"), otelmetric.WithUnit("1"), ) require.NoError(t, err) - gauge.Add(ctx, 1.0, opt) - gauge.Add(ctx, -.25, opt) + gauge.Record(ctx, .75, opt) }, }, { @@ -271,7 +311,7 @@ func TestPrometheusExporter(t *testing.T) { { name: "sanitized attributes to labels", expectedFile: "testdata/sanitized_labels.txt", - disableUTF8: true, + strategy: otlptranslator.UnderscoreEscapingWithSuffixes, options: []Option{WithoutUnits()}, recordMetrics: func(ctx context.Context, meter otelmetric.Meter) { opt := otelmetric.WithAttributes( @@ -398,14 +438,13 @@ func TestPrometheusExporter(t *testing.T) { attribute.Key("A").String("B"), attribute.Key("C").String("D"), ) - gauge, err := meter.Int64UpDownCounter( + gauge, err := meter.Int64Gauge( "bar", 
otelmetric.WithDescription("a fun little gauge"), otelmetric.WithUnit("1"), ) require.NoError(t, err) - gauge.Add(ctx, 2, opt) - gauge.Add(ctx, -1, opt) + gauge.Record(ctx, 1, opt) }, }, { @@ -490,6 +529,43 @@ func TestPrometheusExporter(t *testing.T) { { name: "counter utf-8", expectedFile: "testdata/counter_utf8.txt", + options: []Option{ + WithNamespace("my.dotted.namespace"), + WithTranslationStrategy(otlptranslator.NoUTF8EscapingWithSuffixes), + }, + recordMetrics: func(ctx context.Context, meter otelmetric.Meter) { + opt := otelmetric.WithAttributes( + attribute.Key("A.G").String("B"), + attribute.Key("C.H").String("D"), + attribute.Key("E.I").Bool(true), + attribute.Key("F.J").Int(42), + ) + counter, err := meter.Float64Counter( + "foo.things", + otelmetric.WithDescription("a simple counter"), + otelmetric.WithUnit("s"), + ) + require.NoError(t, err) + counter.Add(ctx, 5, opt) + counter.Add(ctx, 10.3, opt) + counter.Add(ctx, 9, opt) + + attrs2 := attribute.NewSet( + attribute.Key("A.G").String("D"), + attribute.Key("C.H").String("B"), + attribute.Key("E.I").Bool(true), + attribute.Key("F.J").Int(42), + ) + counter.Add(ctx, 5, otelmetric.WithAttributeSet(attrs2)) + }, + }, + { + name: "counter utf-8 notranslation", + expectedFile: "testdata/counter_utf8_notranslation.txt", + strategy: otlptranslator.NoTranslation, + options: []Option{ + WithNamespace("my.dotted.namespace"), + }, recordMetrics: func(ctx context.Context, meter otelmetric.Meter) { opt := otelmetric.WithAttributes( attribute.Key("A.G").String("B"), @@ -556,16 +632,10 @@ func TestPrometheusExporter(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - if tc.disableUTF8 { - model.NameValidationScheme = model.LegacyValidation // nolint:staticcheck // We need this check to keep supporting the legacy scheme. 
- defer func() { - // Reset to defaults - model.NameValidationScheme = model.UTF8Validation // nolint:staticcheck // We need this check to keep supporting the legacy scheme. - }() - } ctx := context.Background() registry := prometheus.NewRegistry() - exporter, err := New(append(tc.options, WithRegisterer(registry))...) + opts := append(tc.options, WithRegisterer(registry), WithTranslationStrategy(tc.strategy)) + exporter, err := New(opts...) require.NoError(t, err) var res *resource.Resource @@ -632,7 +702,10 @@ func TestPrometheusExporter(t *testing.T) { func TestMultiScopes(t *testing.T) { ctx := context.Background() registry := prometheus.NewRegistry() - exporter, err := New(WithRegisterer(registry)) + exporter, err := New( + WithTranslationStrategy(otlptranslator.UnderscoreEscapingWithSuffixes), + WithRegisterer(registry), + ) require.NoError(t, err) res, err := resource.New(ctx, @@ -855,7 +928,7 @@ func TestDuplicateMetrics(t *testing.T) { }, { name: "conflict_type_counter_and_updowncounter", - recordMetrics: func(ctx context.Context, meterA, meterB otelmetric.Meter) { + recordMetrics: func(ctx context.Context, meterA, _ otelmetric.Meter) { counter, err := meterA.Int64Counter("foo", otelmetric.WithUnit("By"), otelmetric.WithDescription("meter foo")) @@ -876,7 +949,7 @@ func TestDuplicateMetrics(t *testing.T) { }, { name: "conflict_type_histogram_and_updowncounter", - recordMetrics: func(ctx context.Context, meterA, meterB otelmetric.Meter) { + recordMetrics: func(ctx context.Context, meterA, _ otelmetric.Meter) { fooA, err := meterA.Int64UpDownCounter("foo", otelmetric.WithUnit("By"), otelmetric.WithDescription("meter gauge foo")) @@ -901,7 +974,15 @@ func TestDuplicateMetrics(t *testing.T) { // initialize registry exporter ctx := context.Background() registry := prometheus.NewRegistry() - exporter, err := New(append(tc.options, WithRegisterer(registry))...) 
+ // This test does not set the Translation Strategy, so it defaults to + // UnderscoreEscapingWithSuffixes. + opts := append( + []Option{ + WithTranslationStrategy(otlptranslator.UnderscoreEscapingWithSuffixes), + }, + tc.options..., + ) + exporter, err := New(append(opts, WithRegisterer(registry))...) require.NoError(t, err) // initialize resource @@ -958,7 +1039,7 @@ func TestCollectorConcurrentSafe(t *testing.T) { var wg sync.WaitGroup concurrencyLevel := 10 - for i := 0; i < concurrencyLevel; i++ { + for range concurrencyLevel { wg.Add(1) go func() { defer wg.Done() @@ -978,7 +1059,7 @@ func TestShutdownExporter(t *testing.T) { ctx := context.Background() registry := prometheus.NewRegistry() - for i := 0; i < 3; i++ { + for range 3 { exporter, err := New(WithRegisterer(registry)) require.NoError(t, err) provider := metric.NewMeterProvider( @@ -1011,28 +1092,27 @@ func TestExemplars(t *testing.T) { attribute.Key("F.4").Int(42), ) expectedNonEscapedLabels := map[string]string{ - traceIDExemplarKey: "01000000000000000000000000000000", - spanIDExemplarKey: "0100000000000000", - "A.1": "B", - "C.2": "D", - "E.3": "true", - "F.4": "42", + otlptranslator.ExemplarTraceIDKey: "01000000000000000000000000000000", + otlptranslator.ExemplarSpanIDKey: "0100000000000000", + "A.1": "B", + "C.2": "D", + "E.3": "true", + "F.4": "42", } expectedEscapedLabels := map[string]string{ - traceIDExemplarKey: "01000000000000000000000000000000", - spanIDExemplarKey: "0100000000000000", - "A_1": "B", - "C_2": "D", - "E_3": "true", - "F_4": "42", + otlptranslator.ExemplarTraceIDKey: "01000000000000000000000000000000", + otlptranslator.ExemplarSpanIDKey: "0100000000000000", + "A_1": "B", + "C_2": "D", + "E_3": "true", + "F_4": "42", } for _, tc := range []struct { name string recordMetrics func(ctx context.Context, meter otelmetric.Meter) expectedExemplarValue float64 expectedLabels map[string]string - escapingScheme model.EscapingScheme - validationScheme model.ValidationScheme + strategy 
otlptranslator.TranslationStrategyOption }{ { name: "escaped counter", @@ -1043,8 +1123,7 @@ func TestExemplars(t *testing.T) { }, expectedExemplarValue: 9, expectedLabels: expectedEscapedLabels, - escapingScheme: model.UnderscoreEscaping, - validationScheme: model.LegacyValidation, + strategy: otlptranslator.UnderscoreEscapingWithSuffixes, }, { name: "escaped histogram", @@ -1055,8 +1134,7 @@ func TestExemplars(t *testing.T) { }, expectedExemplarValue: 9, expectedLabels: expectedEscapedLabels, - escapingScheme: model.UnderscoreEscaping, - validationScheme: model.LegacyValidation, + strategy: otlptranslator.UnderscoreEscapingWithSuffixes, }, { name: "non-escaped counter", @@ -1067,8 +1145,7 @@ func TestExemplars(t *testing.T) { }, expectedExemplarValue: 9, expectedLabels: expectedNonEscapedLabels, - escapingScheme: model.NoEscaping, - validationScheme: model.UTF8Validation, + strategy: otlptranslator.NoTranslation, }, { name: "non-escaped histogram", @@ -1079,24 +1156,30 @@ func TestExemplars(t *testing.T) { }, expectedExemplarValue: 9, expectedLabels: expectedNonEscapedLabels, - escapingScheme: model.NoEscaping, - validationScheme: model.UTF8Validation, + strategy: otlptranslator.NoTranslation, + }, + { + name: "exponential histogram", + recordMetrics: func(ctx context.Context, meter otelmetric.Meter) { + hist, err := meter.Int64Histogram("exponential_histogram") + require.NoError(t, err) + hist.Record(ctx, 9, attrsOpt) + }, + expectedExemplarValue: 9, + expectedLabels: expectedNonEscapedLabels, + strategy: otlptranslator.NoTranslation, }, } { t.Run(tc.name, func(t *testing.T) { - originalEscapingScheme := model.NameEscapingScheme - originalValidationScheme := model.NameValidationScheme // nolint:staticcheck // We need this check to keep supporting the legacy scheme. - model.NameEscapingScheme = tc.escapingScheme - model.NameValidationScheme = tc.validationScheme // nolint:staticcheck // We need this check to keep supporting the legacy scheme. 
- // Restore original value after the test is complete - defer func() { - model.NameEscapingScheme = originalEscapingScheme - model.NameValidationScheme = originalValidationScheme // nolint:staticcheck // We need this check to keep supporting the legacy scheme. - }() // initialize registry exporter ctx := context.Background() registry := prometheus.NewRegistry() - exporter, err := New(WithRegisterer(registry), WithoutTargetInfo(), WithoutScopeInfo()) + exporter, err := New( + WithRegisterer(registry), + WithoutTargetInfo(), + WithoutScopeInfo(), + WithTranslationStrategy(tc.strategy), + ) require.NoError(t, err) // initialize resource @@ -1113,13 +1196,24 @@ func TestExemplars(t *testing.T) { metric.WithReader(exporter), metric.WithResource(res), metric.WithView(metric.NewView( - metric.Instrument{Name: "*"}, + metric.Instrument{Name: "foo"}, metric.Stream{ // filter out all attributes so they are added as filtered // attributes to the exemplar AttributeFilter: attribute.NewAllowKeysFilter(), }, - )), + ), + ), + metric.WithView(metric.NewView( + metric.Instrument{Name: "exponential_histogram"}, + metric.Stream{ + Aggregation: metric.AggregationBase2ExponentialHistogram{ + MaxSize: 20, + }, + AttributeFilter: attribute.NewAllowKeysFilter(), + }, + ), + ), ) meter := provider.Meter("meter", otelmetric.WithInstrumentationVersion("v0.1.0")) @@ -1148,16 +1242,23 @@ func TestExemplars(t *testing.T) { case dto.MetricType_COUNTER: exemplar = metric.GetCounter().GetExemplar() case dto.MetricType_HISTOGRAM: - for _, b := range metric.GetHistogram().GetBucket() { + h := metric.GetHistogram() + for _, b := range h.GetBucket() { if b.GetExemplar() != nil { exemplar = b.GetExemplar() continue } } + if h.GetZeroThreshold() != 0 || h.GetZeroCount() != 0 || + len(h.PositiveSpan) != 0 || len(h.NegativeSpan) != 0 { + require.NotNil(t, h.Exemplars) + exemplar = h.Exemplars[0] + } } require.NotNil(t, exemplar) require.Equal(t, tc.expectedExemplarValue, exemplar.GetValue()) 
require.Len(t, exemplar.GetLabel(), len(tc.expectedLabels)) + for _, label := range exemplar.GetLabel() { val, ok := tc.expectedLabels[label.GetName()] require.True(t, ok) @@ -1241,7 +1342,14 @@ func TestExponentialHistogramScaleValidation(t *testing.T) { Description: "test", } - addExponentialHistogramMetric(ch, histogram, m, "test_histogram", keyVals{}) + addExponentialHistogramMetric( + ch, + histogram, + m, + "test_histogram", + keyVals{}, + otlptranslator.LabelNamer{}, + ) assert.Error(t, capturedError) assert.Contains(t, capturedError.Error(), "scale -5 is below minimum") select { @@ -1398,7 +1506,14 @@ func TestExponentialHistogramHighScaleDownscaling(t *testing.T) { } // This should not produce any errors and should properly downscale buckets - addExponentialHistogramMetric(ch, histogram, m, "test_high_scale_histogram", keyVals{}) + addExponentialHistogramMetric( + ch, + histogram, + m, + "test_high_scale_histogram", + keyVals{}, + otlptranslator.LabelNamer{}, + ) // Verify a metric was produced select { @@ -1453,7 +1568,14 @@ func TestExponentialHistogramHighScaleDownscaling(t *testing.T) { } // This should not produce any errors and should properly downscale buckets - addExponentialHistogramMetric(ch, histogram, m, "test_very_high_scale_histogram", keyVals{}) + addExponentialHistogramMetric( + ch, + histogram, + m, + "test_very_high_scale_histogram", + keyVals{}, + otlptranslator.LabelNamer{}, + ) // Verify a metric was produced select { @@ -1508,7 +1630,14 @@ func TestExponentialHistogramHighScaleDownscaling(t *testing.T) { } // This should handle negative buckets correctly - addExponentialHistogramMetric(ch, histogram, m, "test_histogram_with_negative_buckets", keyVals{}) + addExponentialHistogramMetric( + ch, + histogram, + m, + "test_histogram_with_negative_buckets", + keyVals{}, + otlptranslator.LabelNamer{}, + ) // Verify a metric was produced select { @@ -1557,7 +1686,14 @@ func TestExponentialHistogramHighScaleDownscaling(t *testing.T) { } // This 
should handle int64 exponential histograms correctly - addExponentialHistogramMetric(ch, histogram, m, "test_int64_exponential_histogram", keyVals{}) + addExponentialHistogramMetric( + ch, + histogram, + m, + "test_int64_exponential_histogram", + keyVals{}, + otlptranslator.LabelNamer{}, + ) // Verify a metric was produced select { @@ -1608,3 +1744,154 @@ func TestDownscaleExponentialBucketEdgeCases(t *testing.T) { assert.Equal(t, expected, result) }) } + +// TestEscapingErrorHandling increases test coverage by exercising some error +// conditions. +func TestEscapingErrorHandling(t *testing.T) { + testCases := []struct { + name string + namespace string + counterName string + customScopeAttrs []attribute.KeyValue + customResourceAttrs []attribute.KeyValue + labelName string + expectNewErr string + expectMetricErr string + checkMetricFamilies func(t testing.TB, dtos []*dto.MetricFamily) + }{ + { + name: "simple happy path", + counterName: "foo", + checkMetricFamilies: func(t testing.TB, mfs []*dto.MetricFamily) { + require.Len(t, mfs, 2) + for _, mf := range mfs { + if mf.GetName() == "target_info" { + continue + } + require.Equal(t, "foo_seconds_total", mf.GetName()) + } + }, + }, + { + name: "bad namespace", + namespace: "$%^&", + counterName: "foo", + expectNewErr: `normalization for label name "$%^&" resulted in invalid name "____"`, + }, + { + name: "good namespace, names should be escaped", + namespace: "my-strange-namespace", + counterName: "foo", + labelName: "bar", + checkMetricFamilies: func(t testing.TB, mfs []*dto.MetricFamily) { + for _, mf := range mfs { + if mf.GetName() == "target_info" { + continue + } + require.Contains(t, mf.GetName(), "my_strange_namespace") + require.NotContains(t, mf.GetName(), "my-strange-namespace") + } + }, + }, + { + name: "bad resource attribute", + counterName: "foo", + customResourceAttrs: []attribute.KeyValue{ + attribute.Key("$%^&").String("B"), + }, + checkMetricFamilies: func(t testing.TB, mfs []*dto.MetricFamily) { 
+ require.Empty(t, mfs) + }, + }, + { + name: "bad scope metric attribute", + counterName: "foo", + customScopeAttrs: []attribute.KeyValue{ + attribute.Key("$%^&").String("B"), + }, + checkMetricFamilies: func(t testing.TB, mfs []*dto.MetricFamily) { + require.Len(t, mfs, 1) + require.Equal(t, "target_info", mfs[0].GetName()) + }, + }, + { + name: "bad translated metric name", + counterName: "$%^&", + expectMetricErr: `invalid instrument name: $%^&: must start with a letter`, + }, + { + // label names are not translated and therefore not checked until + // collection time, and there is no place to catch and return this error. + // Instead we drop the metric. + name: "bad translated label name", + counterName: "foo", + labelName: "$%^&", + checkMetricFamilies: func(t testing.TB, mfs []*dto.MetricFamily) { + require.Len(t, mfs, 1) + require.Equal(t, "target_info", mfs[0].GetName()) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctx := context.Background() + registry := prometheus.NewRegistry() + + sc := trace.NewSpanContext(trace.SpanContextConfig{ + SpanID: trace.SpanID{0o1}, + TraceID: trace.TraceID{0o1}, + TraceFlags: trace.FlagsSampled, + }) + ctx = trace.ContextWithSpanContext(ctx, sc) + + exporter, err := New( + WithRegisterer(registry), + WithTranslationStrategy(otlptranslator.UnderscoreEscapingWithSuffixes), + WithNamespace(tc.namespace), + WithResourceAsConstantLabels(attribute.NewDenyKeysFilter()), + ) + if tc.expectNewErr != "" { + require.ErrorContains(t, err, tc.expectNewErr) + return + } + require.NoError(t, err) + + res, err := resource.New(ctx, + resource.WithAttributes(semconv.ServiceName("prometheus_test")), + resource.WithAttributes(semconv.TelemetrySDKVersion("latest")), + resource.WithAttributes(tc.customResourceAttrs...), + ) + require.NoError(t, err) + provider := metric.NewMeterProvider( + metric.WithReader(exporter), + metric.WithResource(res), + ) + + fooCounter, err := provider.Meter( + "meterfoo", 
+ otelmetric.WithInstrumentationVersion("v0.1.0"), + otelmetric.WithInstrumentationAttributes(tc.customScopeAttrs...), + ). + Int64Counter( + tc.counterName, + otelmetric.WithUnit("s"), + otelmetric.WithDescription(fmt.Sprintf(`meter %q counter`, tc.counterName))) + if tc.expectMetricErr != "" { + require.ErrorContains(t, err, tc.expectMetricErr) + return + } + require.NoError(t, err) + var opts []otelmetric.AddOption + if tc.labelName != "" { + opts = append(opts, otelmetric.WithAttributes(attribute.String(tc.labelName, "foo"))) + } + fooCounter.Add(ctx, 100, opts...) + got, err := registry.Gather() + require.NoError(t, err) + if tc.checkMetricFamilies != nil { + tc.checkMetricFamilies(t, got) + } + }) + } +} diff --git a/exporters/prometheus/go.mod b/exporters/prometheus/go.mod index 811296379b7..fea302d962c 100644 --- a/exporters/prometheus/go.mod +++ b/exporters/prometheus/go.mod @@ -2,17 +2,22 @@ module go.opentelemetry.io/otel/exporters/prometheus go 1.23.0 +// v0.59.0 produces incorrect metric names when bracketed units are used. 
+// https://github.com/open-telemetry/opentelemetry-go/issues/7039 +retract v0.59.0 + require ( - github.com/prometheus/client_golang v1.22.0 + github.com/prometheus/client_golang v1.23.0 github.com/prometheus/client_model v0.6.2 github.com/prometheus/common v0.65.0 - github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/otel v1.37.0 - go.opentelemetry.io/otel/metric v1.37.0 - go.opentelemetry.io/otel/sdk v1.37.0 - go.opentelemetry.io/otel/sdk/metric v1.37.0 - go.opentelemetry.io/otel/trace v1.37.0 - google.golang.org/protobuf v1.36.6 + github.com/prometheus/otlptranslator v0.0.2 + github.com/stretchr/testify v1.11.1 + go.opentelemetry.io/otel v1.38.0 + go.opentelemetry.io/otel/metric v1.38.0 + go.opentelemetry.io/otel/sdk v1.38.0 + go.opentelemetry.io/otel/sdk/metric v1.38.0 + go.opentelemetry.io/otel/trace v1.38.0 + google.golang.org/protobuf v1.36.8 ) require ( @@ -22,12 +27,13 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/uuid v1.6.0 // indirect + github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/procfs v0.16.1 // indirect + github.com/prometheus/procfs v0.17.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - golang.org/x/sys v0.33.0 // indirect + golang.org/x/sys v0.35.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/exporters/prometheus/go.sum b/exporters/prometheus/go.sum index 899399caf25..cfd7532eefd 100644 --- a/exporters/prometheus/go.sum +++ b/exporters/prometheus/go.sum @@ -13,6 +13,8 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -23,24 +25,28 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= -github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= +github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= -github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= -github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= +github.com/prometheus/otlptranslator v0.0.2 h1:+1CdeLVrRQ6Psmhnobldo0kTp96Rj80DRXRd5OSnMEQ= 
+github.com/prometheus/otlptranslator v0.0.2/go.mod h1:P8AwMgdD7XEr6QRUJ2QWLpiAZTgTE2UYgjlu3svompI= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c 
h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/exporters/prometheus/testdata/conflict_help_two_counters_1.txt b/exporters/prometheus/testdata/conflict_help_two_counters_1.txt index 94cf1a89e74..bfc629ed683 100644 --- a/exporters/prometheus/testdata/conflict_help_two_counters_1.txt +++ b/exporters/prometheus/testdata/conflict_help_two_counters_1.txt @@ -4,4 +4,4 @@ bar_bytes_total{otel_scope_name="ma",otel_scope_schema_url="",otel_scope_version bar_bytes_total{otel_scope_name="mb",otel_scope_schema_url="",otel_scope_version="v0.1.0",type="bar"} 100 # HELP target_info Target metadata # TYPE target_info gauge -target_info{"service.name"="prometheus_test","telemetry.sdk.language"="go","telemetry.sdk.name"="opentelemetry","telemetry.sdk.version"="latest"} 1 +target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 diff --git a/exporters/prometheus/testdata/conflict_help_two_counters_2.txt b/exporters/prometheus/testdata/conflict_help_two_counters_2.txt index 984e9b3664e..431f66ac9d4 100644 --- a/exporters/prometheus/testdata/conflict_help_two_counters_2.txt +++ b/exporters/prometheus/testdata/conflict_help_two_counters_2.txt @@ -4,4 +4,4 @@ bar_bytes_total{otel_scope_name="ma",otel_scope_schema_url="",otel_scope_version bar_bytes_total{otel_scope_name="mb",otel_scope_schema_url="",otel_scope_version="v0.1.0",type="bar"} 100 # HELP target_info Target metadata # TYPE target_info gauge -target_info{"service.name"="prometheus_test","telemetry.sdk.language"="go","telemetry.sdk.name"="opentelemetry","telemetry.sdk.version"="latest"} 1 +target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 diff --git a/exporters/prometheus/testdata/conflict_help_two_histograms_1.txt 
b/exporters/prometheus/testdata/conflict_help_two_histograms_1.txt index 780b5974991..51af0329947 100644 --- a/exporters/prometheus/testdata/conflict_help_two_histograms_1.txt +++ b/exporters/prometheus/testdata/conflict_help_two_histograms_1.txt @@ -38,4 +38,4 @@ bar_bytes_sum{A="B",otel_scope_name="mb",otel_scope_schema_url="",otel_scope_ver bar_bytes_count{A="B",otel_scope_name="mb",otel_scope_schema_url="",otel_scope_version="v0.1.0"} 1 # HELP target_info Target metadata # TYPE target_info gauge -target_info{"service.name"="prometheus_test","telemetry.sdk.language"="go","telemetry.sdk.name"="opentelemetry","telemetry.sdk.version"="latest"} 1 +target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 diff --git a/exporters/prometheus/testdata/conflict_help_two_histograms_2.txt b/exporters/prometheus/testdata/conflict_help_two_histograms_2.txt index b6280e1fdb6..4d496a69fc3 100644 --- a/exporters/prometheus/testdata/conflict_help_two_histograms_2.txt +++ b/exporters/prometheus/testdata/conflict_help_two_histograms_2.txt @@ -38,4 +38,4 @@ bar_bytes_sum{A="B",otel_scope_name="mb",otel_scope_schema_url="",otel_scope_ver bar_bytes_count{A="B",otel_scope_name="mb",otel_scope_schema_url="",otel_scope_version="v0.1.0"} 1 # HELP target_info Target metadata # TYPE target_info gauge -target_info{"service.name"="prometheus_test","telemetry.sdk.language"="go","telemetry.sdk.name"="opentelemetry","telemetry.sdk.version"="latest"} 1 +target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 diff --git a/exporters/prometheus/testdata/conflict_help_two_updowncounters_1.txt b/exporters/prometheus/testdata/conflict_help_two_updowncounters_1.txt index 77552f17b04..2e28fe3a9ab 100644 --- a/exporters/prometheus/testdata/conflict_help_two_updowncounters_1.txt +++ 
b/exporters/prometheus/testdata/conflict_help_two_updowncounters_1.txt @@ -4,4 +4,4 @@ bar_bytes{otel_scope_name="ma",otel_scope_schema_url="",otel_scope_version="v0.1 bar_bytes{otel_scope_name="mb",otel_scope_schema_url="",otel_scope_version="v0.1.0",type="bar"} 100 # HELP target_info Target metadata # TYPE target_info gauge -target_info{"service.name"="prometheus_test","telemetry.sdk.language"="go","telemetry.sdk.name"="opentelemetry","telemetry.sdk.version"="latest"} 1 +target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 diff --git a/exporters/prometheus/testdata/conflict_help_two_updowncounters_2.txt b/exporters/prometheus/testdata/conflict_help_two_updowncounters_2.txt index 29177cac010..928039d1201 100644 --- a/exporters/prometheus/testdata/conflict_help_two_updowncounters_2.txt +++ b/exporters/prometheus/testdata/conflict_help_two_updowncounters_2.txt @@ -4,4 +4,4 @@ bar_bytes{otel_scope_name="ma",otel_scope_schema_url="",otel_scope_version="v0.1 bar_bytes{otel_scope_name="mb",otel_scope_schema_url="",otel_scope_version="v0.1.0",type="bar"} 100 # HELP target_info Target metadata # TYPE target_info gauge -target_info{"service.name"="prometheus_test","telemetry.sdk.language"="go","telemetry.sdk.name"="opentelemetry","telemetry.sdk.version"="latest"} 1 +target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 diff --git a/exporters/prometheus/testdata/conflict_type_counter_and_updowncounter_1.txt b/exporters/prometheus/testdata/conflict_type_counter_and_updowncounter_1.txt index 62faeacea37..9984a1824ff 100644 --- a/exporters/prometheus/testdata/conflict_type_counter_and_updowncounter_1.txt +++ b/exporters/prometheus/testdata/conflict_type_counter_and_updowncounter_1.txt @@ -3,4 +3,4 @@ foo_total{otel_scope_name="ma",otel_scope_schema_url="",otel_scope_version="v0.1.0",type="foo"} 100 # HELP 
target_info Target metadata # TYPE target_info gauge -target_info{"service.name"="prometheus_test","telemetry.sdk.language"="go","telemetry.sdk.name"="opentelemetry","telemetry.sdk.version"="latest"} 1 +target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 diff --git a/exporters/prometheus/testdata/conflict_type_counter_and_updowncounter_2.txt b/exporters/prometheus/testdata/conflict_type_counter_and_updowncounter_2.txt index abf56f47373..56552dfe9c3 100644 --- a/exporters/prometheus/testdata/conflict_type_counter_and_updowncounter_2.txt +++ b/exporters/prometheus/testdata/conflict_type_counter_and_updowncounter_2.txt @@ -3,4 +3,4 @@ foo_total{otel_scope_name="ma",otel_scope_schema_url="",otel_scope_version="v0.1.0",type="foo"} 100 # HELP target_info Target metadata # TYPE target_info gauge -target_info{"service.name"="prometheus_test","telemetry.sdk.language"="go","telemetry.sdk.name"="opentelemetry","telemetry.sdk.version"="latest"} 1 +target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 diff --git a/exporters/prometheus/testdata/conflict_type_histogram_and_updowncounter_1.txt b/exporters/prometheus/testdata/conflict_type_histogram_and_updowncounter_1.txt index f6b4074d26e..e7fda228606 100644 --- a/exporters/prometheus/testdata/conflict_type_histogram_and_updowncounter_1.txt +++ b/exporters/prometheus/testdata/conflict_type_histogram_and_updowncounter_1.txt @@ -3,4 +3,4 @@ foo_bytes{A="B",otel_scope_name="ma",otel_scope_schema_url="",otel_scope_version="v0.1.0"} 100 # HELP target_info Target metadata # TYPE target_info gauge -target_info{"service.name"="prometheus_test","telemetry.sdk.language"="go","telemetry.sdk.name"="opentelemetry","telemetry.sdk.version"="latest"} 1 
+target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 diff --git a/exporters/prometheus/testdata/conflict_type_histogram_and_updowncounter_2.txt b/exporters/prometheus/testdata/conflict_type_histogram_and_updowncounter_2.txt index 31b685c5b86..ee8916ff87c 100644 --- a/exporters/prometheus/testdata/conflict_type_histogram_and_updowncounter_2.txt +++ b/exporters/prometheus/testdata/conflict_type_histogram_and_updowncounter_2.txt @@ -20,4 +20,4 @@ foo_bytes_sum{A="B",otel_scope_name="ma",otel_scope_schema_url="",otel_scope_ver foo_bytes_count{A="B",otel_scope_name="ma",otel_scope_schema_url="",otel_scope_version="v0.1.0"} 1 # HELP target_info Target metadata # TYPE target_info gauge -target_info{"service.name"="prometheus_test","telemetry.sdk.language"="go","telemetry.sdk.name"="opentelemetry","telemetry.sdk.version"="latest"} 1 +target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 diff --git a/exporters/prometheus/testdata/conflict_unit_two_counters.txt b/exporters/prometheus/testdata/conflict_unit_two_counters.txt index cb44b557200..81a1938ea18 100644 --- a/exporters/prometheus/testdata/conflict_unit_two_counters.txt +++ b/exporters/prometheus/testdata/conflict_unit_two_counters.txt @@ -4,4 +4,4 @@ bar_total{otel_scope_name="ma",otel_scope_schema_url="",otel_scope_version="v0.1 bar_total{otel_scope_name="mb",otel_scope_schema_url="",otel_scope_version="v0.1.0",type="bar"} 100 # HELP target_info Target metadata # TYPE target_info gauge -target_info{"service.name"="prometheus_test","telemetry.sdk.language"="go","telemetry.sdk.name"="opentelemetry","telemetry.sdk.version"="latest"} 1 +target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 diff --git a/exporters/prometheus/testdata/conflict_unit_two_histograms.txt 
b/exporters/prometheus/testdata/conflict_unit_two_histograms.txt index 900469cd750..57d894934eb 100644 --- a/exporters/prometheus/testdata/conflict_unit_two_histograms.txt +++ b/exporters/prometheus/testdata/conflict_unit_two_histograms.txt @@ -38,4 +38,4 @@ bar_sum{A="B",otel_scope_name="mb",otel_scope_schema_url="",otel_scope_version=" bar_count{A="B",otel_scope_name="mb",otel_scope_schema_url="",otel_scope_version="v0.1.0"} 1 # HELP target_info Target metadata # TYPE target_info gauge -target_info{"service.name"="prometheus_test","telemetry.sdk.language"="go","telemetry.sdk.name"="opentelemetry","telemetry.sdk.version"="latest"} 1 +target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 diff --git a/exporters/prometheus/testdata/conflict_unit_two_updowncounters.txt b/exporters/prometheus/testdata/conflict_unit_two_updowncounters.txt index 643e21ceae9..60ba48d232a 100644 --- a/exporters/prometheus/testdata/conflict_unit_two_updowncounters.txt +++ b/exporters/prometheus/testdata/conflict_unit_two_updowncounters.txt @@ -4,4 +4,4 @@ bar{otel_scope_name="ma",otel_scope_schema_url="",otel_scope_version="v0.1.0",ty bar{otel_scope_name="mb",otel_scope_schema_url="",otel_scope_version="v0.1.0",type="bar"} 100 # HELP target_info Target metadata # TYPE target_info gauge -target_info{"service.name"="prometheus_test","telemetry.sdk.language"="go","telemetry.sdk.name"="opentelemetry","telemetry.sdk.version"="latest"} 1 +target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 diff --git a/exporters/prometheus/testdata/counter.txt b/exporters/prometheus/testdata/counter.txt index 87893ad2ae5..7b25077996a 100755 --- a/exporters/prometheus/testdata/counter.txt +++ b/exporters/prometheus/testdata/counter.txt @@ -1,7 +1,7 @@ -# HELP foo_seconds_total a simple counter -# TYPE foo_seconds_total counter 
-foo_seconds_total{A="B",C="D",E="true",F="42",otel_scope_fizz="buzz",otel_scope_name="testmeter",otel_scope_schema_url="",otel_scope_version="v0.1.0"} 24.3 -foo_seconds_total{A="D",C="B",E="true",F="42",otel_scope_fizz="buzz",otel_scope_name="testmeter",otel_scope_schema_url="",otel_scope_version="v0.1.0"} 5 +# HELP my_dotted_namespace_foo_seconds_total a simple counter +# TYPE my_dotted_namespace_foo_seconds_total counter +my_dotted_namespace_foo_seconds_total{A="B",C="D",E="true",F="42",otel_scope_fizz="buzz",otel_scope_name="testmeter",otel_scope_schema_url="",otel_scope_version="v0.1.0"} 24.3 +my_dotted_namespace_foo_seconds_total{A="D",C="B",E="true",F="42",otel_scope_fizz="buzz",otel_scope_name="testmeter",otel_scope_schema_url="",otel_scope_version="v0.1.0"} 5 # HELP target_info Target metadata # TYPE target_info gauge -target_info{"service.name"="prometheus_test","telemetry.sdk.language"="go","telemetry.sdk.name"="opentelemetry","telemetry.sdk.version"="latest"} 1 +target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 diff --git a/exporters/prometheus/testdata/counter_no_unit.txt b/exporters/prometheus/testdata/counter_no_unit.txt new file mode 100755 index 00000000000..0d5bc97c064 --- /dev/null +++ b/exporters/prometheus/testdata/counter_no_unit.txt @@ -0,0 +1,7 @@ +# HELP foo_total a simple counter +# TYPE foo_total counter +foo_total{A="B",C="D",E="true",F="42",otel_scope_fizz="buzz",otel_scope_name="testmeter",otel_scope_schema_url="",otel_scope_version="v0.1.0"} 24.3 +foo_total{A="D",C="B",E="true",F="42",otel_scope_fizz="buzz",otel_scope_name="testmeter",otel_scope_schema_url="",otel_scope_version="v0.1.0"} 5 +# HELP target_info Target metadata +# TYPE target_info gauge +target_info{"service.name"="prometheus_test","telemetry.sdk.language"="go","telemetry.sdk.name"="opentelemetry","telemetry.sdk.version"="latest"} 1 diff --git 
a/exporters/prometheus/testdata/counter_noutf8_with_unit_suffix.txt b/exporters/prometheus/testdata/counter_noutf8_with_unit_suffix.txt new file mode 100755 index 00000000000..1e1daab96c4 --- /dev/null +++ b/exporters/prometheus/testdata/counter_noutf8_with_unit_suffix.txt @@ -0,0 +1,7 @@ +# HELP "foo_seconds_total" a simple counter +# TYPE "foo_seconds_total" counter +{"foo_seconds_total",A="B",C="D",E="true",F="42",otel_scope_fizz="buzz",otel_scope_name="testmeter",otel_scope_schema_url="",otel_scope_version="v0.1.0"} 24.3 +{"foo_seconds_total",A="D",C="B",E="true",F="42",otel_scope_fizz="buzz",otel_scope_name="testmeter",otel_scope_schema_url="",otel_scope_version="v0.1.0"} 5 +# HELP target_info Target metadata +# TYPE target_info gauge +target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 diff --git a/exporters/prometheus/testdata/counter_utf8.txt b/exporters/prometheus/testdata/counter_utf8.txt index 4496bab9318..8d5eb011815 100755 --- a/exporters/prometheus/testdata/counter_utf8.txt +++ b/exporters/prometheus/testdata/counter_utf8.txt @@ -1,7 +1,7 @@ -# HELP "foo.things_seconds_total" a simple counter -# TYPE "foo.things_seconds_total" counter -{"foo.things_seconds_total","A.G"="B","C.H"="D","E.I"="true","F.J"="42",otel_scope_fizz="buzz",otel_scope_name="testmeter",otel_scope_schema_url="",otel_scope_version="v0.1.0"} 24.3 -{"foo.things_seconds_total","A.G"="D","C.H"="B","E.I"="true","F.J"="42",otel_scope_fizz="buzz",otel_scope_name="testmeter",otel_scope_schema_url="",otel_scope_version="v0.1.0"} 5 +# HELP "my.dotted.namespace_foo.things_seconds_total" a simple counter +# TYPE "my.dotted.namespace_foo.things_seconds_total" counter +{"my.dotted.namespace_foo.things_seconds_total","A.G"="B","C.H"="D","E.I"="true","F.J"="42",otel_scope_fizz="buzz",otel_scope_name="testmeter",otel_scope_schema_url="",otel_scope_version="v0.1.0"} 24.3 
+{"my.dotted.namespace_foo.things_seconds_total","A.G"="D","C.H"="B","E.I"="true","F.J"="42",otel_scope_fizz="buzz",otel_scope_name="testmeter",otel_scope_schema_url="",otel_scope_version="v0.1.0"} 5 # HELP target_info Target metadata # TYPE target_info gauge target_info{"service.name"="prometheus_test","telemetry.sdk.language"="go","telemetry.sdk.name"="opentelemetry","telemetry.sdk.version"="latest"} 1 diff --git a/exporters/prometheus/testdata/counter_utf8_notranslation.txt b/exporters/prometheus/testdata/counter_utf8_notranslation.txt new file mode 100755 index 00000000000..2a36062ba43 --- /dev/null +++ b/exporters/prometheus/testdata/counter_utf8_notranslation.txt @@ -0,0 +1,7 @@ +# HELP "my.dotted.namespace_foo.things" a simple counter +# TYPE "my.dotted.namespace_foo.things" counter +{"my.dotted.namespace_foo.things","A.G"="B","C.H"="D","E.I"="true","F.J"="42",otel_scope_fizz="buzz",otel_scope_name="testmeter",otel_scope_schema_url="",otel_scope_version="v0.1.0"} 24.3 +{"my.dotted.namespace_foo.things","A.G"="D","C.H"="B","E.I"="true","F.J"="42",otel_scope_fizz="buzz",otel_scope_name="testmeter",otel_scope_schema_url="",otel_scope_version="v0.1.0"} 5 +# HELP target_info Target metadata +# TYPE target_info gauge +target_info{"service.name"="prometheus_test","telemetry.sdk.language"="go","telemetry.sdk.name"="opentelemetry","telemetry.sdk.version"="latest"} 1 diff --git a/exporters/prometheus/testdata/counter_with_custom_unit_suffix.txt b/exporters/prometheus/testdata/counter_with_custom_unit_suffix.txt index 75facd31f9d..ce62f1dfe39 100644 --- a/exporters/prometheus/testdata/counter_with_custom_unit_suffix.txt +++ b/exporters/prometheus/testdata/counter_with_custom_unit_suffix.txt @@ -1,7 +1,7 @@ -# HELP "foo_madeup_total" a simple counter -# TYPE "foo_madeup_total" counter -{"foo_madeup_total",A="B",C="D",E="true",F="42",otel_scope_fizz="buzz",otel_scope_name="testmeter",otel_scope_schema_url="",otel_scope_version="v0.1.0"} 24.3 
-{"foo_madeup_total",A="D",C="B",E="true",F="42",otel_scope_fizz="buzz",otel_scope_name="testmeter",otel_scope_schema_url="",otel_scope_version="v0.1.0"} 5 +# HELP "foo.dotted_madeup_total" a simple counter +# TYPE "foo.dotted_madeup_total" counter +{"foo.dotted_madeup_total",A="B",C="D",E="true",F="42",otel_scope_fizz="buzz",otel_scope_name="testmeter",otel_scope_schema_url="",otel_scope_version="v0.1.0"} 24.3 +{"foo.dotted_madeup_total",A="D",C="B",E="true",F="42",otel_scope_fizz="buzz",otel_scope_name="testmeter",otel_scope_schema_url="",otel_scope_version="v0.1.0"} 5 # HELP target_info Target metadata # TYPE target_info gauge target_info{"service.name"="prometheus_test","telemetry.sdk.language"="go","telemetry.sdk.name"="opentelemetry","telemetry.sdk.version"="latest"} 1 diff --git a/exporters/prometheus/testdata/counter_with_unit_suffix.txt b/exporters/prometheus/testdata/counter_with_unit_suffix.txt index 5ee732197ec..2ecb6a0cc2c 100755 --- a/exporters/prometheus/testdata/counter_with_unit_suffix.txt +++ b/exporters/prometheus/testdata/counter_with_unit_suffix.txt @@ -4,4 +4,4 @@ {"foo.seconds_total",A="D",C="B",E="true",F="42",otel_scope_fizz="buzz",otel_scope_name="testmeter",otel_scope_schema_url="",otel_scope_version="v0.1.0"} 5 # HELP target_info Target metadata # TYPE target_info gauge -target_info{"service.name"="prometheus_test","telemetry.sdk.language"="go","telemetry.sdk.name"="opentelemetry","telemetry.sdk.version"="latest"} 1 +target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 diff --git a/exporters/prometheus/testdata/multi_scopes.txt b/exporters/prometheus/testdata/multi_scopes.txt index 023aab8b719..b78af9c70b8 100644 --- a/exporters/prometheus/testdata/multi_scopes.txt +++ b/exporters/prometheus/testdata/multi_scopes.txt @@ -6,4 +6,4 @@ bar_seconds_total{otel_scope_name="meterbar",otel_scope_schema_url="",otel_scope 
foo_seconds_total{otel_scope_name="meterfoo",otel_scope_schema_url="",otel_scope_version="v0.1.0",type="foo"} 100 # HELP target_info Target metadata # TYPE target_info gauge -target_info{"service.name"="prometheus_test","telemetry.sdk.language"="go","telemetry.sdk.name"="opentelemetry","telemetry.sdk.version"="latest"} 1 +target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 diff --git a/exporters/prometheus/testdata/no_conflict_two_counters.txt b/exporters/prometheus/testdata/no_conflict_two_counters.txt index 602bce8f31b..f77339a7a23 100644 --- a/exporters/prometheus/testdata/no_conflict_two_counters.txt +++ b/exporters/prometheus/testdata/no_conflict_two_counters.txt @@ -4,4 +4,4 @@ foo_bytes_total{A="B",otel_scope_name="ma",otel_scope_schema_url="",otel_scope_v foo_bytes_total{A="B",otel_scope_name="mb",otel_scope_schema_url="",otel_scope_version="v0.1.0"} 100 # HELP target_info Target metadata # TYPE target_info gauge -target_info{"service.name"="prometheus_test","telemetry.sdk.language"="go","telemetry.sdk.name"="opentelemetry","telemetry.sdk.version"="latest"} 1 +target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 diff --git a/exporters/prometheus/testdata/no_conflict_two_histograms.txt b/exporters/prometheus/testdata/no_conflict_two_histograms.txt index 52ba5d32881..67ce3fce5fb 100644 --- a/exporters/prometheus/testdata/no_conflict_two_histograms.txt +++ b/exporters/prometheus/testdata/no_conflict_two_histograms.txt @@ -38,4 +38,4 @@ foo_bytes_sum{A="B",otel_scope_name="mb",otel_scope_schema_url="",otel_scope_ver foo_bytes_count{A="B",otel_scope_name="mb",otel_scope_schema_url="",otel_scope_version="v0.1.0"} 1 # HELP target_info Target metadata # TYPE target_info gauge 
-target_info{"service.name"="prometheus_test","telemetry.sdk.language"="go","telemetry.sdk.name"="opentelemetry","telemetry.sdk.version"="latest"} 1 +target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 diff --git a/exporters/prometheus/testdata/no_conflict_two_updowncounters.txt b/exporters/prometheus/testdata/no_conflict_two_updowncounters.txt index e1ebcc0d90b..4268d6fa73e 100644 --- a/exporters/prometheus/testdata/no_conflict_two_updowncounters.txt +++ b/exporters/prometheus/testdata/no_conflict_two_updowncounters.txt @@ -4,4 +4,4 @@ foo_bytes{A="B",otel_scope_name="ma",otel_scope_schema_url="",otel_scope_version foo_bytes{A="B",otel_scope_name="mb",otel_scope_schema_url="",otel_scope_version="v0.1.0"} 100 # HELP target_info Target metadata # TYPE target_info gauge -target_info{"service.name"="prometheus_test","telemetry.sdk.language"="go","telemetry.sdk.name"="opentelemetry","telemetry.sdk.version"="latest"} 1 +target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 diff --git a/exporters/stdout/stdoutlog/exporter.go b/exporters/stdout/stdoutlog/exporter.go index e2bf9bfa2ea..3d48d67081e 100644 --- a/exporters/stdout/stdoutlog/exporter.go +++ b/exporters/stdout/stdoutlog/exporter.go @@ -67,6 +67,6 @@ func (e *Exporter) Shutdown(context.Context) error { } // ForceFlush performs no action. 
-func (e *Exporter) ForceFlush(context.Context) error { +func (*Exporter) ForceFlush(context.Context) error { return nil } diff --git a/exporters/stdout/stdoutlog/exporter_test.go b/exporters/stdout/stdoutlog/exporter_test.go index 77b7fbac8db..d8f0c09fc2f 100644 --- a/exporters/stdout/stdoutlog/exporter_test.go +++ b/exporters/stdout/stdoutlog/exporter_test.go @@ -11,16 +11,15 @@ import ( "testing" "time" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/instrumentation" - "go.opentelemetry.io/otel/sdk/log/logtest" - "go.opentelemetry.io/otel/sdk/resource" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/log" + "go.opentelemetry.io/otel/sdk/instrumentation" sdklog "go.opentelemetry.io/otel/sdk/log" + "go.opentelemetry.io/otel/sdk/log/logtest" + "go.opentelemetry.io/otel/sdk/resource" "go.opentelemetry.io/otel/trace" ) @@ -361,7 +360,7 @@ func TestExporterConcurrentSafe(t *testing.T) { const goroutines = 10 var wg sync.WaitGroup wg.Add(goroutines) - for i := 0; i < goroutines; i++ { + for range goroutines { go func() { defer wg.Done() err := exporter.Export(context.Background(), []sdklog.Record{{}}) diff --git a/exporters/stdout/stdoutlog/go.mod b/exporters/stdout/stdoutlog/go.mod index d87d10eeb05..87626160aa0 100644 --- a/exporters/stdout/stdoutlog/go.mod +++ b/exporters/stdout/stdoutlog/go.mod @@ -6,13 +6,13 @@ go 1.23.0 retract v0.12.0 require ( - github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/otel v1.37.0 - go.opentelemetry.io/otel/log v0.13.0 - go.opentelemetry.io/otel/sdk v1.37.0 - go.opentelemetry.io/otel/sdk/log v0.13.0 - go.opentelemetry.io/otel/sdk/log/logtest v0.13.0 - go.opentelemetry.io/otel/trace v1.37.0 + github.com/stretchr/testify v1.11.1 + go.opentelemetry.io/otel v1.38.0 + go.opentelemetry.io/otel/log v0.14.0 + go.opentelemetry.io/otel/sdk v1.38.0 + go.opentelemetry.io/otel/sdk/log v0.14.0 + 
go.opentelemetry.io/otel/sdk/log/logtest v0.14.0 + go.opentelemetry.io/otel/trace v1.38.0 ) require ( @@ -22,8 +22,8 @@ require ( github.com/google/uuid v1.6.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/otel/metric v1.37.0 // indirect - golang.org/x/sys v0.33.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + golang.org/x/sys v0.35.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) @@ -40,3 +40,5 @@ replace go.opentelemetry.io/otel/trace => ../../../trace replace go.opentelemetry.io/otel/sdk => ../../../sdk replace go.opentelemetry.io/otel/metric => ../../../metric + +replace go.opentelemetry.io/otel/sdk/metric => ../../../sdk/metric diff --git a/exporters/stdout/stdoutlog/go.sum b/exporters/stdout/stdoutlog/go.sum index 47deaf0ed85..c241c13d6e5 100644 --- a/exporters/stdout/stdoutlog/go.sum +++ b/exporters/stdout/stdoutlog/go.sum @@ -17,12 +17,12 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod 
h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/exporters/stdout/stdoutlog/record.go b/exporters/stdout/stdoutlog/record.go index 6816342824d..6cb0c8c01d6 100644 --- a/exporters/stdout/stdoutlog/record.go +++ b/exporters/stdout/stdoutlog/record.go @@ -27,7 +27,7 @@ type value struct { func (v value) MarshalJSON() ([]byte, error) { var jsonVal struct { Type string - Value interface{} + Value any } jsonVal.Type = v.Kind().String() diff --git a/exporters/stdout/stdoutmetric/example_test.go b/exporters/stdout/stdoutmetric/example_test.go index 4b5c98a038a..0820d8514ab 100644 --- a/exporters/stdout/stdoutmetric/example_test.go +++ b/exporters/stdout/stdoutmetric/example_test.go @@ -15,7 +15,7 @@ import ( "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) var ( diff --git a/exporters/stdout/stdoutmetric/exporter.go b/exporters/stdout/stdoutmetric/exporter.go index 76f15b96b44..07a31f82909 100644 --- a/exporters/stdout/stdoutmetric/exporter.go +++ b/exporters/stdout/stdoutmetric/exporter.go @@ -63,7 +63,7 @@ func (e *exporter) Export(ctx context.Context, data *metricdata.ResourceMetrics) return e.encVal.Load().(encoderHolder).Encode(data) } -func (e *exporter) ForceFlush(context.Context) error { +func (*exporter) ForceFlush(context.Context) error { // exporter holds no state, nothing to flush. 
return nil } @@ -77,7 +77,7 @@ func (e *exporter) Shutdown(context.Context) error { return nil } -func (e *exporter) MarshalLog() interface{} { +func (*exporter) MarshalLog() any { return struct{ Type string }{Type: "STDOUT"} } diff --git a/exporters/stdout/stdoutmetric/go.mod b/exporters/stdout/stdoutmetric/go.mod index 665ebd568f6..b706c87531c 100644 --- a/exporters/stdout/stdoutmetric/go.mod +++ b/exporters/stdout/stdoutmetric/go.mod @@ -3,10 +3,10 @@ module go.opentelemetry.io/otel/exporters/stdout/stdoutmetric go 1.23.0 require ( - github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/otel v1.37.0 - go.opentelemetry.io/otel/sdk v1.37.0 - go.opentelemetry.io/otel/sdk/metric v1.37.0 + github.com/stretchr/testify v1.11.1 + go.opentelemetry.io/otel v1.38.0 + go.opentelemetry.io/otel/sdk v1.38.0 + go.opentelemetry.io/otel/sdk/metric v1.38.0 ) require ( @@ -16,9 +16,9 @@ require ( github.com/google/uuid v1.6.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/otel/metric v1.37.0 // indirect - go.opentelemetry.io/otel/trace v1.37.0 // indirect - golang.org/x/sys v0.33.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect + golang.org/x/sys v0.35.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/exporters/stdout/stdoutmetric/go.sum b/exporters/stdout/stdoutmetric/go.sum index 47deaf0ed85..c241c13d6e5 100644 --- a/exporters/stdout/stdoutmetric/go.sum +++ b/exporters/stdout/stdoutmetric/go.sum @@ -17,12 +17,12 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/stretchr/testify v1.10.0 
h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/exporters/stdout/stdouttrace/doc.go b/exporters/stdout/stdouttrace/doc.go index eff7730cdc9..648bc0749fc 100644 --- a/exporters/stdout/stdouttrace/doc.go +++ b/exporters/stdout/stdouttrace/doc.go @@ -3,4 +3,7 @@ // Package stdouttrace contains an OpenTelemetry exporter for tracing // telemetry to be written to an output destination as JSON. +// +// See [go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x] for information about +// the experimental features. 
package stdouttrace // import "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" diff --git a/exporters/stdout/stdouttrace/example_test.go b/exporters/stdout/stdouttrace/example_test.go index a72977718d3..a1da0252167 100644 --- a/exporters/stdout/stdouttrace/example_test.go +++ b/exporters/stdout/stdouttrace/example_test.go @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" "go.opentelemetry.io/otel/sdk/resource" sdktrace "go.opentelemetry.io/otel/sdk/trace" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" "go.opentelemetry.io/otel/trace" ) diff --git a/exporters/stdout/stdouttrace/go.mod b/exporters/stdout/stdouttrace/go.mod index 8033c2602a9..9f87567dfcf 100644 --- a/exporters/stdout/stdouttrace/go.mod +++ b/exporters/stdout/stdouttrace/go.mod @@ -8,10 +8,12 @@ replace ( ) require ( - github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/otel v1.37.0 - go.opentelemetry.io/otel/sdk v1.37.0 - go.opentelemetry.io/otel/trace v1.37.0 + github.com/stretchr/testify v1.11.1 + go.opentelemetry.io/otel v1.38.0 + go.opentelemetry.io/otel/metric v1.38.0 + go.opentelemetry.io/otel/sdk v1.38.0 + go.opentelemetry.io/otel/sdk/metric v1.38.0 + go.opentelemetry.io/otel/trace v1.38.0 ) require ( @@ -21,11 +23,12 @@ require ( github.com/google/uuid v1.6.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/otel/metric v1.37.0 // indirect - golang.org/x/sys v0.33.0 // indirect + golang.org/x/sys v0.35.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) replace go.opentelemetry.io/otel/trace => ../../../trace replace go.opentelemetry.io/otel/metric => ../../../metric + +replace go.opentelemetry.io/otel/sdk/metric => ../../../sdk/metric diff --git a/exporters/stdout/stdouttrace/go.sum b/exporters/stdout/stdouttrace/go.sum index e7f05f47d7f..144fc543a73 100644 --- a/exporters/stdout/stdouttrace/go.sum +++ 
b/exporters/stdout/stdouttrace/go.sum @@ -17,14 +17,14 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/exporters/stdout/stdouttrace/internal/counter/counter.go b/exporters/stdout/stdouttrace/internal/counter/counter.go new file mode 100644 index 00000000000..8c780afb024 --- /dev/null +++ b/exporters/stdout/stdouttrace/internal/counter/counter.go @@ -0,0 +1,31 @@ 
+// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/counter/counter.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package counter provides a simple counter for generating unique IDs. +// +// This package is used to generate unique IDs while allowing testing packages +// to reset the counter. +package counter // import "go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/counter" + +import "sync/atomic" + +// exporterN is a global 0-based count of the number of exporters created. +var exporterN atomic.Int64 + +// NextExporterID returns the next unique ID for an exporter. +func NextExporterID() int64 { + const inc = 1 + return exporterN.Add(inc) - inc +} + +// SetExporterID sets the exporter ID counter to v and returns the previous +// value. +// +// This function is useful for testing purposes, allowing you to reset the +// counter. It should not be used in production code. +func SetExporterID(v int64) int64 { + return exporterN.Swap(v) +} diff --git a/exporters/stdout/stdouttrace/internal/counter/counter_test.go b/exporters/stdout/stdouttrace/internal/counter/counter_test.go new file mode 100644 index 00000000000..f3e380d3325 --- /dev/null +++ b/exporters/stdout/stdouttrace/internal/counter/counter_test.go @@ -0,0 +1,65 @@ +// Code generated by gotmpl. DO NOT MODIFY. 
+// source: internal/shared/counter/counter_test.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package counter + +import ( + "sync" + "testing" +) + +func TestNextExporterID(t *testing.T) { + SetExporterID(0) + + var expected int64 + for range 10 { + id := NextExporterID() + if id != expected { + t.Errorf("NextExporterID() = %d; want %d", id, expected) + } + expected++ + } +} + +func TestSetExporterID(t *testing.T) { + SetExporterID(0) + + prev := SetExporterID(42) + if prev != 0 { + t.Errorf("SetExporterID(42) returned %d; want 0", prev) + } + + id := NextExporterID() + if id != 42 { + t.Errorf("NextExporterID() = %d; want 42", id) + } +} + +func TestNextExporterIDConcurrentSafe(t *testing.T) { + SetExporterID(0) + + const goroutines = 100 + const increments = 10 + + var wg sync.WaitGroup + wg.Add(goroutines) + + for range goroutines { + go func() { + defer wg.Done() + for range increments { + NextExporterID() + } + }() + } + + wg.Wait() + + expected := int64(goroutines * increments) + if id := NextExporterID(); id != expected { + t.Errorf("NextExporterID() = %d; want %d", id, expected) + } +} \ No newline at end of file diff --git a/exporters/stdout/stdouttrace/internal/gen.go b/exporters/stdout/stdouttrace/internal/gen.go new file mode 100644 index 00000000000..dcc8387d18a --- /dev/null +++ b/exporters/stdout/stdouttrace/internal/gen.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package internal provides internal functionality for the stdouttrace +// package. 
+package internal // import "go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal" + +//go:generate gotmpl --body=../../../../internal/shared/counter/counter.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/counter\" }" --out=counter/counter.go +//go:generate gotmpl --body=../../../../internal/shared/counter/counter_test.go.tmpl "--data={}" --out=counter/counter_test.go diff --git a/exporters/stdout/stdouttrace/internal/x/README.md b/exporters/stdout/stdouttrace/internal/x/README.md new file mode 100644 index 00000000000..6b7d1aec876 --- /dev/null +++ b/exporters/stdout/stdouttrace/internal/x/README.md @@ -0,0 +1,36 @@ +# Experimental Features + +The `stdouttrace` exporter contains features that have not yet stabilized in the OpenTelemetry specification. +These features are added to the `stdouttrace` exporter prior to stabilization in the specification so that users can start experimenting with them and provide feedback. + +These features may change in backwards incompatible ways as feedback is applied. +See the [Compatibility and Stability](#compatibility-and-stability) section for more information. + +## Features + +- [Self-Observability](#self-observability) + +### Self-Observability + +The `stdouttrace` exporter provides a self-observability feature that allows you to monitor the SDK itself. + +To opt-in, set the environment variable `OTEL_GO_X_SELF_OBSERVABILITY` to `true`. + +When enabled, the SDK will create the following metrics using the global `MeterProvider`: + +- `otel.sdk.exporter.span.inflight` +- `otel.sdk.exporter.span.exported` +- `otel.sdk.exporter.operation.duration` + +Please see the [Semantic conventions for OpenTelemetry SDK metrics] documentation for more details on these metrics. 
+ +[Semantic conventions for OpenTelemetry SDK metrics]: https://github.com/open-telemetry/semantic-conventions/blob/v1.36.0/docs/otel/sdk-metrics.md + +## Compatibility and Stability + +Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../../VERSIONING.md). +These features may be removed or modified in successive version releases, including patch versions. + +When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release. +There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version. +If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support. diff --git a/exporters/stdout/stdouttrace/internal/x/x.go b/exporters/stdout/stdouttrace/internal/x/x.go new file mode 100644 index 00000000000..55bb98a9658 --- /dev/null +++ b/exporters/stdout/stdouttrace/internal/x/x.go @@ -0,0 +1,63 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package x documents experimental features for [go.opentelemetry.io/otel/exporters/stdout/stdouttrace]. +package x // import "go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x" + +import ( + "os" + "strings" +) + +// SelfObservability is an experimental feature flag that determines if SDK +// self-observability metrics are enabled. +// +// To enable this feature set the OTEL_GO_X_SELF_OBSERVABILITY environment variable +// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" +// will also enable this). +var SelfObservability = newFeature("SELF_OBSERVABILITY", func(v string) (string, bool) { + if strings.EqualFold(v, "true") { + return v, true + } + return "", false +}) + +// Feature is an experimental feature control flag. 
It provides a uniform way +// to interact with these feature flags and parse their values. +type Feature[T any] struct { + key string + parse func(v string) (T, bool) +} + +func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { + const envKeyRoot = "OTEL_GO_X_" + return Feature[T]{ + key: envKeyRoot + suffix, + parse: parse, + } +} + +// Key returns the environment variable key that needs to be set to enable the +// feature. +func (f Feature[T]) Key() string { return f.key } + +// Lookup returns the user configured value for the feature and true if the +// user has enabled the feature. Otherwise, if the feature is not enabled, a +// zero-value and false are returned. +func (f Feature[T]) Lookup() (v T, ok bool) { + // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value + // + // > The SDK MUST interpret an empty value of an environment variable the + // > same way as when the variable is unset. + vRaw := os.Getenv(f.key) + if vRaw == "" { + return v, ok + } + return f.parse(vRaw) +} + +// Enabled reports whether the feature is enabled. 
+func (f Feature[T]) Enabled() bool { + _, ok := f.Lookup() + return ok +} diff --git a/exporters/stdout/stdouttrace/internal/x/x_test.go b/exporters/stdout/stdouttrace/internal/x/x_test.go new file mode 100644 index 00000000000..15124ca91d1 --- /dev/null +++ b/exporters/stdout/stdouttrace/internal/x/x_test.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package x + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSelfObservability(t *testing.T) { + const key = "OTEL_GO_X_SELF_OBSERVABILITY" + require.Equal(t, key, SelfObservability.Key()) + + t.Run("100", run(setenv(key, "100"), assertDisabled(SelfObservability))) + t.Run("true", run(setenv(key, "true"), assertEnabled(SelfObservability, "true"))) + t.Run("True", run(setenv(key, "True"), assertEnabled(SelfObservability, "True"))) + t.Run("false", run(setenv(key, "false"), assertDisabled(SelfObservability))) + t.Run("empty", run(assertDisabled(SelfObservability))) +} + +func run(steps ...func(*testing.T)) func(*testing.T) { + return func(t *testing.T) { + t.Helper() + for _, step := range steps { + step(t) + } + } +} + +func setenv(k, v string) func(t *testing.T) { //nolint:unparam // This is a reusable test utility function. 
+ return func(t *testing.T) { t.Setenv(k, v) } +} + +func assertEnabled[T any](f Feature[T], want T) func(*testing.T) { + return func(t *testing.T) { + t.Helper() + assert.True(t, f.Enabled(), "not enabled") + + v, ok := f.Lookup() + assert.True(t, ok, "Lookup state") + assert.Equal(t, want, v, "Lookup value") + } +} + +func assertDisabled[T any](f Feature[T]) func(*testing.T) { + var zero T + return func(t *testing.T) { + t.Helper() + + assert.False(t, f.Enabled(), "enabled") + + v, ok := f.Lookup() + assert.False(t, ok, "Lookup state") + assert.Equal(t, zero, v, "Lookup value") + } +} diff --git a/exporters/stdout/stdouttrace/trace.go b/exporters/stdout/stdouttrace/trace.go index bdb915ba803..d61324d2ee9 100644 --- a/exporters/stdout/stdouttrace/trace.go +++ b/exporters/stdout/stdouttrace/trace.go @@ -6,13 +6,28 @@ package stdouttrace // import "go.opentelemetry.io/otel/exporters/stdout/stdoutt import ( "context" "encoding/json" + "errors" + "fmt" "sync" "time" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/counter" + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/sdk/trace/tracetest" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" ) +// otelComponentType is a name identifying the type of the OpenTelemetry +// component. It is not a standardized OTel component type, so it uses the +// Go package prefixed type name to ensure uniqueness and identity. 
+const otelComponentType = "go.opentelemetry.io/otel/exporters/stdout/stdouttrace.Exporter" + var zeroTime time.Time var _ trace.SpanExporter = &Exporter{} @@ -26,10 +41,45 @@ func New(options ...Option) (*Exporter, error) { enc.SetIndent("", "\t") } - return &Exporter{ + exporter := &Exporter{ encoder: enc, timestamps: cfg.Timestamps, - }, nil + } + + if !x.SelfObservability.Enabled() { + return exporter, nil + } + + exporter.selfObservabilityEnabled = true + exporter.selfObservabilityAttrs = []attribute.KeyValue{ + semconv.OTelComponentName(fmt.Sprintf("%s/%d", otelComponentType, counter.NextExporterID())), + semconv.OTelComponentTypeKey.String(otelComponentType), + } + s := attribute.NewSet(exporter.selfObservabilityAttrs...) + exporter.selfObservabilitySetOpt = metric.WithAttributeSet(s) + + mp := otel.GetMeterProvider() + m := mp.Meter( + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace", + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(semconv.SchemaURL), + ) + + var err, e error + if exporter.spanInflightMetric, e = otelconv.NewSDKExporterSpanInflight(m); e != nil { + e = fmt.Errorf("failed to create span inflight metric: %w", e) + err = errors.Join(err, e) + } + if exporter.spanExportedMetric, e = otelconv.NewSDKExporterSpanExported(m); e != nil { + e = fmt.Errorf("failed to create span exported metric: %w", e) + err = errors.Join(err, e) + } + if exporter.operationDurationMetric, e = otelconv.NewSDKExporterOperationDuration(m); e != nil { + e = fmt.Errorf("failed to create operation duration metric: %w", e) + err = errors.Join(err, e) + } + + return exporter, err } // Exporter is an implementation of trace.SpanSyncer that writes spans to stdout. 
@@ -40,10 +90,110 @@ type Exporter struct { stoppedMu sync.RWMutex stopped bool + + selfObservabilityEnabled bool + selfObservabilityAttrs []attribute.KeyValue // selfObservability common attributes + selfObservabilitySetOpt metric.MeasurementOption + spanInflightMetric otelconv.SDKExporterSpanInflight + spanExportedMetric otelconv.SDKExporterSpanExported + operationDurationMetric otelconv.SDKExporterOperationDuration } +var ( + measureAttrsPool = sync.Pool{ + New: func() any { + // "component.name" + "component.type" + "error.type" + const n = 1 + 1 + 1 + s := make([]attribute.KeyValue, 0, n) + // Return a pointer to a slice instead of a slice itself + // to avoid allocations on every call. + return &s + }, + } + + addOptPool = &sync.Pool{ + New: func() any { + const n = 1 // WithAttributeSet + o := make([]metric.AddOption, 0, n) + return &o + }, + } + + recordOptPool = &sync.Pool{ + New: func() any { + const n = 1 // WithAttributeSet + o := make([]metric.RecordOption, 0, n) + return &o + }, + } +) + // ExportSpans writes spans in json format to stdout. -func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { +func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) (err error) { + var success int64 + if e.selfObservabilityEnabled { + count := int64(len(spans)) + + addOpt := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *addOpt = (*addOpt)[:0] + addOptPool.Put(addOpt) + }() + + *addOpt = append(*addOpt, e.selfObservabilitySetOpt) + + e.spanInflightMetric.Inst().Add(ctx, count, *addOpt...) + defer func(starting time.Time) { + e.spanInflightMetric.Inst().Add(ctx, -count, *addOpt...) + + // Record the success and duration of the operation. + // + // Do not exclude 0 values, as they are valid and indicate no spans + // were exported which is meaningful for certain aggregations. + e.spanExportedMetric.Inst().Add(ctx, success, *addOpt...) 
+ + mOpt := e.selfObservabilitySetOpt + if err != nil { + // additional attributes for self-observability, + // only spanExportedMetric and operationDurationMetric are supported. + attrs := measureAttrsPool.Get().(*[]attribute.KeyValue) + defer func() { + *attrs = (*attrs)[:0] // reset the slice for reuse + measureAttrsPool.Put(attrs) + }() + *attrs = append(*attrs, e.selfObservabilityAttrs...) + *attrs = append(*attrs, semconv.ErrorType(err)) + + // Do not inefficiently make a copy of attrs by using + // WithAttributes instead of WithAttributeSet. + set := attribute.NewSet(*attrs...) + mOpt = metric.WithAttributeSet(set) + + // Reset addOpt with new attribute set. + *addOpt = append((*addOpt)[:0], mOpt) + + e.spanExportedMetric.Inst().Add( + ctx, + count-success, + *addOpt..., + ) + } + + recordOpt := recordOptPool.Get().(*[]metric.RecordOption) + defer func() { + *recordOpt = (*recordOpt)[:0] + recordOptPool.Put(recordOpt) + }() + + *recordOpt = append(*recordOpt, mOpt) + e.operationDurationMetric.Inst().Record( + ctx, + time.Since(starting).Seconds(), + *recordOpt..., + ) + }(time.Now()) + } + if err := ctx.Err(); err != nil { return err } @@ -75,15 +225,17 @@ func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) } // Encode span stubs, one by one - if err := e.encoder.Encode(stub); err != nil { - return err + if e := e.encoder.Encode(stub); e != nil { + err = errors.Join(err, fmt.Errorf("failed to encode span %d: %w", i, e)) + continue } + success++ } - return nil + return err } // Shutdown is called to stop the exporter, it performs no action. -func (e *Exporter) Shutdown(ctx context.Context) error { +func (e *Exporter) Shutdown(context.Context) error { e.stoppedMu.Lock() e.stopped = true e.stoppedMu.Unlock() @@ -92,7 +244,7 @@ func (e *Exporter) Shutdown(ctx context.Context) error { } // MarshalLog is the marshaling function used by the logging system to represent this Exporter. 
-func (e *Exporter) MarshalLog() interface{} { +func (e *Exporter) MarshalLog() any { return struct { Type string WithTimestamps bool diff --git a/exporters/stdout/stdouttrace/trace_test.go b/exporters/stdout/stdouttrace/trace_test.go index a034eb40187..2b686fa7aed 100644 --- a/exporters/stdout/stdouttrace/trace_test.go +++ b/exporters/stdout/stdouttrace/trace_test.go @@ -7,18 +7,30 @@ import ( "bytes" "context" "encoding/json" + "io" + "math" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/counter" + mapi "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest" "go.opentelemetry.io/otel/sdk/resource" tracesdk "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/sdk/trace/tracetest" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" "go.opentelemetry.io/otel/trace" ) @@ -204,7 +216,7 @@ func expectedJSON(now time.Time) string { func TestExporterShutdownIgnoresContext(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) - defer cancel() + t.Cleanup(cancel) e, err := stdouttrace.New() if err != nil { @@ -227,3 +239,463 @@ func TestExporterShutdownNoError(t *testing.T) { t.Errorf("shutdown errored: expected nil, got %v", err) } } + +func TestSelfObservability(t *testing.T) { + defaultCallExportSpans := func(t *testing.T, exporter *stdouttrace.Exporter) { + require.NoError(t, exporter.ExportSpans(context.Background(), tracetest.SpanStubs{ + {Name: "/foo"}, + {Name: "/bar"}, + }.Snapshots())) + } + 
+ tests := []struct { + name string + enabled bool + callExportSpans func(t *testing.T, exporter *stdouttrace.Exporter) + assertMetrics func(t *testing.T, rm metricdata.ResourceMetrics) + }{ + { + name: "Disabled", + enabled: false, + callExportSpans: defaultCallExportSpans, + assertMetrics: func(t *testing.T, rm metricdata.ResourceMetrics) { + assert.Empty(t, rm.ScopeMetrics) + }, + }, + { + name: "Enabled", + enabled: true, + callExportSpans: defaultCallExportSpans, + assertMetrics: func(t *testing.T, rm metricdata.ResourceMetrics) { + t.Helper() + require.Len(t, rm.ScopeMetrics, 1) + + sm := rm.ScopeMetrics[0] + require.Len(t, sm.Metrics, 3) + + assert.Equal(t, instrumentation.Scope{ + Name: "go.opentelemetry.io/otel/exporters/stdout/stdouttrace", + Version: sdk.Version(), + SchemaURL: semconv.SchemaURL, + }, sm.Scope) + + metricdatatest.AssertEqual(t, metricdata.Metrics{ + Name: otelconv.SDKExporterSpanInflight{}.Name(), + Description: otelconv.SDKExporterSpanInflight{}.Description(), + Unit: otelconv.SDKExporterSpanInflight{}.Unit(), + Data: metricdata.Sum[int64]{ + Temporality: metricdata.CumulativeTemporality, + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet( + semconv.OTelComponentName( + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace.Exporter/0", + ), + semconv.OTelComponentTypeKey.String( + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace.Exporter", + ), + ), + Value: 0, + }, + }, + }, + }, sm.Metrics[0], metricdatatest.IgnoreTimestamp()) + + metricdatatest.AssertEqual(t, metricdata.Metrics{ + Name: otelconv.SDKExporterSpanExported{}.Name(), + Description: otelconv.SDKExporterSpanExported{}.Description(), + Unit: otelconv.SDKExporterSpanExported{}.Unit(), + Data: metricdata.Sum[int64]{ + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet( + semconv.OTelComponentName( + 
"go.opentelemetry.io/otel/exporters/stdout/stdouttrace.Exporter/0", + ), + semconv.OTelComponentTypeKey.String( + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace.Exporter", + ), + ), + Value: 2, + }, + }, + }, + }, sm.Metrics[1], metricdatatest.IgnoreTimestamp()) + + metricdatatest.AssertEqual(t, metricdata.Metrics{ + Name: otelconv.SDKExporterOperationDuration{}.Name(), + Description: otelconv.SDKExporterOperationDuration{}.Description(), + Unit: otelconv.SDKExporterOperationDuration{}.Unit(), + Data: metricdata.Histogram[float64]{ + Temporality: metricdata.CumulativeTemporality, + DataPoints: []metricdata.HistogramDataPoint[float64]{ + { + Attributes: attribute.NewSet( + semconv.OTelComponentName( + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace.Exporter/0", + ), + semconv.OTelComponentTypeKey.String( + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace.Exporter", + ), + ), + }, + }, + }, + }, sm.Metrics[2], metricdatatest.IgnoreTimestamp(), metricdatatest.IgnoreValue()) + }, + }, + { + name: "Enabled, but ExportSpans returns error", + enabled: true, + callExportSpans: func(t *testing.T, exporter *stdouttrace.Exporter) { + t.Helper() + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + err := exporter.ExportSpans(ctx, tracetest.SpanStubs{ + {Name: "/foo"}, + {Name: "/bar"}, + }.Snapshots()) + require.Error(t, err) + }, + assertMetrics: func(t *testing.T, rm metricdata.ResourceMetrics) { + t.Helper() + require.Len(t, rm.ScopeMetrics, 1) + + sm := rm.ScopeMetrics[0] + require.Len(t, sm.Metrics, 3) + + assert.Equal(t, instrumentation.Scope{ + Name: "go.opentelemetry.io/otel/exporters/stdout/stdouttrace", + Version: sdk.Version(), + SchemaURL: semconv.SchemaURL, + }, sm.Scope) + + metricdatatest.AssertEqual(t, metricdata.Metrics{ + Name: otelconv.SDKExporterSpanInflight{}.Name(), + Description: otelconv.SDKExporterSpanInflight{}.Description(), + Unit: otelconv.SDKExporterSpanInflight{}.Unit(), + Data: metricdata.Sum[int64]{ + 
Temporality: metricdata.CumulativeTemporality, + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet( + semconv.OTelComponentName( + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace.Exporter/0", + ), + semconv.OTelComponentTypeKey.String( + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace.Exporter", + ), + ), + Value: 0, + }, + }, + }, + }, sm.Metrics[0], metricdatatest.IgnoreTimestamp()) + + metricdatatest.AssertEqual(t, metricdata.Metrics{ + Name: otelconv.SDKExporterSpanExported{}.Name(), + Description: otelconv.SDKExporterSpanExported{}.Description(), + Unit: otelconv.SDKExporterSpanExported{}.Unit(), + Data: metricdata.Sum[int64]{ + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet( + semconv.OTelComponentName( + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace.Exporter/0", + ), + semconv.OTelComponentTypeKey.String( + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace.Exporter", + ), + ), + Value: 0, + }, + { + Attributes: attribute.NewSet( + semconv.OTelComponentName( + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace.Exporter/0", + ), + semconv.OTelComponentTypeKey.String( + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace.Exporter", + ), + semconv.ErrorType(context.Canceled), + ), + Value: 2, + }, + }, + }, + }, sm.Metrics[1], metricdatatest.IgnoreTimestamp()) + + metricdatatest.AssertEqual(t, metricdata.Metrics{ + Name: otelconv.SDKExporterOperationDuration{}.Name(), + Description: otelconv.SDKExporterOperationDuration{}.Description(), + Unit: otelconv.SDKExporterOperationDuration{}.Unit(), + Data: metricdata.Histogram[float64]{ + Temporality: metricdata.CumulativeTemporality, + DataPoints: []metricdata.HistogramDataPoint[float64]{ + { + Attributes: attribute.NewSet( + semconv.OTelComponentName( + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace.Exporter/0", + ), + 
semconv.OTelComponentTypeKey.String( + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace.Exporter", + ), + semconv.ErrorType(context.Canceled), + ), + }, + }, + }, + }, sm.Metrics[2], metricdatatest.IgnoreTimestamp(), metricdatatest.IgnoreValue()) + }, + }, + { + name: "PartialExport", + enabled: true, + callExportSpans: func(t *testing.T, exporter *stdouttrace.Exporter) { + t.Helper() + + err := exporter.ExportSpans(context.Background(), tracetest.SpanStubs{ + {Name: "/foo"}, + { + Name: "JSON encoder cannot marshal math.Inf(1)", + Attributes: []attribute.KeyValue{attribute.Float64("", math.Inf(1))}, + }, + {Name: "/bar"}, + }.Snapshots()) + require.Error(t, err) + }, + assertMetrics: func(t *testing.T, rm metricdata.ResourceMetrics) { + t.Helper() + require.Len(t, rm.ScopeMetrics, 1) + + sm := rm.ScopeMetrics[0] + require.Len(t, sm.Metrics, 3) + + assert.Equal(t, instrumentation.Scope{ + Name: "go.opentelemetry.io/otel/exporters/stdout/stdouttrace", + Version: sdk.Version(), + SchemaURL: semconv.SchemaURL, + }, sm.Scope) + + metricdatatest.AssertEqual(t, metricdata.Metrics{ + Name: otelconv.SDKExporterSpanInflight{}.Name(), + Description: otelconv.SDKExporterSpanInflight{}.Description(), + Unit: otelconv.SDKExporterSpanInflight{}.Unit(), + Data: metricdata.Sum[int64]{ + Temporality: metricdata.CumulativeTemporality, + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet( + semconv.OTelComponentName( + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace.Exporter/0", + ), + semconv.OTelComponentTypeKey.String( + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace.Exporter", + ), + ), + Value: 0, + }, + }, + }, + }, sm.Metrics[0], metricdatatest.IgnoreTimestamp()) + + require.IsType(t, metricdata.Sum[int64]{}, sm.Metrics[1].Data) + sum := sm.Metrics[1].Data.(metricdata.Sum[int64]) + var found bool + for i := range sum.DataPoints { + sum.DataPoints[i].Attributes, _ = sum.DataPoints[i].Attributes.Filter( + func(kv 
attribute.KeyValue) bool { + if kv.Key == semconv.ErrorTypeKey { + found = true + return false + } + return true + }, + ) + } + assert.True(t, found, "missing error type attribute in span export metric") + sm.Metrics[1].Data = sum + + metricdatatest.AssertEqual(t, metricdata.Metrics{ + Name: otelconv.SDKExporterSpanExported{}.Name(), + Description: otelconv.SDKExporterSpanExported{}.Description(), + Unit: otelconv.SDKExporterSpanExported{}.Unit(), + Data: metricdata.Sum[int64]{ + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet( + semconv.OTelComponentName( + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace.Exporter/0", + ), + semconv.OTelComponentTypeKey.String( + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace.Exporter", + ), + ), + Value: 1, + }, + { + Attributes: attribute.NewSet( + semconv.OTelComponentName( + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace.Exporter/0", + ), + semconv.OTelComponentTypeKey.String( + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace.Exporter", + ), + ), + Value: 2, + }, + }, + }, + }, sm.Metrics[1], metricdatatest.IgnoreTimestamp()) + + require.IsType(t, metricdata.Histogram[float64]{}, sm.Metrics[2].Data) + hist := sm.Metrics[2].Data.(metricdata.Histogram[float64]) + require.Len(t, hist.DataPoints, 1) + found = false + hist.DataPoints[0].Attributes, _ = hist.DataPoints[0].Attributes.Filter( + func(kv attribute.KeyValue) bool { + if kv.Key == semconv.ErrorTypeKey { + found = true + return false + } + return true + }, + ) + assert.True(t, found, "missing error type attribute in operation duration metric") + sm.Metrics[2].Data = hist + + metricdatatest.AssertEqual(t, metricdata.Metrics{ + Name: otelconv.SDKExporterOperationDuration{}.Name(), + Description: otelconv.SDKExporterOperationDuration{}.Description(), + Unit: otelconv.SDKExporterOperationDuration{}.Unit(), + Data: metricdata.Histogram[float64]{ + 
Temporality: metricdata.CumulativeTemporality, + DataPoints: []metricdata.HistogramDataPoint[float64]{ + { + Attributes: attribute.NewSet( + semconv.OTelComponentName( + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace.Exporter/0", + ), + semconv.OTelComponentTypeKey.String( + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace.Exporter", + ), + ), + }, + }, + }, + }, sm.Metrics[2], metricdatatest.IgnoreTimestamp(), metricdatatest.IgnoreValue()) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.enabled { + t.Setenv("OTEL_GO_X_SELF_OBSERVABILITY", "true") + + // Reset component name counter for each test. + _ = counter.SetExporterID(0) + } + + original := otel.GetMeterProvider() + t.Cleanup(func() { otel.SetMeterProvider(original) }) + + r := metric.NewManualReader() + mp := metric.NewMeterProvider(metric.WithReader(r)) + otel.SetMeterProvider(mp) + + exporter, err := stdouttrace.New( + stdouttrace.WithWriter(io.Discard)) + require.NoError(t, err) + + tt.callExportSpans(t, exporter) + + var rm metricdata.ResourceMetrics + require.NoError(t, r.Collect(context.Background(), &rm)) + + tt.assertMetrics(t, rm) + }) + } +} + +type errMeterProvider struct { + mapi.MeterProvider + + err error +} + +func (m *errMeterProvider) Meter(string, ...mapi.MeterOption) mapi.Meter { + return &errMeter{err: m.err} +} + +type errMeter struct { + mapi.Meter + + err error +} + +func (m *errMeter) Int64UpDownCounter(string, ...mapi.Int64UpDownCounterOption) (mapi.Int64UpDownCounter, error) { + return nil, m.err +} + +func (m *errMeter) Int64Counter(string, ...mapi.Int64CounterOption) (mapi.Int64Counter, error) { + return nil, m.err +} + +func (m *errMeter) Float64Histogram(string, ...mapi.Float64HistogramOption) (mapi.Float64Histogram, error) { + return nil, m.err +} + +func TestSelfObservabilityInstrumentErrors(t *testing.T) { + orig := otel.GetMeterProvider() + t.Cleanup(func() { otel.SetMeterProvider(orig) }) + mp := 
&errMeterProvider{err: assert.AnError} + otel.SetMeterProvider(mp) + + t.Setenv("OTEL_GO_X_SELF_OBSERVABILITY", "true") + _, err := stdouttrace.New() + require.ErrorIs(t, err, assert.AnError, "new instrument errors") + + assert.ErrorContains(t, err, "inflight metric") + assert.ErrorContains(t, err, "span exported metric") + assert.ErrorContains(t, err, "operation duration metric") +} + +func BenchmarkExporterExportSpans(b *testing.B) { + ss := tracetest.SpanStubs{ + {Name: "/foo"}, + { + Name: "JSON encoder cannot marshal math.Inf(1)", + Attributes: []attribute.KeyValue{attribute.Float64("", math.Inf(1))}, + }, + {Name: "/bar"}, + }.Snapshots() + + run := func(b *testing.B) { + ex, err := stdouttrace.New(stdouttrace.WithWriter(io.Discard)) + if err != nil { + b.Fatalf("failed to create exporter: %v", err) + } + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err = ex.ExportSpans(context.Background(), ss) + } + _ = err + } + + b.Run("SelfObservability", func(b *testing.B) { + b.Setenv("OTEL_GO_X_SELF_OBSERVABILITY", "true") + run(b) + }) + + b.Run("NoObservability", run) +} diff --git a/exporters/zipkin/go.mod b/exporters/zipkin/go.mod index 987a7dea146..4778034f5c6 100644 --- a/exporters/zipkin/go.mod +++ b/exporters/zipkin/go.mod @@ -7,10 +7,10 @@ require ( github.com/go-logr/stdr v1.2.2 github.com/google/go-cmp v0.7.0 github.com/openzipkin/zipkin-go v0.4.3 - github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/otel v1.37.0 - go.opentelemetry.io/otel/sdk v1.37.0 - go.opentelemetry.io/otel/trace v1.37.0 + github.com/stretchr/testify v1.11.1 + go.opentelemetry.io/otel v1.38.0 + go.opentelemetry.io/otel/sdk v1.38.0 + go.opentelemetry.io/otel/trace v1.38.0 ) require ( @@ -18,8 +18,8 @@ require ( github.com/google/uuid v1.6.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/otel/metric v1.37.0 // indirect - golang.org/x/sys v0.33.0 // indirect + 
go.opentelemetry.io/otel/metric v1.38.0 // indirect + golang.org/x/sys v0.35.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) @@ -30,3 +30,5 @@ replace go.opentelemetry.io/otel => ../.. replace go.opentelemetry.io/otel/sdk => ../../sdk replace go.opentelemetry.io/otel/metric => ../../metric + +replace go.opentelemetry.io/otel/sdk/metric => ../../sdk/metric diff --git a/exporters/zipkin/go.sum b/exporters/zipkin/go.sum index cf20fcda33d..75d99a1b676 100644 --- a/exporters/zipkin/go.sum +++ b/exporters/zipkin/go.sum @@ -19,14 +19,14 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/exporters/zipkin/model.go b/exporters/zipkin/model.go index 37363c1c22f..89c51cd5ed5 100644 --- a/exporters/zipkin/model.go +++ b/exporters/zipkin/model.go @@ -20,7 +20,7 @@ import ( semconv120 "go.opentelemetry.io/otel/semconv/v1.20.0" semconv121 "go.opentelemetry.io/otel/semconv/v1.21.0" semconv125 "go.opentelemetry.io/otel/semconv/v1.25.0" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" "go.opentelemetry.io/otel/trace" ) @@ -148,13 +148,13 @@ func toZipkinAnnotations(events []tracesdk.Event) []zkmodel.Annotation { } func attributesToJSONMapString(attributes []attribute.KeyValue) string { - m := make(map[string]interface{}, len(attributes)) + m := make(map[string]any, len(attributes)) for _, a := range attributes { - m[(string)(a.Key)] = a.Value.AsInterface() + m[string(a.Key)] = a.Value.AsInterface() } // if an error happens, the result will be an empty string jsonBytes, _ := json.Marshal(m) - return (string)(jsonBytes) + return string(jsonBytes) } // attributeToStringPair serializes each attribute to a string pair. @@ -163,18 +163,18 @@ func attributeToStringPair(kv attribute.KeyValue) (string, string) { // For slice attributes, serialize as JSON list string. 
case attribute.BOOLSLICE: data, _ := json.Marshal(kv.Value.AsBoolSlice()) - return (string)(kv.Key), (string)(data) + return string(kv.Key), string(data) case attribute.INT64SLICE: data, _ := json.Marshal(kv.Value.AsInt64Slice()) - return (string)(kv.Key), (string)(data) + return string(kv.Key), string(data) case attribute.FLOAT64SLICE: data, _ := json.Marshal(kv.Value.AsFloat64Slice()) - return (string)(kv.Key), (string)(data) + return string(kv.Key), string(data) case attribute.STRINGSLICE: data, _ := json.Marshal(kv.Value.AsStringSlice()) - return (string)(kv.Key), (string)(data) + return string(kv.Key), string(data) default: - return (string)(kv.Key), kv.Value.Emit() + return string(kv.Key), kv.Value.Emit() } } diff --git a/exporters/zipkin/model_test.go b/exporters/zipkin/model_test.go index 7c3c224cdd9..7bbbb5a89a0 100644 --- a/exporters/zipkin/model_test.go +++ b/exporters/zipkin/model_test.go @@ -22,7 +22,7 @@ import ( tracesdk "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/sdk/trace/tracetest" semconv125 "go.opentelemetry.io/otel/semconv/v1.25.0" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" "go.opentelemetry.io/otel/trace" ) diff --git a/exporters/zipkin/zipkin.go b/exporters/zipkin/zipkin.go index e1a75ec9af5..bdd149eba49 100644 --- a/exporters/zipkin/zipkin.go +++ b/exporters/zipkin/zipkin.go @@ -146,7 +146,7 @@ func (e *Exporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpa req.Header.Set("Content-Type", "application/json") for k, v := range e.headers { - if strings.ToLower(k) == "host" { + if strings.EqualFold(k, "host") { req.Host = v } else { req.Header.Set(k, v) @@ -189,19 +189,19 @@ func (e *Exporter) Shutdown(ctx context.Context) error { return nil } -func (e *Exporter) logf(format string, args ...interface{}) { +func (e *Exporter) logf(format string, args ...any) { if e.logger != emptyLogger { e.logger.Info(fmt.Sprintf(format, args...)) } } -func (e 
*Exporter) errf(format string, args ...interface{}) error { +func (e *Exporter) errf(format string, args ...any) error { e.logf(format, args...) return fmt.Errorf(format, args...) } // MarshalLog is the marshaling function used by the logging system to represent this Exporter. -func (e *Exporter) MarshalLog() interface{} { +func (e *Exporter) MarshalLog() any { return struct { Type string URL string diff --git a/exporters/zipkin/zipkin_test.go b/exporters/zipkin/zipkin_test.go index 6c3f279aa4c..f023ab9ae7e 100644 --- a/exporters/zipkin/zipkin_test.go +++ b/exporters/zipkin/zipkin_test.go @@ -26,7 +26,7 @@ import ( "go.opentelemetry.io/otel/sdk/resource" sdktrace "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/sdk/trace/tracetest" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" "go.opentelemetry.io/otel/trace" ) @@ -171,7 +171,7 @@ type logStore struct { } func (s *logStore) Write(p []byte) (n int, err error) { - msg := (string)(p) + msg := string(p) if s.T != nil { s.T.Logf("%s", msg) } @@ -362,7 +362,7 @@ func TestErrorOnExportShutdownExporter(t *testing.T) { func TestLogrFormatting(t *testing.T) { format := "string %q, int %d" - args := []interface{}{"s", 1} + args := []any{"s", 1} var buf bytes.Buffer l := funcr.New(func(prefix, args string) { diff --git a/go.mod b/go.mod index 08c7f5d5139..fdbd6f81ca7 100644 --- a/go.mod +++ b/go.mod @@ -6,10 +6,10 @@ require ( github.com/go-logr/logr v1.4.3 github.com/go-logr/stdr v1.2.2 github.com/google/go-cmp v0.7.0 - github.com/stretchr/testify v1.10.0 + github.com/stretchr/testify v1.11.1 go.opentelemetry.io/auto/sdk v1.1.0 - go.opentelemetry.io/otel/metric v1.37.0 - go.opentelemetry.io/otel/trace v1.37.0 + go.opentelemetry.io/otel/metric v1.38.0 + go.opentelemetry.io/otel/trace v1.38.0 ) require ( diff --git a/go.sum b/go.sum index 349ceba9a44..e75f2d7593d 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ github.com/pmezard/go-difflib v1.0.0 
h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/internal/baggage/context_test.go b/internal/baggage/context_test.go index 7074f4b49a4..17870d4c9bb 100644 --- a/internal/baggage/context_test.go +++ b/internal/baggage/context_test.go @@ -44,7 +44,7 @@ func TestListFromContext(t *testing.T) { func TestContextWithSetHook(t *testing.T) { var called bool - f := func(ctx context.Context, list List) context.Context { + f := func(ctx context.Context, _ List) context.Context { called = true return ctx } @@ -65,7 +65,7 @@ func TestContextWithSetHook(t *testing.T) { func TestContextWithGetHook(t *testing.T) { var called bool - f := func(ctx context.Context, list List) List { + f := func(_ context.Context, list List) List { called = true return list } diff --git a/internal/global/alternate_meter_test.go b/internal/global/alternate_meter_test.go index 0bc66a29bef..f3994c56d07 100644 --- a/internal/global/alternate_meter_test.go +++ b/internal/global/alternate_meter_test.go @@ -28,7 +28,7 @@ type altMeterProvider struct { var _ metric.MeterProvider = &altMeterProvider{} -func 
(amp *altMeterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter { +func (amp *altMeterProvider) Meter(string, ...metric.MeterOption) metric.Meter { am := &altMeter{ provider: amp, } @@ -106,28 +106,28 @@ func (*altRegistration) Unregister() error { return nil } -func (am *altMeter) Int64Counter(name string, _ ...metric.Int64CounterOption) (metric.Int64Counter, error) { +func (*altMeter) Int64Counter(name string, _ ...metric.Int64CounterOption) (metric.Int64Counter, error) { return noop.NewMeterProvider().Meter("noop").Int64Counter(name) } -func (am *altMeter) Int64UpDownCounter( +func (*altMeter) Int64UpDownCounter( name string, _ ...metric.Int64UpDownCounterOption, ) (metric.Int64UpDownCounter, error) { return noop.NewMeterProvider().Meter("noop").Int64UpDownCounter(name) } -func (am *altMeter) Int64Histogram(name string, _ ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { +func (*altMeter) Int64Histogram(name string, _ ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { return noop.NewMeterProvider().Meter("noop").Int64Histogram(name) } -func (am *altMeter) Int64Gauge(name string, _ ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { +func (*altMeter) Int64Gauge(name string, _ ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { return noop.NewMeterProvider().Meter("noop").Int64Gauge(name) } func (am *altMeter) Int64ObservableCounter( - name string, - options ...metric.Int64ObservableCounterOption, + string, + ...metric.Int64ObservableCounterOption, ) (metric.Int64ObservableCounter, error) { return &testAiCounter{ meter: am, @@ -135,8 +135,8 @@ func (am *altMeter) Int64ObservableCounter( } func (am *altMeter) Int64ObservableUpDownCounter( - name string, - options ...metric.Int64ObservableUpDownCounterOption, + string, + ...metric.Int64ObservableUpDownCounterOption, ) (metric.Int64ObservableUpDownCounter, error) { return &testAiUpDownCounter{ meter: am, @@ -144,39 +144,39 @@ func (am *altMeter) 
Int64ObservableUpDownCounter( } func (am *altMeter) Int64ObservableGauge( - name string, - options ...metric.Int64ObservableGaugeOption, + string, + ...metric.Int64ObservableGaugeOption, ) (metric.Int64ObservableGauge, error) { return &testAiGauge{ meter: am, }, nil } -func (am *altMeter) Float64Counter(name string, _ ...metric.Float64CounterOption) (metric.Float64Counter, error) { +func (*altMeter) Float64Counter(name string, _ ...metric.Float64CounterOption) (metric.Float64Counter, error) { return noop.NewMeterProvider().Meter("noop").Float64Counter(name) } -func (am *altMeter) Float64UpDownCounter( +func (*altMeter) Float64UpDownCounter( name string, _ ...metric.Float64UpDownCounterOption, ) (metric.Float64UpDownCounter, error) { return noop.NewMeterProvider().Meter("noop").Float64UpDownCounter(name) } -func (am *altMeter) Float64Histogram( +func (*altMeter) Float64Histogram( name string, - options ...metric.Float64HistogramOption, + _ ...metric.Float64HistogramOption, ) (metric.Float64Histogram, error) { return noop.NewMeterProvider().Meter("noop").Float64Histogram(name) } -func (am *altMeter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { +func (*altMeter) Float64Gauge(name string, _ ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { return noop.NewMeterProvider().Meter("noop").Float64Gauge(name) } func (am *altMeter) Float64ObservableCounter( - name string, - options ...metric.Float64ObservableCounterOption, + string, + ...metric.Float64ObservableCounterOption, ) (metric.Float64ObservableCounter, error) { return &testAfCounter{ meter: am, @@ -184,8 +184,8 @@ func (am *altMeter) Float64ObservableCounter( } func (am *altMeter) Float64ObservableUpDownCounter( - name string, - options ...metric.Float64ObservableUpDownCounterOption, + string, + ...metric.Float64ObservableUpDownCounterOption, ) (metric.Float64ObservableUpDownCounter, error) { return &testAfUpDownCounter{ meter: am, @@ -193,8 +193,8 @@ func 
(am *altMeter) Float64ObservableUpDownCounter( } func (am *altMeter) Float64ObservableGauge( - name string, - options ...metric.Float64ObservableGaugeOption, + string, + ...metric.Float64ObservableGaugeOption, ) (metric.Float64ObservableGauge, error) { return &testAfGauge{ meter: am, diff --git a/internal/global/instruments_test.go b/internal/global/instruments_test.go index 74a89892bb8..bcf1e4f3936 100644 --- a/internal/global/instruments_test.go +++ b/internal/global/instruments_test.go @@ -55,19 +55,19 @@ func testInt64ConcurrentSafe(interact func(int64), setDelegate func(metric.Meter func TestAsyncInstrumentSetDelegateConcurrentSafe(t *testing.T) { // Float64 Instruments t.Run("Float64", func(t *testing.T) { - t.Run("Counter", func(t *testing.T) { + t.Run("Counter", func(*testing.T) { delegate := &afCounter{} f := func(float64) { _ = delegate.unwrap() } testFloat64ConcurrentSafe(f, delegate.setDelegate) }) - t.Run("UpDownCounter", func(t *testing.T) { + t.Run("UpDownCounter", func(*testing.T) { delegate := &afUpDownCounter{} f := func(float64) { _ = delegate.unwrap() } testFloat64ConcurrentSafe(f, delegate.setDelegate) }) - t.Run("Gauge", func(t *testing.T) { + t.Run("Gauge", func(*testing.T) { delegate := &afGauge{} f := func(float64) { _ = delegate.unwrap() } testFloat64ConcurrentSafe(f, delegate.setDelegate) @@ -77,19 +77,19 @@ func TestAsyncInstrumentSetDelegateConcurrentSafe(t *testing.T) { // Int64 Instruments t.Run("Int64", func(t *testing.T) { - t.Run("Counter", func(t *testing.T) { + t.Run("Counter", func(*testing.T) { delegate := &aiCounter{} f := func(int64) { _ = delegate.unwrap() } testInt64ConcurrentSafe(f, delegate.setDelegate) }) - t.Run("UpDownCounter", func(t *testing.T) { + t.Run("UpDownCounter", func(*testing.T) { delegate := &aiUpDownCounter{} f := func(int64) { _ = delegate.unwrap() } testInt64ConcurrentSafe(f, delegate.setDelegate) }) - t.Run("Gauge", func(t *testing.T) { + t.Run("Gauge", func(*testing.T) { delegate := &aiGauge{} f := 
func(int64) { _ = delegate.unwrap() } testInt64ConcurrentSafe(f, delegate.setDelegate) @@ -99,26 +99,26 @@ func TestAsyncInstrumentSetDelegateConcurrentSafe(t *testing.T) { func TestSyncInstrumentSetDelegateConcurrentSafe(t *testing.T) { // Float64 Instruments - t.Run("Float64", func(t *testing.T) { - t.Run("Counter", func(t *testing.T) { + t.Run("Float64", func(*testing.T) { + t.Run("Counter", func(*testing.T) { delegate := &sfCounter{} f := func(v float64) { delegate.Add(context.Background(), v) } testFloat64ConcurrentSafe(f, delegate.setDelegate) }) - t.Run("UpDownCounter", func(t *testing.T) { + t.Run("UpDownCounter", func(*testing.T) { delegate := &sfUpDownCounter{} f := func(v float64) { delegate.Add(context.Background(), v) } testFloat64ConcurrentSafe(f, delegate.setDelegate) }) - t.Run("Histogram", func(t *testing.T) { + t.Run("Histogram", func(*testing.T) { delegate := &sfHistogram{} f := func(v float64) { delegate.Record(context.Background(), v) } testFloat64ConcurrentSafe(f, delegate.setDelegate) }) - t.Run("Gauge", func(t *testing.T) { + t.Run("Gauge", func(*testing.T) { delegate := &sfGauge{} f := func(v float64) { delegate.Record(context.Background(), v) } testFloat64ConcurrentSafe(f, delegate.setDelegate) @@ -127,26 +127,26 @@ func TestSyncInstrumentSetDelegateConcurrentSafe(t *testing.T) { // Int64 Instruments - t.Run("Int64", func(t *testing.T) { - t.Run("Counter", func(t *testing.T) { + t.Run("Int64", func(*testing.T) { + t.Run("Counter", func(*testing.T) { delegate := &siCounter{} f := func(v int64) { delegate.Add(context.Background(), v) } testInt64ConcurrentSafe(f, delegate.setDelegate) }) - t.Run("UpDownCounter", func(t *testing.T) { + t.Run("UpDownCounter", func(*testing.T) { delegate := &siUpDownCounter{} f := func(v int64) { delegate.Add(context.Background(), v) } testInt64ConcurrentSafe(f, delegate.setDelegate) }) - t.Run("Histogram", func(t *testing.T) { + t.Run("Histogram", func(*testing.T) { delegate := &siHistogram{} f := func(v int64) 
{ delegate.Record(context.Background(), v) } testInt64ConcurrentSafe(f, delegate.setDelegate) }) - t.Run("Gauge", func(t *testing.T) { + t.Run("Gauge", func(*testing.T) { delegate := &siGauge{} f := func(v int64) { delegate.Record(context.Background(), v) } testInt64ConcurrentSafe(f, delegate.setDelegate) diff --git a/internal/global/internal_logging.go b/internal/global/internal_logging.go index adbca7d3477..86d7f4ba086 100644 --- a/internal/global/internal_logging.go +++ b/internal/global/internal_logging.go @@ -41,22 +41,22 @@ func GetLogger() logr.Logger { // Info prints messages about the general state of the API or SDK. // This should usually be less than 5 messages a minute. -func Info(msg string, keysAndValues ...interface{}) { +func Info(msg string, keysAndValues ...any) { GetLogger().V(4).Info(msg, keysAndValues...) } // Error prints messages about exceptional states of the API or SDK. -func Error(err error, msg string, keysAndValues ...interface{}) { +func Error(err error, msg string, keysAndValues ...any) { GetLogger().Error(err, msg, keysAndValues...) } // Debug prints messages about all internal changes in the API or SDK. -func Debug(msg string, keysAndValues ...interface{}) { +func Debug(msg string, keysAndValues ...any) { GetLogger().V(8).Info(msg, keysAndValues...) } // Warn prints messages about warnings in the API or SDK. // Not an error but is likely more important than an informational event. -func Warn(msg string, keysAndValues ...interface{}) { +func Warn(msg string, keysAndValues ...any) { GetLogger().V(1).Info(msg, keysAndValues...) 
} diff --git a/internal/global/internal_logging_test.go b/internal/global/internal_logging_test.go index 96287146add..5937b0f3062 100644 --- a/internal/global/internal_logging_test.go +++ b/internal/global/internal_logging_test.go @@ -12,11 +12,9 @@ import ( "testing" "github.com/go-logr/logr" - - "github.com/stretchr/testify/assert" - "github.com/go-logr/logr/funcr" "github.com/go-logr/stdr" + "github.com/stretchr/testify/assert" ) func TestLoggerConcurrentSafe(t *testing.T) { @@ -90,8 +88,8 @@ func TestLogLevel(t *testing.T) { } func newBuffLogger(buf *bytes.Buffer, verbosity int) logr.Logger { - return funcr.New(func(prefix, args string) { - _, _ = buf.Write([]byte(args)) + return funcr.New(func(_, args string) { + _, _ = buf.WriteString(args) }, funcr.Options{ Verbosity: verbosity, }) diff --git a/internal/global/meter_test.go b/internal/global/meter_test.go index 7b444d61780..219fbf50d8f 100644 --- a/internal/global/meter_test.go +++ b/internal/global/meter_test.go @@ -18,7 +18,7 @@ import ( "go.opentelemetry.io/otel/metric/noop" ) -func TestMeterProviderConcurrentSafe(t *testing.T) { +func TestMeterProviderConcurrentSafe(*testing.T) { mp := &meterProvider{} done := make(chan struct{}) finish := make(chan struct{}) @@ -39,7 +39,7 @@ func TestMeterProviderConcurrentSafe(t *testing.T) { <-done } -var zeroCallback metric.Callback = func(ctx context.Context, or metric.Observer) error { +var zeroCallback metric.Callback = func(context.Context, metric.Observer) error { return nil } @@ -143,7 +143,7 @@ func testSetupAllInstrumentTypes( _, err = m.Int64ObservableGauge("test_Async_Gauge") assert.NoError(t, err) - _, err = m.RegisterCallback(func(ctx context.Context, obs metric.Observer) error { + _, err = m.RegisterCallback(func(_ context.Context, obs metric.Observer) error { obs.ObserveFloat64(afcounter, 3) return nil }, afcounter) @@ -442,7 +442,7 @@ type failingRegisterCallbackMeter struct { noop.Meter } -func (m *failingRegisterCallbackMeter) RegisterCallback( 
+func (*failingRegisterCallbackMeter) RegisterCallback( metric.Callback, ...metric.Observable, ) (metric.Registration, error) { diff --git a/internal/global/meter_types_test.go b/internal/global/meter_types_test.go index a0257b34c11..0bcc6b58d83 100644 --- a/internal/global/meter_types_test.go +++ b/internal/global/meter_types_test.go @@ -16,9 +16,8 @@ type testMeterProvider struct { count int } -func (p *testMeterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter { +func (p *testMeterProvider) Meter(string, ...metric.MeterOption) metric.Meter { p.count++ - return &testMeter{} } @@ -46,105 +45,105 @@ type testMeter struct { callbacks []metric.Callback } -func (m *testMeter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) { +func (m *testMeter) Int64Counter(string, ...metric.Int64CounterOption) (metric.Int64Counter, error) { m.siCount++ return &testCountingIntInstrument{}, nil } func (m *testMeter) Int64UpDownCounter( - name string, - options ...metric.Int64UpDownCounterOption, + string, + ...metric.Int64UpDownCounterOption, ) (metric.Int64UpDownCounter, error) { m.siUDCount++ return &testCountingIntInstrument{}, nil } -func (m *testMeter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { +func (m *testMeter) Int64Histogram(string, ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { m.siHist++ return &testCountingIntInstrument{}, nil } -func (m *testMeter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { +func (m *testMeter) Int64Gauge(string, ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { m.siGauge++ return &testCountingIntInstrument{}, nil } func (m *testMeter) Int64ObservableCounter( - name string, - options ...metric.Int64ObservableCounterOption, + string, + ...metric.Int64ObservableCounterOption, ) (metric.Int64ObservableCounter, error) { m.aiCount++ return &testCountingIntInstrument{}, 
nil } func (m *testMeter) Int64ObservableUpDownCounter( - name string, - options ...metric.Int64ObservableUpDownCounterOption, + string, + ...metric.Int64ObservableUpDownCounterOption, ) (metric.Int64ObservableUpDownCounter, error) { m.aiUDCount++ return &testCountingIntInstrument{}, nil } func (m *testMeter) Int64ObservableGauge( - name string, - options ...metric.Int64ObservableGaugeOption, + string, + ...metric.Int64ObservableGaugeOption, ) (metric.Int64ObservableGauge, error) { m.aiGauge++ return &testCountingIntInstrument{}, nil } -func (m *testMeter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) { +func (m *testMeter) Float64Counter(string, ...metric.Float64CounterOption) (metric.Float64Counter, error) { m.sfCount++ return &testCountingFloatInstrument{}, nil } func (m *testMeter) Float64UpDownCounter( - name string, - options ...metric.Float64UpDownCounterOption, + string, + ...metric.Float64UpDownCounterOption, ) (metric.Float64UpDownCounter, error) { m.sfUDCount++ return &testCountingFloatInstrument{}, nil } func (m *testMeter) Float64Histogram( - name string, - options ...metric.Float64HistogramOption, + string, + ...metric.Float64HistogramOption, ) (metric.Float64Histogram, error) { m.sfHist++ return &testCountingFloatInstrument{}, nil } -func (m *testMeter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { +func (m *testMeter) Float64Gauge(string, ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { m.sfGauge++ return &testCountingFloatInstrument{}, nil } func (m *testMeter) Float64ObservableCounter( - name string, - options ...metric.Float64ObservableCounterOption, + string, + ...metric.Float64ObservableCounterOption, ) (metric.Float64ObservableCounter, error) { m.afCount++ return &testCountingFloatInstrument{}, nil } func (m *testMeter) Float64ObservableUpDownCounter( - name string, - options ...metric.Float64ObservableUpDownCounterOption, + 
string, + ...metric.Float64ObservableUpDownCounterOption, ) (metric.Float64ObservableUpDownCounter, error) { m.afUDCount++ return &testCountingFloatInstrument{}, nil } func (m *testMeter) Float64ObservableGauge( - name string, - options ...metric.Float64ObservableGaugeOption, + string, + ...metric.Float64ObservableGaugeOption, ) (metric.Float64ObservableGauge, error) { m.afGauge++ return &testCountingFloatInstrument{}, nil } // RegisterCallback captures the function that will be called during Collect. -func (m *testMeter) RegisterCallback(f metric.Callback, i ...metric.Observable) (metric.Registration, error) { +func (m *testMeter) RegisterCallback(f metric.Callback, _ ...metric.Observable) (metric.Registration, error) { m.callbacks = append(m.callbacks, f) return testReg{ f: func(idx int) func() { @@ -183,14 +182,14 @@ type observationRecorder struct { ctx context.Context } -func (o observationRecorder) ObserveFloat64(i metric.Float64Observable, value float64, _ ...metric.ObserveOption) { +func (observationRecorder) ObserveFloat64(i metric.Float64Observable, _ float64, _ ...metric.ObserveOption) { iImpl, ok := i.(*testCountingFloatInstrument) if ok { iImpl.observe() } } -func (o observationRecorder) ObserveInt64(i metric.Int64Observable, value int64, _ ...metric.ObserveOption) { +func (observationRecorder) ObserveInt64(i metric.Int64Observable, _ int64, _ ...metric.ObserveOption) { iImpl, ok := i.(*testCountingIntInstrument) if ok { iImpl.observe() diff --git a/internal/global/trace.go b/internal/global/trace.go index 49e4ac4faab..bf5cf3119b2 100644 --- a/internal/global/trace.go +++ b/internal/global/trace.go @@ -26,6 +26,7 @@ import ( "sync/atomic" "go.opentelemetry.io/auto/sdk" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" diff --git a/internal/global/trace_test.go b/internal/global/trace_test.go index d513d32c20d..ce18320759c 100644 --- a/internal/global/trace_test.go +++ 
b/internal/global/trace_test.go @@ -10,8 +10,8 @@ import ( "time" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/auto/sdk" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" @@ -59,11 +59,11 @@ func TestTraceProviderDelegation(t *testing.T) { _, span1 := tracer1.Start(ctx, "span1") SetTracerProvider(fnTracerProvider{ - tracer: func(name string, opts ...trace.TracerOption) trace.Tracer { + tracer: func(name string, _ ...trace.TracerOption) trace.Tracer { spans, ok := expected[name] assert.Truef(t, ok, "invalid tracer: %s", name) return fnTracer{ - start: func(ctx context.Context, spanName string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + start: func(ctx context.Context, spanName string, _ ...trace.SpanStartOption) (context.Context, trace.Span) { if ok { if len(spans) == 0 { t.Errorf("unexpected span: %s", spanName) @@ -105,7 +105,7 @@ func TestTraceProviderDelegates(t *testing.T) { // Configure it with a spy. called := false SetTracerProvider(fnTracerProvider{ - tracer: func(name string, opts ...trace.TracerOption) trace.Tracer { + tracer: func(name string, _ ...trace.TracerOption) trace.Tracer { called = true assert.Equal(t, "abc", name) return noop.NewTracerProvider().Tracer("") @@ -142,7 +142,7 @@ func TestTraceProviderDelegatesConcurrentSafe(t *testing.T) { // Configure it with a spy. called := int32(0) SetTracerProvider(fnTracerProvider{ - tracer: func(name string, opts ...trace.TracerOption) trace.Tracer { + tracer: func(name string, _ ...trace.TracerOption) trace.Tracer { newVal := atomic.AddInt32(&called, 1) assert.Equal(t, "abc", name) if newVal == 10 { @@ -186,10 +186,10 @@ func TestTracerDelegatesConcurrentSafe(t *testing.T) { // Configure it with a spy. 
called := int32(0) SetTracerProvider(fnTracerProvider{ - tracer: func(name string, opts ...trace.TracerOption) trace.Tracer { + tracer: func(name string, _ ...trace.TracerOption) trace.Tracer { assert.Equal(t, "abc", name) return fnTracer{ - start: func(ctx context.Context, spanName string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + start: func(ctx context.Context, spanName string, _ ...trace.SpanStartOption) (context.Context, trace.Span) { newVal := atomic.AddInt32(&called, 1) assert.Equal(t, "name", spanName) if newVal == 10 { @@ -218,7 +218,7 @@ func TestTraceProviderDelegatesSameInstance(t *testing.T) { assert.Same(t, tracer, gtp.Tracer("abc", trace.WithInstrumentationVersion("xyz"))) SetTracerProvider(fnTracerProvider{ - tracer: func(name string, opts ...trace.TracerOption) trace.Tracer { + tracer: func(string, ...trace.TracerOption) trace.Tracer { return noop.NewTracerProvider().Tracer("") }, }) diff --git a/internal/internaltest/doc.go b/internal/internaltest/doc.go index 2e7479a62da..e203bfedfbf 100644 --- a/internal/internaltest/doc.go +++ b/internal/internaltest/doc.go @@ -1,6 +1,3 @@ -// Code generated by gotmpl. DO NOT MODIFY. -// source: internal/shared/internaltest/doc.go.tmpl - // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/internaltest/text_map_carrier.go b/internal/internaltest/text_map_carrier.go index 567787c12ed..4b22f6498ee 100644 --- a/internal/internaltest/text_map_carrier.go +++ b/internal/internaltest/text_map_carrier.go @@ -1,12 +1,11 @@ -// Code generated by gotmpl. DO NOT MODIFY. 
-// source: internal/shared/internaltest/text_map_carrier.go.tmpl - // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 package internaltest // import "go.opentelemetry.io/otel/internal/internaltest" import ( + "maps" + "slices" "sync" "testing" @@ -28,9 +27,7 @@ var _ propagation.TextMapCarrier = (*TextMapCarrier)(nil) // NewTextMapCarrier returns a new *TextMapCarrier populated with data. func NewTextMapCarrier(data map[string]string) *TextMapCarrier { copied := make(map[string]string, len(data)) - for k, v := range data { - copied[k] = v - } + maps.Copy(copied, data) return &TextMapCarrier{data: copied} } @@ -58,10 +55,8 @@ func (c *TextMapCarrier) Get(key string) string { func (c *TextMapCarrier) GotKey(t *testing.T, key string) bool { c.mtx.Lock() defer c.mtx.Unlock() - for _, k := range c.gets { - if k == key { - return true - } + if slices.Contains(c.gets, key) { + return true } t.Errorf("TextMapCarrier.Get(%q) has not been called", key) return false @@ -120,9 +115,7 @@ func (c *TextMapCarrier) SetN(t *testing.T, n int) bool { // Reset zeros out the recording state and sets the carried values to data. func (c *TextMapCarrier) Reset(data map[string]string) { copied := make(map[string]string, len(data)) - for k, v := range data { - copied[k] = v - } + maps.Copy(copied, data) c.mtx.Lock() defer c.mtx.Unlock() diff --git a/internal/internaltest/text_map_carrier_test.go b/internal/internaltest/text_map_carrier_test.go index a4c233564b5..2884a215ba0 100644 --- a/internal/internaltest/text_map_carrier_test.go +++ b/internal/internaltest/text_map_carrier_test.go @@ -1,6 +1,3 @@ -// Code generated by gotmpl. DO NOT MODIFY. 
-// source: internal/shared/internaltest/text_map_carrier_test.go.tmpl - // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/internaltest/text_map_propagator.go b/internal/internaltest/text_map_propagator.go index 25be32bcad7..c106746b00f 100644 --- a/internal/internaltest/text_map_propagator.go +++ b/internal/internaltest/text_map_propagator.go @@ -1,6 +1,3 @@ -// Code generated by gotmpl. DO NOT MODIFY. -// source: internal/shared/internaltest/text_map_propagator.go.tmpl - // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/internaltest/text_map_propagator_test.go b/internal/internaltest/text_map_propagator_test.go index 67002ec838d..6770201e019 100644 --- a/internal/internaltest/text_map_propagator_test.go +++ b/internal/internaltest/text_map_propagator_test.go @@ -1,6 +1,3 @@ -// Code generated by gotmpl. DO NOT MODIFY. -// source: internal/shared/internaltest/text_map_propagator_test.go.tmpl - // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/shared/counter/counter.go.tmpl b/internal/shared/counter/counter.go.tmpl new file mode 100644 index 00000000000..ad1b4030b90 --- /dev/null +++ b/internal/shared/counter/counter.go.tmpl @@ -0,0 +1,31 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/counter/counter.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package counter provides a simple counter for generating unique IDs. +// +// This package is used to generate unique IDs while allowing testing packages +// to reset the counter. +package counter // import "{{.pkg}}" + +import "sync/atomic" + +// exporterN is a global 0-based count of the number of exporters created. +var exporterN atomic.Int64 + +// NextExporterID returns the next unique ID for an exporter. 
+func NextExporterID() int64 { + const inc = 1 + return exporterN.Add(inc) - inc +} + +// SetExporterID sets the exporter ID counter to v and returns the previous +// value. +// +// This function is useful for testing purposes, allowing you to reset the +// counter. It should not be used in production code. +func SetExporterID(v int64) int64 { + return exporterN.Swap(v) +} diff --git a/internal/shared/counter/counter_test.go.tmpl b/internal/shared/counter/counter_test.go.tmpl new file mode 100644 index 00000000000..f3e380d3325 --- /dev/null +++ b/internal/shared/counter/counter_test.go.tmpl @@ -0,0 +1,65 @@ +// Code generated by gotmpl. DO NOT MODIFY. +// source: internal/shared/counter/counter_test.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package counter + +import ( + "sync" + "testing" +) + +func TestNextExporterID(t *testing.T) { + SetExporterID(0) + + var expected int64 + for range 10 { + id := NextExporterID() + if id != expected { + t.Errorf("NextExporterID() = %d; want %d", id, expected) + } + expected++ + } +} + +func TestSetExporterID(t *testing.T) { + SetExporterID(0) + + prev := SetExporterID(42) + if prev != 0 { + t.Errorf("SetExporterID(42) returned %d; want 0", prev) + } + + id := NextExporterID() + if id != 42 { + t.Errorf("NextExporterID() = %d; want 42", id) + } +} + +func TestNextExporterIDConcurrentSafe(t *testing.T) { + SetExporterID(0) + + const goroutines = 100 + const increments = 10 + + var wg sync.WaitGroup + wg.Add(goroutines) + + for range goroutines { + go func() { + defer wg.Done() + for range increments { + NextExporterID() + } + }() + } + + wg.Wait() + + expected := int64(goroutines * increments) + if id := NextExporterID(); id != expected { + t.Errorf("NextExporterID() = %d; want %d", id, expected) + } +} \ No newline at end of file diff --git a/internal/shared/otlp/otlplog/transform/log_test.go.tmpl b/internal/shared/otlp/otlplog/transform/log_test.go.tmpl index 
c3212d19758..a9c71c72712 100644 --- a/internal/shared/otlp/otlplog/transform/log_test.go.tmpl +++ b/internal/shared/otlp/otlplog/transform/log_test.go.tmpl @@ -22,7 +22,7 @@ import ( "go.opentelemetry.io/otel/sdk/log" "go.opentelemetry.io/otel/sdk/log/logtest" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" "go.opentelemetry.io/otel/trace" ) diff --git a/internal/shared/otlp/otlpmetric/otest/client.go.tmpl b/internal/shared/otlp/otlpmetric/otest/client.go.tmpl index 883dc2b347b..d10fb64f8ac 100644 --- a/internal/shared/otlp/otlpmetric/otest/client.go.tmpl +++ b/internal/shared/otlp/otlpmetric/otest/client.go.tmpl @@ -19,7 +19,7 @@ import ( "google.golang.org/protobuf/proto" "go.opentelemetry.io/otel" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" collpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1" cpb "go.opentelemetry.io/proto/otlp/common/v1" mpb "go.opentelemetry.io/proto/otlp/metrics/v1" diff --git a/internal/shared/otlp/otlpmetric/transform/metricdata_test.go.tmpl b/internal/shared/otlp/otlpmetric/transform/metricdata_test.go.tmpl index 028e2c94f28..b2a9b03300e 100644 --- a/internal/shared/otlp/otlpmetric/transform/metricdata_test.go.tmpl +++ b/internal/shared/otlp/otlpmetric/transform/metricdata_test.go.tmpl @@ -17,7 +17,7 @@ import ( "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" cpb "go.opentelemetry.io/proto/otlp/common/v1" mpb "go.opentelemetry.io/proto/otlp/metrics/v1" rpb "go.opentelemetry.io/proto/otlp/resource/v1" diff --git a/internal/shared/otlp/otlptrace/otlptracetest/client.go.tmpl b/internal/shared/otlp/otlptrace/otlptracetest/client.go.tmpl index a6682b34b33..e53a80a427c 100644 --- 
a/internal/shared/otlp/otlptrace/otlptracetest/client.go.tmpl +++ b/internal/shared/otlp/otlptrace/otlptracetest/client.go.tmpl @@ -107,7 +107,7 @@ func testClientStopManyTimes(t *testing.T, client otlptrace.Client) { const num int = 20 wg.Add(num) errs := make([]error, num) - for i := 0; i < num; i++ { + for i := range num { go func(idx int) { defer wg.Done() <-ch diff --git a/internal/shared/otlp/otlptrace/otlptracetest/otlptest.go.tmpl b/internal/shared/otlp/otlptrace/otlptracetest/otlptest.go.tmpl index 175cbcf1ec8..36d22d4aefc 100644 --- a/internal/shared/otlp/otlptrace/otlptracetest/otlptest.go.tmpl +++ b/internal/shared/otlp/otlptrace/otlptracetest/otlptest.go.tmpl @@ -48,7 +48,7 @@ func RunEndToEndTest(ctx context.Context, t *testing.T, exp *otlptrace.Exporter, tr2 := tp2.Tracer("test-tracer2") // Now create few spans m := 4 - for i := 0; i < m; i++ { + for i := range m { _, span := tr1.Start(ctx, "AlwaysSample") span.SetAttributes(attribute.Int64("i", int64(i))) span.End() @@ -109,7 +109,7 @@ func RunEndToEndTest(ctx context.Context, t *testing.T, exp *otlptrace.Exporter, if got, want := len(attrMap), m; got != want { t.Fatalf("span attribute unique values: got %d want %d", got, want) } - for i := 0; i < m; i++ { + for i := range m { _, ok := attrMap[int64(i)] if !ok { t.Fatalf("span with attribute %d missing", i) diff --git a/internal/tools/go.mod b/internal/tools/go.mod index 33960a586d9..1db3fcca92d 100644 --- a/internal/tools/go.mod +++ b/internal/tools/go.mod @@ -6,15 +6,14 @@ require ( github.com/Masterminds/semver v1.5.0 github.com/client9/misspell v0.3.4 github.com/gogo/protobuf v1.3.2 - github.com/golangci/golangci-lint/v2 v2.1.6 + github.com/golangci/golangci-lint/v2 v2.3.1 github.com/jcchavezs/porto v0.7.0 github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad - go.opentelemetry.io/build-tools/crosslink v0.23.1 - go.opentelemetry.io/build-tools/gotmpl v0.23.1 - go.opentelemetry.io/build-tools/multimod v0.23.1 - 
go.opentelemetry.io/build-tools/semconvgen v0.23.1 - golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b - golang.org/x/tools v0.34.0 + go.opentelemetry.io/build-tools/crosslink v0.26.2 + go.opentelemetry.io/build-tools/gotmpl v0.26.2 + go.opentelemetry.io/build-tools/multimod v0.26.2 + golang.org/x/exp v0.0.0-20250811191247-51f88131bc50 + golang.org/x/tools v0.36.0 golang.org/x/vuln v1.1.4 ) @@ -23,31 +22,34 @@ require ( 4d63.com/gochecknoglobals v0.2.2 // indirect codeberg.org/chavacava/garif v0.2.0 // indirect dario.cat/mergo v1.0.2 // indirect - github.com/4meepo/tagalign v1.4.2 // indirect + github.com/4meepo/tagalign v1.4.3 // indirect github.com/Abirdcfly/dupword v0.1.6 // indirect + github.com/AlwxSin/noinlineerr v1.0.5 // indirect github.com/Antonboom/errname v1.1.0 // indirect github.com/Antonboom/nilnil v1.1.0 // indirect github.com/Antonboom/testifylint v1.6.1 // indirect github.com/BurntSushi/toml v1.5.0 // indirect github.com/Djarvur/go-err113 v0.1.0 // indirect github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.1 // indirect - github.com/Masterminds/semver/v3 v3.3.1 // indirect + github.com/Masterminds/semver/v3 v3.4.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/OpenPeeDeeP/depguard/v2 v2.2.1 // indirect github.com/ProtonMail/go-crypto v1.3.0 // indirect - github.com/alecthomas/chroma/v2 v2.18.0 // indirect + github.com/alecthomas/chroma/v2 v2.20.0 // indirect github.com/alecthomas/go-check-sumtype v0.3.1 // indirect github.com/alexkohler/nakedret/v2 v2.0.6 // indirect github.com/alexkohler/prealloc v1.0.0 // indirect + github.com/alfatraining/structtag v1.0.0 // indirect github.com/alingse/asasalint v0.0.11 // indirect github.com/alingse/nilnesserr v0.2.0 // indirect - github.com/ashanbrown/forbidigo v1.6.0 // indirect - github.com/ashanbrown/makezero v1.2.0 // indirect + github.com/ashanbrown/forbidigo/v2 v2.1.0 // indirect + github.com/ashanbrown/makezero/v2 v2.0.1 // indirect github.com/aymanbagabas/go-osc52/v2 
v2.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bkielbasa/cyclop v1.2.3 // indirect github.com/blizzy78/varnamelen v0.8.0 // indirect github.com/bombsimon/wsl/v4 v4.7.0 // indirect + github.com/bombsimon/wsl/v5 v5.1.1 // indirect github.com/breml/bidichk v0.3.3 // indirect github.com/breml/errchkjson v0.4.1 // indirect github.com/butuzov/ireturn v0.4.0 // indirect @@ -56,16 +58,16 @@ require ( github.com/ccojocar/zxcvbn-go v1.0.4 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/charithe/durationcheck v0.0.10 // indirect - github.com/charmbracelet/colorprofile v0.3.1 // indirect + github.com/charmbracelet/colorprofile v0.3.2 // indirect github.com/charmbracelet/lipgloss v1.1.0 // indirect - github.com/charmbracelet/x/ansi v0.9.3 // indirect + github.com/charmbracelet/x/ansi v0.10.1 // indirect github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd // indirect github.com/charmbracelet/x/term v0.2.1 // indirect github.com/ckaznocha/intrange v0.3.1 // indirect github.com/cloudflare/circl v1.6.1 // indirect github.com/curioswitch/go-reassign v0.3.0 // indirect github.com/cyphar/filepath-securejoin v0.4.1 // indirect - github.com/daixiang0/gci v0.13.6 // indirect + github.com/daixiang0/gci v0.13.7 // indirect github.com/dave/dst v0.27.3 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/denis-tingaikin/go-header v0.5.0 // indirect @@ -89,26 +91,29 @@ require ( github.com/go-toolsmith/astp v1.1.0 // indirect github.com/go-toolsmith/strparse v1.1.0 // indirect github.com/go-toolsmith/typep v1.1.0 // indirect - github.com/go-viper/mapstructure/v2 v2.3.0 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect github.com/go-xmlfmt/xmlfmt v1.1.3 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/gofrs/flock v0.12.1 // indirect github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/golangci/dupl 
v0.0.0-20250308024227-f665c8d69b32 // indirect github.com/golangci/go-printf-func-name v0.1.0 // indirect - github.com/golangci/gofmt v0.0.0-20250413222143-f2e10e00591b // indirect - github.com/golangci/golines v0.0.0-20250217232252-b35a6149b587 // indirect + github.com/golangci/gofmt v0.0.0-20250704145412-3e58ba0443c6 // indirect + github.com/golangci/golines v0.0.0-20250821215611-d4663ad2c370 // indirect github.com/golangci/misspell v0.7.0 // indirect github.com/golangci/plugin-module-register v0.1.2 // indirect github.com/golangci/revgrep v0.8.0 // indirect + github.com/golangci/swaggoswag v0.0.0-20250504205917-77f2aca3143e // indirect github.com/golangci/unconvert v0.0.0-20250410112200-a129a6e6413e // indirect github.com/google/go-cmp v0.7.0 // indirect - github.com/gordonklaus/ineffassign v0.1.0 // indirect + github.com/gordonklaus/ineffassign v0.2.0 // indirect github.com/gostaticanalysis/analysisutil v0.7.1 // indirect github.com/gostaticanalysis/comment v1.5.0 // indirect github.com/gostaticanalysis/forcetypeassert v0.2.0 // indirect github.com/gostaticanalysis/nilerr v0.1.1 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-immutable-radix/v2 v2.1.0 // indirect + github.com/hashicorp/go-retryablehttp v0.7.8 // indirect github.com/hashicorp/go-version v1.7.0 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/hexops/gotextdiff v1.0.3 // indirect @@ -119,7 +124,7 @@ require ( github.com/jjti/go-spancheck v0.6.5 // indirect github.com/julz/importas v0.2.0 // indirect github.com/karamaru-alpha/copyloopvar v1.2.1 // indirect - github.com/kevinburke/ssh_config v1.2.0 // indirect + github.com/kevinburke/ssh_config v1.4.0 // indirect github.com/kisielk/errcheck v1.9.0 // indirect github.com/kkHAIKE/contextcheck v1.1.6 // indirect github.com/kulti/thelper v0.6.3 // indirect @@ -127,12 +132,13 @@ require ( github.com/lasiar/canonicalheader v1.1.2 // indirect github.com/ldez/exptostd v0.4.4 // indirect 
github.com/ldez/gomoddirectives v0.7.0 // indirect - github.com/ldez/grignotin v0.9.0 // indirect - github.com/ldez/tagliatelle v0.7.1 // indirect + github.com/ldez/grignotin v0.10.0 // indirect + github.com/ldez/tagliatelle v0.7.2 // indirect github.com/ldez/usetesting v0.5.0 // indirect github.com/leonklingele/grouper v1.1.2 // indirect github.com/lucasb-eyer/go-colorful v1.2.0 // indirect github.com/macabu/inamedparam v0.2.0 // indirect + github.com/manuelarte/embeddedstructfieldcheck v0.3.0 // indirect github.com/manuelarte/funcorder v0.5.0 // indirect github.com/maratori/testableexamples v1.0.0 // indirect github.com/maratori/testpackage v1.1.1 // indirect @@ -140,7 +146,7 @@ require ( github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.16 // indirect - github.com/mgechev/revive v1.10.0 // indirect + github.com/mgechev/revive v1.12.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/moricho/tparallel v0.3.2 // indirect github.com/muesli/termenv v0.16.0 // indirect @@ -148,15 +154,15 @@ require ( github.com/nakabonne/nestif v0.3.1 // indirect github.com/nishanths/exhaustive v0.12.0 // indirect github.com/nishanths/predeclared v0.2.2 // indirect - github.com/nunnatsa/ginkgolinter v0.19.1 // indirect + github.com/nunnatsa/ginkgolinter v0.20.0 // indirect github.com/pelletier/go-toml/v2 v2.2.4 // indirect - github.com/pjbgf/sha1cd v0.3.2 // indirect + github.com/pjbgf/sha1cd v0.4.0 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/polyfloyd/go-errorlint v1.8.0 // indirect - github.com/prometheus/client_golang v1.22.0 // indirect + github.com/prometheus/client_golang v1.23.0 // indirect github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/common v0.65.0 // indirect - github.com/prometheus/procfs v0.16.1 // indirect + github.com/prometheus/procfs v0.17.0 // indirect 
github.com/quasilyte/go-ruleguard v0.4.4 // indirect github.com/quasilyte/go-ruleguard/dsl v0.3.22 // indirect github.com/quasilyte/gogrep v0.5.0 // indirect @@ -167,28 +173,28 @@ require ( github.com/rogpeppe/go-internal v1.14.1 // indirect github.com/ryancurrah/gomodguard v1.4.1 // indirect github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect - github.com/sagikazarmark/locafero v0.9.0 // indirect + github.com/sagikazarmark/locafero v0.10.0 // indirect github.com/sanposhiho/wastedassign/v2 v2.1.0 // indirect github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect github.com/sashamelentyev/interfacebloat v1.1.0 // indirect github.com/sashamelentyev/usestdlibvars v1.29.0 // indirect - github.com/securego/gosec/v2 v2.22.5 // indirect + github.com/securego/gosec/v2 v2.22.7 // indirect github.com/sergi/go-diff v1.4.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/sivchari/containedctx v1.0.3 // indirect github.com/skeema/knownhosts v1.3.1 // indirect - github.com/sonatard/noctx v0.3.3 // indirect - github.com/sourcegraph/conc v0.3.0 // indirect + github.com/sonatard/noctx v0.4.0 // indirect + github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect github.com/sourcegraph/go-diff v0.7.0 // indirect github.com/spf13/afero v1.14.0 // indirect github.com/spf13/cast v1.9.2 // indirect github.com/spf13/cobra v1.9.1 // indirect - github.com/spf13/pflag v1.0.6 // indirect + github.com/spf13/pflag v1.0.7 // indirect github.com/spf13/viper v1.20.1 // indirect github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect github.com/stbenjam/no-sprintf-host-port v0.2.0 // indirect github.com/stretchr/objx v0.5.2 // indirect - github.com/stretchr/testify v1.10.0 // indirect + github.com/stretchr/testify v1.11.1 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/tdakkota/asciicheck v0.4.1 // indirect github.com/tetafro/godot v1.5.1 // indirect @@ -199,7 +205,7 @@ require ( github.com/ultraware/funlen v0.2.0 // indirect 
github.com/ultraware/whitespace v0.2.0 // indirect github.com/uudashr/gocognit v1.2.0 // indirect - github.com/uudashr/iface v1.4.0 // indirect + github.com/uudashr/iface v1.4.1 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/xen0n/gosmopolitan v1.3.0 // indirect github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect @@ -207,24 +213,24 @@ require ( github.com/yeya24/promlinter v0.3.0 // indirect github.com/ykadowak/zerologlint v0.1.5 // indirect gitlab.com/bosi/decorder v0.4.2 // indirect - go-simpler.org/musttag v0.13.1 // indirect - go-simpler.org/sloglint v0.11.0 // indirect - go.augendre.info/fatcontext v0.8.0 // indirect - go.opentelemetry.io/build-tools v0.23.1 // indirect + go-simpler.org/musttag v0.14.0 // indirect + go-simpler.org/sloglint v0.11.1 // indirect + go.augendre.info/arangolint v0.2.0 // indirect + go.augendre.info/fatcontext v0.8.1 // indirect + go.opentelemetry.io/build-tools v0.27.0 // indirect go.uber.org/automaxprocs v1.6.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.39.0 // indirect - golang.org/x/exp/typeparams v0.0.0-20250620022241-b7579e27df2b // indirect - golang.org/x/mod v0.25.0 // indirect - golang.org/x/net v0.41.0 // indirect - golang.org/x/sync v0.15.0 // indirect - golang.org/x/sys v0.33.0 // indirect - golang.org/x/telemetry v0.0.0-20250624183230-fef9409b2ec8 // indirect - golang.org/x/text v0.26.0 // indirect - google.golang.org/protobuf v1.36.6 // indirect + golang.org/x/crypto v0.41.0 // indirect + golang.org/x/exp/typeparams v0.0.0-20250811191247-51f88131bc50 // indirect + golang.org/x/mod v0.27.0 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/sync v0.16.0 // indirect + golang.org/x/sys v0.35.0 // indirect + golang.org/x/telemetry v0.0.0-20250807160809-1a19826ec488 // indirect + golang.org/x/text v0.28.0 // indirect + google.golang.org/protobuf v1.36.8 // indirect gopkg.in/warnings.v0 v0.1.2 // 
indirect - gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect honnef.co/go/tools v0.6.1 // indirect mvdan.cc/gofumpt v0.8.0 // indirect diff --git a/internal/tools/go.sum b/internal/tools/go.sum index 4a450f40d5b..42f896b0520 100644 --- a/internal/tools/go.sum +++ b/internal/tools/go.sum @@ -6,10 +6,12 @@ codeberg.org/chavacava/garif v0.2.0 h1:F0tVjhYbuOCnvNcU3YSpO6b3Waw6Bimy4K0mM8y6M codeberg.org/chavacava/garif v0.2.0/go.mod h1:P2BPbVbT4QcvLZrORc2T29szK3xEOlnl0GiPTJmEqBQ= dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= -github.com/4meepo/tagalign v1.4.2 h1:0hcLHPGMjDyM1gHG58cS73aQF8J4TdVR96TZViorO9E= -github.com/4meepo/tagalign v1.4.2/go.mod h1:+p4aMyFM+ra7nb41CnFG6aSDXqRxU/w1VQqScKqDARI= +github.com/4meepo/tagalign v1.4.3 h1:Bnu7jGWwbfpAie2vyl63Zup5KuRv21olsPIha53BJr8= +github.com/4meepo/tagalign v1.4.3/go.mod h1:00WwRjiuSbrRJnSVeGWPLp2epS5Q/l4UEy0apLLS37c= github.com/Abirdcfly/dupword v0.1.6 h1:qeL6u0442RPRe3mcaLcbaCi2/Y/hOcdtw6DE9odjz9c= github.com/Abirdcfly/dupword v0.1.6/go.mod h1:s+BFMuL/I4YSiFv29snqyjwzDp4b65W2Kvy+PKzZ6cw= +github.com/AlwxSin/noinlineerr v1.0.5 h1:RUjt63wk1AYWTXtVXbSqemlbVTb23JOSRiNsshj7TbY= +github.com/AlwxSin/noinlineerr v1.0.5/go.mod h1:+QgkkoYrMH7RHvcdxdlI7vYYEdgeoFOVjU9sUhw/rQc= github.com/Antonboom/errname v1.1.0 h1:A+ucvdpMwlo/myWrkHEUEBWc/xuXdud23S8tmTb/oAE= github.com/Antonboom/errname v1.1.0/go.mod h1:O1NMrzgUcVBGIfi3xlVuvX8Q/VP/73sseCaAppfjqZw= github.com/Antonboom/nilnil v1.1.0 h1:jGxJxjgYS3VUUtOTNk8Z1icwT5ESpLH/426fjmQG+ng= @@ -24,8 +26,8 @@ github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.1 h1:Sz1JIXEcSfhz7fUi7xHnh github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.1/go.mod h1:n/LSCXNuIYqVfBlVXyHfMQkZDdp1/mmxfSjADd3z1Zg= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= 
-github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= -github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= @@ -35,16 +37,18 @@ github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBi github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE= github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0= github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= -github.com/alecthomas/chroma/v2 v2.18.0 h1:6h53Q4hW83SuF+jcsp7CVhLsMozzvQvO8HBbKQW+gn4= -github.com/alecthomas/chroma/v2 v2.18.0/go.mod h1:RVX6AvYm4VfYe/zsk7mjHueLDZor3aWCNE14TFlepBk= +github.com/alecthomas/chroma/v2 v2.20.0 h1:sfIHpxPyR07/Oylvmcai3X/exDlE8+FA820NTz+9sGw= +github.com/alecthomas/chroma/v2 v2.20.0/go.mod h1:e7tViK0xh/Nf4BYHl00ycY6rV7b8iXBksI9E359yNmA= github.com/alecthomas/go-check-sumtype v0.3.1 h1:u9aUvbGINJxLVXiFvHUlPEaD7VDULsrxJb4Aq31NLkU= github.com/alecthomas/go-check-sumtype v0.3.1/go.mod h1:A8TSiN3UPRw3laIgWEUOHHLPa6/r9MtoigdlP5h3K/E= -github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc= -github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= +github.com/alecthomas/repr v0.5.1 h1:E3G4t2QbHTSNpPKBgMTln5KLkZHLOcU7r37J4pXBuIg= +github.com/alecthomas/repr v0.5.1/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= github.com/alexkohler/nakedret/v2 v2.0.6 
h1:ME3Qef1/KIKr3kWX3nti3hhgNxw6aqN5pZmQiFSsuzQ= github.com/alexkohler/nakedret/v2 v2.0.6/go.mod h1:l3RKju/IzOMQHmsEvXwkqMDzHHvurNQfAgE1eVmT40Q= github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw= github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= +github.com/alfatraining/structtag v1.0.0 h1:2qmcUqNcCoyVJ0up879K614L9PazjBSFruTB0GOFjCc= +github.com/alfatraining/structtag v1.0.0/go.mod h1:p3Xi5SwzTi+Ryj64DqjLWz7XurHxbGsq6y3ubePJPus= github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw= github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I= github.com/alingse/nilnesserr v0.2.0 h1:raLem5KG7EFVb4UIDAXgrv3N2JIaffeKNtcEXkEWd/w= @@ -53,10 +57,10 @@ github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFI github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/ashanbrown/forbidigo v1.6.0 h1:D3aewfM37Yb3pxHujIPSpTf6oQk9sc9WZi8gerOIVIY= -github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU= -github.com/ashanbrown/makezero v1.2.0 h1:/2Lp1bypdmK9wDIq7uWBlDF1iMUpIIS4A+pF6C9IEUU= -github.com/ashanbrown/makezero v1.2.0/go.mod h1:dxlPhHbDMC6N6xICzFBSK+4njQDdK8euNO0qjQMtGY4= +github.com/ashanbrown/forbidigo/v2 v2.1.0 h1:NAxZrWqNUQiDz19FKScQ/xvwzmij6BiOw3S0+QUQ+Hs= +github.com/ashanbrown/forbidigo/v2 v2.1.0/go.mod h1:0zZfdNAuZIL7rSComLGthgc/9/n2FqspBOH90xlCHdA= +github.com/ashanbrown/makezero/v2 v2.0.1 h1:r8GtKetWOgoJ4sLyUx97UTwyt2dO7WkGFHizn/Lo8TY= +github.com/ashanbrown/makezero/v2 v2.0.1/go.mod h1:kKU4IMxmYW1M4fiEHMb2vc5SFoPzXvgbMR9gIp5pjSw= github.com/aymanbagabas/go-osc52/v2 
v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -67,6 +71,8 @@ github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= github.com/bombsimon/wsl/v4 v4.7.0 h1:1Ilm9JBPRczjyUs6hvOPKvd7VL1Q++PL8M0SXBDf+jQ= github.com/bombsimon/wsl/v4 v4.7.0/go.mod h1:uV/+6BkffuzSAVYD+yGyld1AChO7/EuLrCF/8xTiapg= +github.com/bombsimon/wsl/v5 v5.1.1 h1:cQg5KJf9FlctAH4cpL9vLKnziYknoCMCdqXl0wjl72Q= +github.com/bombsimon/wsl/v5 v5.1.1/go.mod h1:Gp8lD04z27wm3FANIUPZycXp+8huVsn0oxc+n4qfV9I= github.com/breml/bidichk v0.3.3 h1:WSM67ztRusf1sMoqH6/c4OBCUlRVTKq+CbSeo0R17sE= github.com/breml/bidichk v0.3.3/go.mod h1:ISbsut8OnjB367j5NseXEGGgO/th206dVa427kR8YTE= github.com/breml/errchkjson v0.4.1 h1:keFSS8D7A2T0haP9kzZTi7o26r7kE3vymjZNeNDRDwg= @@ -83,12 +89,12 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/charithe/durationcheck v0.0.10 h1:wgw73BiocdBDQPik+zcEoBG/ob8uyBHf2iyoHGPf5w4= github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ= -github.com/charmbracelet/colorprofile v0.3.1 h1:k8dTHMd7fgw4bnFd7jXTLZrSU/CQrKnL3m+AxCzDz40= -github.com/charmbracelet/colorprofile v0.3.1/go.mod h1:/GkGusxNs8VB/RSOh3fu0TJmQ4ICMMPApIIVn0KszZ0= +github.com/charmbracelet/colorprofile v0.3.2 h1:9J27WdztfJQVAQKX2WOlSSRB+5gaKqqITmrvb1uTIiI= +github.com/charmbracelet/colorprofile v0.3.2/go.mod h1:mTD5XzNeWHj8oqHb+S1bssQb7vIHbepiebQ2kPKVKbI= github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY= github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30= 
-github.com/charmbracelet/x/ansi v0.9.3 h1:BXt5DHS/MKF+LjuK4huWrC6NCvHtexww7dMayh6GXd0= -github.com/charmbracelet/x/ansi v0.9.3/go.mod h1:3RQDQ6lDnROptfpWuUVIUG64bD2g2BgntdxH0Ya5TeE= +github.com/charmbracelet/x/ansi v0.10.1 h1:rL3Koar5XvX0pHGfovN03f5cxLbCF2YvLeyz7D2jVDQ= +github.com/charmbracelet/x/ansi v0.10.1/go.mod h1:3RQDQ6lDnROptfpWuUVIUG64bD2g2BgntdxH0Ya5TeE= github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd h1:vy0GVL4jeHEwG5YOXDmi86oYw2yuYUGqz6a8sLwg0X8= github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs= github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ= @@ -104,8 +110,8 @@ github.com/curioswitch/go-reassign v0.3.0 h1:dh3kpQHuADL3cobV/sSGETA8DOv457dwl+f github.com/curioswitch/go-reassign v0.3.0/go.mod h1:nApPCCTtqLJN/s8HfItCcKV0jIPwluBOvZP+dsJGA88= github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= -github.com/daixiang0/gci v0.13.6 h1:RKuEOSkGpSadkGbvZ6hJ4ddItT3cVZ9Vn9Rybk6xjl8= -github.com/daixiang0/gci v0.13.6/go.mod h1:12etP2OniiIdP4q+kjUGrC/rUagga7ODbqsom5Eo5Yk= +github.com/daixiang0/gci v0.13.7 h1:+0bG5eK9vlI08J+J/NWGbWPTNiXPG4WhNLJOkSxWITQ= +github.com/daixiang0/gci v0.13.7/go.mod h1:812WVN6JLFY9S6Tv76twqmNqevN0pa3SX3nih0brVzQ= github.com/dave/dst v0.27.3 h1:P1HPoMza3cMEquVf9kKy8yXsFirry4zEnWOdYPOoIzY= github.com/dave/dst v0.27.3/go.mod h1:jHh6EOibnHgcUW3WjKHisiooEkYwqpHLBSX1iOBhEyc= github.com/dave/jennifer v1.7.1 h1:B4jJJDHelWcDhlRQxWeo0Npa/pYKBLrirAQoTN45txo= @@ -175,8 +181,8 @@ github.com/go-toolsmith/strparse v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQi github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ= github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus= github.com/go-toolsmith/typep 
v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig= -github.com/go-viper/mapstructure/v2 v2.3.0 h1:27XbWsHIqhbdR5TIC911OfYvgSaW93HM+dX7970Q7jk= -github.com/go-viper/mapstructure/v2 v2.3.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/go-xmlfmt/xmlfmt v1.1.3 h1:t8Ey3Uy7jDSEisW2K3somuMKIpzktkWptA0iFCnRUWY= github.com/go-xmlfmt/xmlfmt v1.1.3/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= @@ -191,18 +197,20 @@ github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32 h1:WUvBfQL6EW/40l6Om github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32/go.mod h1:NUw9Zr2Sy7+HxzdjIULge71wI6yEg1lWQr7Evcu8K0E= github.com/golangci/go-printf-func-name v0.1.0 h1:dVokQP+NMTO7jwO4bwsRwLWeudOVUPPyAKJuzv8pEJU= github.com/golangci/go-printf-func-name v0.1.0/go.mod h1:wqhWFH5mUdJQhweRnldEywnR5021wTdZSNgwYceV14s= -github.com/golangci/gofmt v0.0.0-20250413222143-f2e10e00591b h1:Aa2SLCpcFsO9PI39l00IkHCP79z/D9gk3EwZBoJGv1k= -github.com/golangci/gofmt v0.0.0-20250413222143-f2e10e00591b/go.mod h1:ivJ9QDg0XucIkmwhzCDsqcnxxlDStoTl89jDMIoNxKY= -github.com/golangci/golangci-lint/v2 v2.1.6 h1:LXqShFfAGM5BDzEOWD2SL1IzJAgUOqES/HRBsfKjI+w= -github.com/golangci/golangci-lint/v2 v2.1.6/go.mod h1:EPj+fgv4TeeBq3TcqaKZb3vkiV5dP4hHHKhXhEhzci8= -github.com/golangci/golines v0.0.0-20250217232252-b35a6149b587 h1:RXtAfHDBWAv49/t94l3j9Iqvy6eXL/nm56EejqrZuQc= -github.com/golangci/golines v0.0.0-20250217232252-b35a6149b587/go.mod h1:k9mmcyWKSTMcPPvQUCfRWWQ9VHJ1U9Dc0R7kaXAgtnQ= +github.com/golangci/gofmt v0.0.0-20250704145412-3e58ba0443c6 h1:jlKy3uQkETB3zMBK8utduvojT+If2nDAM1pWpEzXjaY= +github.com/golangci/gofmt v0.0.0-20250704145412-3e58ba0443c6/go.mod h1:OyaRySOXorMn8zJqFku8YsKptIhPkANyKKTMC+rqMCs= 
+github.com/golangci/golangci-lint/v2 v2.3.1 h1:kregGxX/IsDeHCmBbHo0LKJ5wNLKMGosMBTrxKyIweM= +github.com/golangci/golangci-lint/v2 v2.3.1/go.mod h1:JEcfo5MEAzo6nY7SLzLzhHoYBJudAd55rgB5ZYOHrXE= +github.com/golangci/golines v0.0.0-20250821215611-d4663ad2c370 h1:O2u8NCU/gGczNpU7/yjZIAvXMHLwKCAKsNc8axyQPWU= +github.com/golangci/golines v0.0.0-20250821215611-d4663ad2c370/go.mod h1:k9mmcyWKSTMcPPvQUCfRWWQ9VHJ1U9Dc0R7kaXAgtnQ= github.com/golangci/misspell v0.7.0 h1:4GOHr/T1lTW0hhR4tgaaV1WS/lJ+ncvYCoFKmqJsj0c= github.com/golangci/misspell v0.7.0/go.mod h1:WZyyI2P3hxPY2UVHs3cS8YcllAeyfquQcKfdeE9AFVg= github.com/golangci/plugin-module-register v0.1.2 h1:e5WM6PO6NIAEcij3B053CohVp3HIYbzSuP53UAYgOpg= github.com/golangci/plugin-module-register v0.1.2/go.mod h1:1+QGTsKBvAIvPvoY/os+G5eoqxWn70HYDm2uvUyGuVw= github.com/golangci/revgrep v0.8.0 h1:EZBctwbVd0aMeRnNUsFogoyayvKHyxlV3CdUA46FX2s= github.com/golangci/revgrep v0.8.0/go.mod h1:U4R/s9dlXZsg8uJmaR1GrloUr14D7qDl8gi2iPXJH8k= +github.com/golangci/swaggoswag v0.0.0-20250504205917-77f2aca3143e h1:ai0EfmVYE2bRA5htgAG9r7s3tHsfjIhN98WshBTJ9jM= +github.com/golangci/swaggoswag v0.0.0-20250504205917-77f2aca3143e/go.mod h1:Vrn4B5oR9qRwM+f54koyeH3yzphlecwERs0el27Fr/s= github.com/golangci/unconvert v0.0.0-20250410112200-a129a6e6413e h1:gD6P7NEo7Eqtt0ssnqSJNNndxe69DOQ24A5h7+i3KpM= github.com/golangci/unconvert v0.0.0-20250410112200-a129a6e6413e/go.mod h1:h+wZwLjUTJnm/P2rwlbJdRPZXOzaT36/FwnPnY2inzc= github.com/google/go-cmdtest v0.4.1-0.20220921163831-55ab3332a786 h1:rcv+Ippz6RAtvaGgKxc+8FQIpxHgsF+HBzPyYL2cyVU= @@ -210,7 +218,6 @@ github.com/google/go-cmdtest v0.4.1-0.20220921163831-55ab3332a786/go.mod h1:apVn github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= @@ -218,8 +225,8 @@ github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a h1://KbezygeMJZCSHH+H github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA= github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/gordonklaus/ineffassign v0.1.0 h1:y2Gd/9I7MdY1oEIt+n+rowjBNDcLQq3RsH5hwJd0f9s= -github.com/gordonklaus/ineffassign v0.1.0/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0= +github.com/gordonklaus/ineffassign v0.2.0 h1:Uths4KnmwxNJNzq87fwQQDDnbNb7De00VOk9Nu0TySs= +github.com/gordonklaus/ineffassign v0.2.0/go.mod h1:TIpymnagPSexySzs7F9FnO1XFTy8IT3a59vmZp5Y9Lw= github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk= github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= @@ -233,8 +240,14 @@ github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= github.com/gostaticanalysis/testutil v0.5.0 h1:Dq4wT1DdTwTGCQQv3rl3IvD5Ld0E6HiY+3Zh0sUGqw8= github.com/gostaticanalysis/testutil v0.5.0/go.mod h1:OLQSbuM6zw2EvCcXTz1lVq5unyoNft372msDY0nY5Hs= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= 
+github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix/v2 v2.1.0 h1:CUW5RYIcysz+D3B+l1mDeXrQ7fUvGGCwJfdASSzbrfo= github.com/hashicorp/go-immutable-radix/v2 v2.1.0/go.mod h1:hgdqLXA4f6NIjRVisM1TJ9aOJVNRqKZj+xDGF6m7PBw= +github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48= +github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= @@ -260,8 +273,8 @@ github.com/julz/importas v0.2.0 h1:y+MJN/UdL63QbFJHws9BVC5RpA2iq0kpjrFajTGivjQ= github.com/julz/importas v0.2.0/go.mod h1:pThlt589EnCYtMnmhmRYY/qn9lCf/frPOK+WMx3xiJY= github.com/karamaru-alpha/copyloopvar v1.2.1 h1:wmZaZYIjnJ0b5UoKDjUHrikcV0zuPyyxI4SVplLd2CI= github.com/karamaru-alpha/copyloopvar v1.2.1/go.mod h1:nFmMlFNlClC2BPvNaHMdkirmTJxVCY0lhxBtlfOypMM= -github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= -github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/kevinburke/ssh_config v1.4.0 h1:6xxtP5bZ2E4NF5tuQulISpTO2z8XbtH8cg1PWkxoFkQ= +github.com/kevinburke/ssh_config v1.4.0/go.mod h1:q2RIzfka+BXARoNexmF9gkxEX7DmvbW9P4hIVx2Kg4M= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/errcheck v1.9.0 h1:9xt1zI9EBfcYBvdU1nVrzMzzUPUtPKs9bVSIM3TAb3M= github.com/kisielk/errcheck v1.9.0/go.mod h1:kQxWMMVZgIkDq7U8xtG/n2juOjbLgZtedi0D+/VL/i8= @@ -285,10 +298,10 @@ github.com/ldez/exptostd v0.4.4 h1:58AtQjnLcT/tI5W/1KU7xE/O7zW9RAWB6c/ScQAnfus= github.com/ldez/exptostd v0.4.4/go.mod 
h1:QfdzPw6oHjFVdNV7ILoPu5sw3OZ3OG1JS0I5JN3J4Js= github.com/ldez/gomoddirectives v0.7.0 h1:EOx8Dd56BZYSez11LVgdj025lKwlP0/E5OLSl9HDwsY= github.com/ldez/gomoddirectives v0.7.0/go.mod h1:wR4v8MN9J8kcwvrkzrx6sC9xe9Cp68gWYCsda5xvyGc= -github.com/ldez/grignotin v0.9.0 h1:MgOEmjZIVNn6p5wPaGp/0OKWyvq42KnzAt/DAb8O4Ow= -github.com/ldez/grignotin v0.9.0/go.mod h1:uaVTr0SoZ1KBii33c47O1M8Jp3OP3YDwhZCmzT9GHEk= -github.com/ldez/tagliatelle v0.7.1 h1:bTgKjjc2sQcsgPiT902+aadvMjCeMHrY7ly2XKFORIk= -github.com/ldez/tagliatelle v0.7.1/go.mod h1:3zjxUpsNB2aEZScWiZTHrAXOl1x25t3cRmzfK1mlo2I= +github.com/ldez/grignotin v0.10.0 h1:NQPeh1E/Eza4F0exCeC1WkpnLvgUcQDT8MQ1vOLML0E= +github.com/ldez/grignotin v0.10.0/go.mod h1:oR4iCKUP9fwoeO6vCQeD7M5SMxCT6xdVas4vg0h1LaI= +github.com/ldez/tagliatelle v0.7.2 h1:KuOlL70/fu9paxuxbeqlicJnCspCRjH0x8FW+NfgYUk= +github.com/ldez/tagliatelle v0.7.2/go.mod h1:PtGgm163ZplJfZMZ2sf5nhUT170rSuPgBimoyYtdaSI= github.com/ldez/usetesting v0.5.0 h1:3/QtzZObBKLy1F4F8jLuKJiKBjjVFi1IavpoWbmqLwc= github.com/ldez/usetesting v0.5.0/go.mod h1:Spnb4Qppf8JTuRgblLrEWb7IE6rDmUpGvxY3iRrzvDQ= github.com/leonklingele/grouper v1.1.2 h1:o1ARBDLOmmasUaNDesWqWCIFH3u7hoFlM84YrjT3mIY= @@ -297,6 +310,8 @@ github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69 github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/macabu/inamedparam v0.2.0 h1:VyPYpOc10nkhI2qeNUdh3Zket4fcZjEWe35poddBCpE= github.com/macabu/inamedparam v0.2.0/go.mod h1:+Pee9/YfGe5LJ62pYXqB89lJ+0k5bsR8Wgz/C0Zlq3U= +github.com/manuelarte/embeddedstructfieldcheck v0.3.0 h1:VhGqK8gANDvFYDxQkjPbv7/gDJtsGU9k6qj/hC2hgso= +github.com/manuelarte/embeddedstructfieldcheck v0.3.0/go.mod h1:LSo/IQpPfx1dXMcX4ibZCYA7Yy6ayZHIaOGM70+1Wy8= github.com/manuelarte/funcorder v0.5.0 h1:llMuHXXbg7tD0i/LNw8vGnkDTHFpTnWqKPI85Rknc+8= github.com/manuelarte/funcorder v0.5.0/go.mod h1:Yt3CiUQthSBMBxjShjdXMexmzpP8YGvGLjrxJNkO2hA= github.com/maratori/testableexamples v1.0.0 
h1:dU5alXRrD8WKSjOUnmJZuzdxWOEQ57+7s93SLMxb2vI= @@ -313,8 +328,8 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mgechev/revive v1.10.0 h1:x2oJsd7yrDp0mC6IgZqSKBTjSUC9Zk5Ob2WfBwZic2I= -github.com/mgechev/revive v1.10.0/go.mod h1:1MRO9zUV7Yukhqh/nGRKSaw6xC5XDzPWPja5GMPWoSE= +github.com/mgechev/revive v1.12.0 h1:Q+/kkbbwerrVYPv9d9efaPGmAO/NsxwW/nE6ahpQaCU= +github.com/mgechev/revive v1.12.0/go.mod h1:VXsY2LsTigk8XU9BpZauVLjVrhICMOV3k1lpB3CXrp8= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/moricho/tparallel v0.3.2 h1:odr8aZVFA3NZrNybggMkYO3rgPRcqjeQUlBBFVxKHTI= @@ -329,8 +344,8 @@ github.com/nishanths/exhaustive v0.12.0 h1:vIY9sALmw6T/yxiASewa4TQcFsVYZQQRUQJhK github.com/nishanths/exhaustive v0.12.0/go.mod h1:mEZ95wPIZW+x8kC4TgC+9YCUgiST7ecevsVDTgc2obs= github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= -github.com/nunnatsa/ginkgolinter v0.19.1 h1:mjwbOlDQxZi9Cal+KfbEJTCz327OLNfwNvoZ70NJ+c4= -github.com/nunnatsa/ginkgolinter v0.19.1/go.mod h1:jkQ3naZDmxaZMXPWaS9rblH+i+GWXQCaS/JFIWcOH2s= +github.com/nunnatsa/ginkgolinter v0.20.0 h1:OmWLkAFO2HUTYcU6mprnKud1Ey5pVdiVNYGO5HVicx8= +github.com/nunnatsa/ginkgolinter v0.20.0/go.mod h1:dCIuFlTPfQerXgGUju3VygfAFPdC5aE1mdacCDKDJcQ= github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= github.com/onsi/gomega v1.37.0 
h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y= @@ -346,8 +361,8 @@ github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs= github.com/otiai10/mint v1.6.3/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= -github.com/pjbgf/sha1cd v0.3.2 h1:a9wb0bp1oC2TGwStyn0Umc/IGKQnEgF0vVaZ8QF8eo4= -github.com/pjbgf/sha1cd v0.3.2/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A= +github.com/pjbgf/sha1cd v0.4.0 h1:NXzbL1RvjTUi6kgYZCX3fPwwl27Q1LJndxtUDVfJGRY= +github.com/pjbgf/sha1cd v0.4.0/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -357,14 +372,14 @@ github.com/polyfloyd/go-errorlint v1.8.0 h1:DL4RestQqRLr8U4LygLw8g2DX6RN1eBJOpa2 github.com/polyfloyd/go-errorlint v1.8.0/go.mod h1:G2W0Q5roxbLCt0ZQbdoxQxXktTjwNyDbEaj3n7jvl4s= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= -github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= +github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.65.0 
h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= -github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= -github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= github.com/quasilyte/go-ruleguard v0.4.4 h1:53DncefIeLX3qEpjzlS1lyUmQoUEeOWPFWqaTJq9eAQ= github.com/quasilyte/go-ruleguard v0.4.4/go.mod h1:Vl05zJ538vcEEwu16V/Hdu7IYZWyKSwIy4c88Ro1kRE= github.com/quasilyte/go-ruleguard/dsl v0.3.22 h1:wd8zkOhSNr+I+8Qeciml08ivDt1pSXe60+5DqOpCjPE= @@ -387,8 +402,8 @@ github.com/ryancurrah/gomodguard v1.4.1 h1:eWC8eUMNZ/wM/PWuZBv7JxxqT5fiIKSIyTvjb github.com/ryancurrah/gomodguard v1.4.1/go.mod h1:qnMJwV1hX9m+YJseXEBhd2s90+1Xn6x9dLz11ualI1I= github.com/ryanrolds/sqlclosecheck v0.5.1 h1:dibWW826u0P8jNLsLN+En7+RqWWTYrjCB9fJfSfdyCU= github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ= -github.com/sagikazarmark/locafero v0.9.0 h1:GbgQGNtTrEmddYDSAH9QLRyfAHY12md+8YFTqyMTC9k= -github.com/sagikazarmark/locafero v0.9.0/go.mod h1:UBUyz37V+EdMS3hDF3QWIiVr/2dPrx49OMO0Bn0hJqk= +github.com/sagikazarmark/locafero v0.10.0 h1:FM8Cv6j2KqIhM2ZK7HZjm4mpj9NBktLgowT1aN9q5Cc= +github.com/sagikazarmark/locafero v0.10.0/go.mod h1:Ieo3EUsjifvQu4NZwV5sPd4dwvu0OCgEQV7vjc9yDjw= github.com/sanposhiho/wastedassign/v2 v2.1.0 h1:crurBF7fJKIORrV85u9UUpePDYGWnwvv3+A96WvwXT0= github.com/sanposhiho/wastedassign/v2 v2.1.0/go.mod h1:+oSmSC+9bQ+VUAxA66nBb0Z7N8CK7mscKTDYC6aIek4= github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ= @@ -397,8 +412,8 @@ github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tM github.com/sashamelentyev/interfacebloat v1.1.0/go.mod 
h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= github.com/sashamelentyev/usestdlibvars v1.29.0 h1:8J0MoRrw4/NAXtjQqTHrbW9NN+3iMf7Knkq057v4XOQ= github.com/sashamelentyev/usestdlibvars v1.29.0/go.mod h1:8PpnjHMk5VdeWlVb4wCdrB8PNbLqZ3wBZTZWkrpZZL8= -github.com/securego/gosec/v2 v2.22.5 h1:ySws9uwOeE42DsG54v2moaJfh7r08Ev7SAYJuoMDfRA= -github.com/securego/gosec/v2 v2.22.5/go.mod h1:AWfgrFsVewk5LKobsPWlygCHt8K91boVPyL6GUZG5NY= +github.com/securego/gosec/v2 v2.22.7 h1:8/9P+oTYI4yIpAzccQKVsg1/90Po+JzGtAhqoHImDeM= +github.com/securego/gosec/v2 v2.22.7/go.mod h1:510TFNDMrIPytokyHQAVLvPeDr41Yihn2ak8P+XQfNE= github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw= github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= @@ -410,10 +425,10 @@ github.com/sivchari/containedctx v1.0.3 h1:x+etemjbsh2fB5ewm5FeLNi5bUjK0V8n0RB+W github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4= github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8= github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY= -github.com/sonatard/noctx v0.3.3 h1:9+wFUxZfjiCdNadFaGH55sa7Y1r6yKZiAsUVmCP+tjw= -github.com/sonatard/noctx v0.3.3/go.mod h1:64XdbzFb18XL4LporKXp8poqZtPKbCrqQ402CV+kJas= -github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= -github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/sonatard/noctx v0.4.0 h1:7MC/5Gg4SQ4lhLYR6mvOP6mQVSxCrdyiExo7atBs27o= +github.com/sonatard/noctx v0.4.0/go.mod h1:64XdbzFb18XL4LporKXp8poqZtPKbCrqQ402CV+kJas= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod 
h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0= github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA= @@ -423,8 +438,9 @@ github.com/spf13/cast v1.9.2/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqe github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M= +github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= @@ -437,10 +453,9 @@ github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify 
v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/tdakkota/asciicheck v0.4.1 h1:bm0tbcmi0jezRA2b5kg4ozmMuGAFotKI3RZfrhfovg8= @@ -465,8 +480,8 @@ github.com/ultraware/whitespace v0.2.0 h1:TYowo2m9Nfj1baEQBjuHzvMRbp19i+RCcRYrSW github.com/ultraware/whitespace v0.2.0/go.mod h1:XcP1RLD81eV4BW8UhQlpaR+SDc2givTvyI8a586WjW8= github.com/uudashr/gocognit v1.2.0 h1:3BU9aMr1xbhPlvJLSydKwdLN3tEUUrzPSSM8S4hDYRA= github.com/uudashr/gocognit v1.2.0/go.mod h1:k/DdKPI6XBZO1q7HgoV2juESI2/Ofj9AcHPZhBBdrTU= -github.com/uudashr/iface v1.4.0 h1:ImZ+1oEJPXvjap7nK0md7gA9RRH7PMp4vliaLkJ2+cg= -github.com/uudashr/iface v1.4.0/go.mod h1:i/H4cfRMPe0izticV8Yz0g6/zcsh5xXlvthrdh1kqcY= +github.com/uudashr/iface v1.4.1 h1:J16Xl1wyNX9ofhpHmQ9h9gk5rnv2A6lX/2+APLTo0zU= +github.com/uudashr/iface v1.4.1/go.mod h1:pbeBPlbuU2qkNDn0mmfrxP2X+wjPMIQAy+r1MBXSXtg= github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad h1:W0LEBv82YCGEtcmPA3uNZBI33/qF//HAAs3MawDjRa0= github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad/go.mod h1:Hy8o65+MXnS6EwGElrSRjUzQDLXreJlzYLlWiHtt8hM= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= @@ -492,22 +507,22 @@ gitlab.com/bosi/decorder v0.4.2 h1:qbQaV3zgwnBZ4zPMhGLW4KZe7A7NwxEhJx39R3shffo= gitlab.com/bosi/decorder v0.4.2/go.mod h1:muuhHoaJkA9QLcYHq4Mj8FJUwDZ+EirSHRiaTcTf6T8= go-simpler.org/assert v0.9.0 h1:PfpmcSvL7yAnWyChSjOz6Sp6m9j5lyK8Ok9pEL31YkQ= go-simpler.org/assert v0.9.0/go.mod h1:74Eqh5eI6vCK6Y5l3PI8ZYFXG4Sa+tkr70OIPJAUr28= -go-simpler.org/musttag v0.13.1 h1:lw2sJyu7S1X8lc8zWUAdH42y+afdcCnHhWpnkWvd6vU= -go-simpler.org/musttag v0.13.1/go.mod h1:8r450ehpMLQgvpb6sg+hV5Ur47eH6olp/3yEanfG97k= -go-simpler.org/sloglint v0.11.0 
h1:JlR1X4jkbeaffiyjLtymeqmGDKBDO1ikC6rjiuFAOco= -go-simpler.org/sloglint v0.11.0/go.mod h1:CFDO8R1i77dlciGfPEPvYke2ZMx4eyGiEIWkyeW2Pvw= -go.augendre.info/fatcontext v0.8.0 h1:2dfk6CQbDGeu1YocF59Za5Pia7ULeAM6friJ3LP7lmk= -go.augendre.info/fatcontext v0.8.0/go.mod h1:oVJfMgwngMsHO+KB2MdgzcO+RvtNdiCEOlWvSFtax/s= -go.opentelemetry.io/build-tools v0.23.1 h1:Md0aRvwGrRaIbW3baxFj7oipSY2HfrY0SiT/Iq1gzLI= -go.opentelemetry.io/build-tools v0.23.1/go.mod h1:ORnHNf2mImNjM63JUoVZ5YJhs+HAvQ6aAtE++KQF9Kc= -go.opentelemetry.io/build-tools/crosslink v0.23.1 h1:cNdO85DlDcHETEVmoN4A0hE5X89rvkxMejakvlbu+eU= -go.opentelemetry.io/build-tools/crosslink v0.23.1/go.mod h1:yC5WlPnCa44cTM5e0PtRGpcbgU9lPNgJ6msyTE+dt+0= -go.opentelemetry.io/build-tools/gotmpl v0.23.1 h1:iA+oEJVhig4jBfrAYT9AuSxpbx4lYxwHPaQzSS3/txY= -go.opentelemetry.io/build-tools/gotmpl v0.23.1/go.mod h1:cCN6ZsIuDiBH8eEBU9pqfra7gzaNtXleIwzGBpOJOZo= -go.opentelemetry.io/build-tools/multimod v0.23.1 h1:dYaYFQM47L49SvUVHroMkXqtqiHF9tvjfAkFSHDW6KA= -go.opentelemetry.io/build-tools/multimod v0.23.1/go.mod h1:S4L3dqkdnFiDNir0efNQRXHXMhYySsznl5DZ9g7SpSc= -go.opentelemetry.io/build-tools/semconvgen v0.23.1 h1:XH4G3N5eArhmNGG/GjJ2dqaofB0eRAH2oCAepVythaU= -go.opentelemetry.io/build-tools/semconvgen v0.23.1/go.mod h1:VkSnxMLSLR5a1SLXY9A7I0S9q8ogabNTtMyAB7I22NE= +go-simpler.org/musttag v0.14.0 h1:XGySZATqQYSEV3/YTy+iX+aofbZZllJaqwFWs+RTtSo= +go-simpler.org/musttag v0.14.0/go.mod h1:uP8EymctQjJ4Z1kUnjX0u2l60WfUdQxCwSNKzE1JEOE= +go-simpler.org/sloglint v0.11.1 h1:xRbPepLT/MHPTCA6TS/wNfZrDzkGvCCqUv4Bdwc3H7s= +go-simpler.org/sloglint v0.11.1/go.mod h1:2PowwiCOK8mjiF+0KGifVOT8ZsCNiFzvfyJeJOIt8MQ= +go.augendre.info/arangolint v0.2.0 h1:2NP/XudpPmfBhQKX4rMk+zDYIj//qbt4hfZmSSTcpj8= +go.augendre.info/arangolint v0.2.0/go.mod h1:Vx4KSJwu48tkE+8uxuf0cbBnAPgnt8O1KWiT7bljq7w= +go.augendre.info/fatcontext v0.8.1 h1:/T4+cCjpL9g71gJpcFAgVo/K5VFpqlN+NPU7QXxD5+A= +go.augendre.info/fatcontext v0.8.1/go.mod h1:r3Qz4ZOzex66wfyyj5VZ1xUcl81vzvHQ6/GWzzlMEwA= 
+go.opentelemetry.io/build-tools v0.27.0 h1:TM3p0bJRxN8IgeMg6vzylOvkm/AmFRvGQajrE4h0Lt4= +go.opentelemetry.io/build-tools v0.27.0/go.mod h1:uxKoJfHHoltEaZa4RSztaoHFc4p6gOKKq1oQdjOBuBw= +go.opentelemetry.io/build-tools/crosslink v0.26.2 h1:aTvLAh6KItG9YUh7Q2WYK4hlg44bfjqzMIET2hOd8aI= +go.opentelemetry.io/build-tools/crosslink v0.26.2/go.mod h1:7qAu8q779COJi1ljFCtKUSWi8Km3syqVO26qtftkQiA= +go.opentelemetry.io/build-tools/gotmpl v0.26.2 h1:9vhTpBldzBUKyzzbVMfvSJ3xUt8BmmTMvkz/0xV565I= +go.opentelemetry.io/build-tools/gotmpl v0.26.2/go.mod h1:HdRgbzMmTtIc7RsXTQhjluT3Fa/gxB2qq4GEwi6k3tY= +go.opentelemetry.io/build-tools/multimod v0.26.2 h1:xj+Yn8mnUUt4pc/RiPljWOH60bASl9FN/vaUXkDYKQU= +go.opentelemetry.io/build-tools/multimod v0.26.2/go.mod h1:vZ52aq0jmTeiYNzJKG9N7KPCCvRv6UfdZL19namLgm0= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -523,27 +538,25 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= -golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= -golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o= -golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= +golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= +golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= +golang.org/x/exp v0.0.0-20250811191247-51f88131bc50 
h1:3yiSh9fhy5/RhCSntf4Sy0Tnx50DmMpQ4MQdKKk4yg4= +golang.org/x/exp v0.0.0-20250811191247-51f88131bc50/go.mod h1:rT6SFzZ7oxADUDx58pcaKFTcZ+inxAa9fTrYx/uVYwg= golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/exp/typeparams v0.0.0-20250620022241-b7579e27df2b h1:KdrhdYPDUvJTvrDK9gdjfFd6JTk8vA1WJoldYSi0kHo= -golang.org/x/exp/typeparams v0.0.0-20250620022241-b7579e27df2b/go.mod h1:LKZHyeOpPuZcMgxeHjJp4p5yvxrCX1xDvH10zYHhjjQ= +golang.org/x/exp/typeparams v0.0.0-20250811191247-51f88131bc50 h1:NdeMMm6dk54wgJiDh5juRcbhZQ9TQ/z4bACXQDFqkNc= +golang.org/x/exp/typeparams v0.0.0-20250811191247-51f88131bc50/go.mod h1:LKZHyeOpPuZcMgxeHjJp4p5yvxrCX1xDvH10zYHhjjQ= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= -golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/mod v0.27.0 
h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= +golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -554,14 +567,12 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= -golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -571,8 
+582,8 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= -golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -589,38 +600,33 @@ golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod 
h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/telemetry v0.0.0-20250624183230-fef9409b2ec8 h1:0ScGaUo1GWmAIQ7goo1r/Lwb7K9rgpUbQtVyb2hsJJA= -golang.org/x/telemetry v0.0.0-20250624183230-fef9409b2ec8/go.mod h1:mUcjA5g0luJpMYCLjhH91f4t4RAUNp+zq9ZmUoqPD7M= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/telemetry v0.0.0-20250807160809-1a19826ec488 h1:3doPGa+Gg4snce233aCWnbZVFsyFMo/dR40KK/6skyE= +golang.org/x/telemetry v0.0.0-20250807160809-1a19826ec488/go.mod h1:fGb/2+tgXXjhjHsTNdVEEMZNWA0quBnfrO+AfoDSAKw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= -golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= +golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= +golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text 
v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= -golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= @@ -633,24 +639,25 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.7.0/go.mod 
h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= -golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= -golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= +golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= +golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= +golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM= +golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8= golang.org/x/vuln v1.1.4 h1:Ju8QsuyhX3Hk8ma3CesTbO8vfJD9EvUBgHvkxHBzj0I= golang.org/x/vuln v1.1.4/go.mod h1:F+45wmU18ym/ca5PLTPLsSzr2KppzswxPP603ldA67s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -658,7 +665,6 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EV gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/internal/tools/semconvkit/main.go b/internal/tools/semconvkit/main.go index e79c7238234..8fa36247699 100644 --- a/internal/tools/semconvkit/main.go +++ b/internal/tools/semconvkit/main.go @@ -20,6 +20,7 @@ import ( "text/template" "github.com/Masterminds/semver" + "go.opentelemetry.io/otel/internal/tools/semconvkit/decls" ) diff --git a/internal/tools/semconvkit/templates/error_type.go.tmpl b/internal/tools/semconvkit/templates/error_type.go.tmpl new file mode 100644 index 00000000000..185cf5810eb --- /dev/null +++ b/internal/tools/semconvkit/templates/error_type.go.tmpl @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/{{.TagVer}}" + +import ( + "fmt" + "reflect" + + "go.opentelemetry.io/otel/attribute" +) + +// ErrorType returns an [attribute.KeyValue] identifying the error type of err. 
+func ErrorType(err error) attribute.KeyValue { + if err == nil { + return ErrorTypeOther + } + t := reflect.TypeOf(err) + var value string + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. + value = t.String() + } else { + value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) + } + + if value == "" { + return ErrorTypeOther + } + return ErrorTypeKey.String(value) +} diff --git a/internal/tools/semconvkit/templates/error_type_test.go.tmpl b/internal/tools/semconvkit/templates/error_type_test.go.tmpl new file mode 100644 index 00000000000..c695b87e032 --- /dev/null +++ b/internal/tools/semconvkit/templates/error_type_test.go.tmpl @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/{{.TagVer}}" + +import ( + "errors" + "fmt" + "reflect" + "testing" + + "go.opentelemetry.io/otel/attribute" +) + +type CustomError struct{} + +func (CustomError) Error() string { + return "custom error" +} + +func TestErrorType(t *testing.T) { + customErr := CustomError{} + builtinErr := errors.New("something went wrong") + var nilErr error + + wantCustomType := reflect.TypeOf(customErr) + wantCustomStr := fmt.Sprintf("%s.%s", wantCustomType.PkgPath(), wantCustomType.Name()) + + tests := []struct { + name string + err error + want attribute.KeyValue + }{ + { + name: "BuiltinError", + err: builtinErr, + want: attribute.String("error.type", "*errors.errorString"), + }, + { + name: "CustomError", + err: customErr, + want: attribute.String("error.type", wantCustomStr), + }, + { + name: "NilError", + err: nilErr, + want: ErrorTypeOther, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := ErrorType(tt.err) + if got != tt.want { + t.Errorf("ErrorType(%v) = %v, want %v", tt.err, got, tt.want) + } + }) + } +} diff --git a/internal/tools/tools.go b/internal/tools/tools.go index 33013ccb5ec..7c52fb3e495 100644 --- a/internal/tools/tools.go 
+++ b/internal/tools/tools.go @@ -15,7 +15,6 @@ import ( _ "go.opentelemetry.io/build-tools/crosslink" _ "go.opentelemetry.io/build-tools/gotmpl" _ "go.opentelemetry.io/build-tools/multimod" - _ "go.opentelemetry.io/build-tools/semconvgen" _ "golang.org/x/exp/cmd/gorelease" _ "golang.org/x/tools/cmd/stringer" _ "golang.org/x/vuln/cmd/govulncheck" diff --git a/internal/tools/verifyreadmes/main.go b/internal/tools/verifyreadmes/main.go index ff0d4f4bc88..2f4b3c342d2 100644 --- a/internal/tools/verifyreadmes/main.go +++ b/internal/tools/verifyreadmes/main.go @@ -31,7 +31,6 @@ func verifyReadme(path string, info os.FileInfo, err error) error { if !info.Mode().IsRegular() || info.Name() != "go.mod" { return nil - } for _, dir := range excludedDirs { @@ -40,7 +39,6 @@ func verifyReadme(path string, info os.FileInfo, err error) error { } } - // Check that a README.md exists in the same directory as the go.mod file. readme := filepath.Join(filepath.Dir(path), readmeFilename) _, err = os.Stat(readme) diff --git a/log/go.mod b/log/go.mod index 607ce49bf6d..d7b9984600a 100644 --- a/log/go.mod +++ b/log/go.mod @@ -4,8 +4,8 @@ go 1.23.0 require ( github.com/go-logr/logr v1.4.3 - github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/otel v1.37.0 + github.com/stretchr/testify v1.11.1 + go.opentelemetry.io/otel v1.38.0 ) require ( @@ -13,8 +13,8 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/otel/metric v1.37.0 // indirect - go.opentelemetry.io/otel/trace v1.37.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/log/go.sum b/log/go.sum index ff74960592c..416e4cb1a62 100644 --- a/log/go.sum +++ b/log/go.sum @@ -15,8 +15,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib 
v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/log/internal/global/log_test.go b/log/internal/global/log_test.go index ae2c5b2a6dd..811b4435646 100644 --- a/log/internal/global/log_test.go +++ b/log/internal/global/log_test.go @@ -16,7 +16,7 @@ import ( "go.opentelemetry.io/otel/log/noop" ) -func TestLoggerProviderConcurrentSafe(t *testing.T) { +func TestLoggerProviderConcurrentSafe(*testing.T) { p := &loggerProvider{} done := make(chan struct{}) @@ -41,7 +41,7 @@ func TestLoggerProviderConcurrentSafe(t *testing.T) { <-done } -func TestLoggerConcurrentSafe(t *testing.T) { +func TestLoggerConcurrentSafe(*testing.T) { l := &logger{} done := make(chan struct{}) diff --git a/log/keyvalue.go b/log/keyvalue.go index 87d1a827554..f87cee04d60 100644 --- a/log/keyvalue.go +++ b/log/keyvalue.go @@ -242,10 +242,10 @@ func (v Value) Kind() Kind { } } -// Empty returns if v does not hold any value. +// Empty reports whether v does not hold any value. func (v Value) Empty() bool { return v.Kind() == KindEmpty } -// Equal returns if v is equal to w. +// Equal reports whether v is equal to w. 
func (v Value) Equal(w Value) bool { k1 := v.Kind() k2 := w.Kind() @@ -326,7 +326,7 @@ type KeyValue struct { Value Value } -// Equal returns if a is equal to b. +// Equal reports whether a is equal to b. func (a KeyValue) Equal(b KeyValue) bool { return a.Key == b.Key && a.Value.Equal(b.Value) } diff --git a/log/keyvalue_test.go b/log/keyvalue_test.go index 01e239d356d..9d2f948e68e 100644 --- a/log/keyvalue_test.go +++ b/log/keyvalue_test.go @@ -439,10 +439,10 @@ type logSink struct { err error msg string - keysAndValues []interface{} + keysAndValues []any } -func (l *logSink) Error(err error, msg string, keysAndValues ...interface{}) { +func (l *logSink) Error(err error, msg string, keysAndValues ...any) { l.err, l.msg, l.keysAndValues = err, msg, keysAndValues l.LogSink.Error(err, msg, keysAndValues...) } diff --git a/log/logger.go b/log/logger.go index 99a429a712b..8441ca88408 100644 --- a/log/logger.go +++ b/log/logger.go @@ -30,7 +30,7 @@ type Logger interface { // concurrently. Emit(ctx context.Context, record Record) - // Enabled returns whether the Logger emits for the given context and + // Enabled reports whether the Logger emits for the given context and // param. // // This is useful for users that want to know if a [Record] diff --git a/log/logtest/assert.go b/log/logtest/assert.go index 9e87f86a03f..dfd754425e3 100644 --- a/log/logtest/assert.go +++ b/log/logtest/assert.go @@ -5,7 +5,6 @@ package logtest // import "go.opentelemetry.io/otel/log/logtest" import ( "context" - "testing" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" @@ -13,19 +12,14 @@ import ( "go.opentelemetry.io/otel/log" ) -// AssertEqual asserts that the two concrete data-types from the logtest package are equal. -func AssertEqual[T Recording | Record](t *testing.T, want, got T, opts ...AssertOption) bool { - t.Helper() - return assertEqual(t, want, got, opts...) -} - -// testingT reports failure messages. +// TestingT reports failure messages. 
// *testing.T implements this interface. -type testingT interface { +type TestingT interface { Errorf(format string, args ...any) } -func assertEqual[T Recording | Record](t testingT, want, got T, opts ...AssertOption) bool { +// AssertEqual asserts that the two concrete data-types from the logtest package are equal. +func AssertEqual[T Recording | Record](t TestingT, want, got T, opts ...AssertOption) bool { if h, ok := t.(interface{ Helper() }); ok { h.Helper() } diff --git a/log/logtest/assert_test.go b/log/logtest/assert_test.go index 865f96fab42..1bdc531fb3d 100644 --- a/log/logtest/assert_test.go +++ b/log/logtest/assert_test.go @@ -17,6 +17,14 @@ import ( var y2k = time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC) +// Compile-time check to ensure testing structs implement TestingT. +var ( + _ TestingT = (*testing.T)(nil) + _ TestingT = (*testing.B)(nil) + _ TestingT = (*testing.F)(nil) + _ TestingT = (*mockTestingT)(nil) +) + type mockTestingT struct { errors []string } @@ -114,7 +122,7 @@ func TestAssertEqualRecording(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockT := &mockTestingT{} - result := assertEqual(mockT, tc.a, tc.b, tc.opts...) + result := AssertEqual(mockT, tc.a, tc.b, tc.opts...) if result != tc.want { t.Errorf("AssertEqual() = %v, want %v", result, tc.want) } @@ -178,7 +186,7 @@ func TestAssertEqualRecord(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockT := &mockTestingT{} - result := assertEqual(mockT, tc.a, tc.b, tc.opts...) + result := AssertEqual(mockT, tc.a, tc.b, tc.opts...) 
if result != tc.want { t.Errorf("AssertEqual() = %v, want %v", result, tc.want) } @@ -198,7 +206,7 @@ func TestDesc(t *testing.T) { Attributes: []log.KeyValue{log.Int("n", 1)}, } - assertEqual(mockT, a, b, Desc("custom message, %s", "test")) + AssertEqual(mockT, a, b, Desc("custom message, %s", "test")) require.Len(t, mockT.errors, 1, "expected one error") assert.Contains(t, mockT.errors[0], "custom message, test\n", "expected custom message") diff --git a/log/logtest/example_test.go b/log/logtest/example_test.go index 3ff69d460f1..ae759848d16 100644 --- a/log/logtest/example_test.go +++ b/log/logtest/example_test.go @@ -20,20 +20,18 @@ func Example() { // Emit a log record (code under test). l := rec.Logger("Example") - ctx := context.Background() r := log.Record{} r.SetTimestamp(time.Now()) r.SetSeverity(log.SeverityInfo) r.SetBody(log.StringValue("Hello there")) r.AddAttributes(log.String("foo", "bar")) r.AddAttributes(log.Int("n", 1)) - l.Emit(ctx, r) + l.Emit(context.Background(), r) // Verify that the expected and actual log records match. want := logtest.Recording{ logtest.Scope{Name: "Example"}: []logtest.Record{ { - Context: context.Background(), Severity: log.SeverityInfo, Body: log.StringValue("Hello there"), Attributes: []log.KeyValue{ @@ -45,9 +43,11 @@ func Example() { } got := rec.Result() logtest.AssertEqual(t, want, got, - // Ignore Timestamps. - logtest.Transform(func(time.Time) time.Time { - return time.Time{} + logtest.Transform(func(r logtest.Record) logtest.Record { + r = r.Clone() + r.Context = nil // Ignore context. + r.Timestamp = time.Time{} // Ignore timestamp. 
+ return r }), ) // Output: diff --git a/log/logtest/go.mod b/log/logtest/go.mod index 409b4ee7689..4d113405628 100644 --- a/log/logtest/go.mod +++ b/log/logtest/go.mod @@ -4,9 +4,9 @@ go 1.23.0 require ( github.com/google/go-cmp v0.7.0 - github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/otel v1.37.0 - go.opentelemetry.io/otel/log v0.13.0 + github.com/stretchr/testify v1.11.1 + go.opentelemetry.io/otel v1.38.0 + go.opentelemetry.io/otel/log v0.14.0 ) require ( @@ -15,8 +15,8 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/otel/metric v1.37.0 // indirect - go.opentelemetry.io/otel/trace v1.37.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/log/logtest/go.sum b/log/logtest/go.sum index ff74960592c..416e4cb1a62 100644 --- a/log/logtest/go.sum +++ b/log/logtest/go.sum @@ -15,8 +15,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/log/logtest/recorder_test.go b/log/logtest/recorder_test.go index b44b17070b6..feea565a9b4 100644 --- a/log/logtest/recorder_test.go +++ b/log/logtest/recorder_test.go @@ -149,7 +149,7 @@ func TestRecorderLoggerEmitAndReset(t *testing.T) { assert.Equal(t, want, got) } -func TestRecorderConcurrentSafe(t *testing.T) { +func TestRecorderConcurrentSafe(*testing.T) { const goRoutineN = 10 var wg sync.WaitGroup @@ -157,7 +157,7 @@ func TestRecorderConcurrentSafe(t *testing.T) { r := &Recorder{} - for i := 0; i < goRoutineN; i++ { + for range goRoutineN { go func() { defer wg.Done() diff --git a/log/record.go b/log/record.go index 4d2f32d0fb0..adde7a0dc6f 100644 --- a/log/record.go +++ b/log/record.go @@ -142,3 +142,11 @@ func (r *Record) AddAttributes(attrs ...KeyValue) { func (r *Record) AttributesLen() int { return r.nFront + len(r.back) } + +// Clone returns a copy of the record with no shared state. +// The original record and the clone can both be modified without interfering with each other. +func (r *Record) Clone() Record { + res := *r + res.back = slices.Clone(r.back) + return res +} diff --git a/log/record_test.go b/log/record_test.go index b3f145fe0f2..7e44bbfc074 100644 --- a/log/record_test.go +++ b/log/record_test.go @@ -160,3 +160,64 @@ func TestRecordAllocationLimits(t *testing.T) { // Convince the linter these values are used. 
_, _, _, _, _, _ = tStamp, sev, text, body, n, attr } + +func TestRecordClone(t *testing.T) { + now0 := time.Now() + sev0 := log.SeverityInfo + text0 := "text" + val0 := log.BoolValue(true) + attr0 := log.Bool("0", true) + + r0 := log.Record{} + r0.SetTimestamp(now0) + r0.SetObservedTimestamp(now0) + r0.SetSeverity(sev0) + r0.SetSeverityText(text0) + r0.SetBody(val0) + r0.AddAttributes(attr0) + + // Clone and modify the clone + now1 := now0.Add(time.Second) + sev1 := log.SeverityDebug + text1 := "string" + val1 := log.IntValue(1) + attr1 := log.Int64("1", 2) + + r1 := r0.Clone() + r1.SetTimestamp(now1) + r1.SetObservedTimestamp(now1) + r1.SetSeverity(sev1) + r1.SetSeverityText(text1) + r1.SetBody(val1) + r1.AddAttributes(attr1) + + // Assertions on original record (r0) + assert.Equal(t, now0, r0.Timestamp()) + assert.Equal(t, now0, r0.ObservedTimestamp()) + assert.Equal(t, sev0, r0.Severity()) + assert.Equal(t, text0, r0.SeverityText()) + assert.True(t, val0.Equal(r0.Body())) + + var r0Attrs []log.KeyValue + r0.WalkAttributes(func(kv log.KeyValue) bool { + r0Attrs = append(r0Attrs, kv) + return true + }) + assert.Contains(t, r0Attrs, attr0) + assert.NotContains(t, r0Attrs, attr1) + + // Assertions on cloned record (r1) + assert.Equal(t, now1, r1.Timestamp()) + assert.Equal(t, now1, r1.ObservedTimestamp()) + assert.Equal(t, sev1, r1.Severity()) + assert.Equal(t, text1, r1.SeverityText()) + assert.True(t, val1.Equal(r1.Body())) + + var r1Attrs []log.KeyValue + r1.WalkAttributes(func(kv log.KeyValue) bool { + r1Attrs = append(r1Attrs, kv) + return true + }) + assert.Contains(t, r1Attrs, attr0) + assert.Contains(t, r1Attrs, attr1) +} diff --git a/metric/asyncfloat64_test.go b/metric/asyncfloat64_test.go index 7ef2b0d0236..af7ebf43824 100644 --- a/metric/asyncfloat64_test.go +++ b/metric/asyncfloat64_test.go @@ -35,7 +35,7 @@ func TestFloat64ObservableConfiguration(t *testing.T) { } } - cback := func(ctx context.Context, obsrv Float64Observer) error { + cback := func(_ 
context.Context, obsrv Float64Observer) error { obsrv.Observe(token) return nil } diff --git a/metric/asyncint64_test.go b/metric/asyncint64_test.go index bc39e996de2..e09a192d562 100644 --- a/metric/asyncint64_test.go +++ b/metric/asyncint64_test.go @@ -35,7 +35,7 @@ func TestInt64ObservableConfiguration(t *testing.T) { } } - cback := func(ctx context.Context, obsrv Int64Observer) error { + cback := func(_ context.Context, obsrv Int64Observer) error { obsrv.Observe(token) return nil } diff --git a/metric/example_test.go b/metric/example_test.go index c75c6faeaf1..5e67f0d1d14 100644 --- a/metric/example_test.go +++ b/metric/example_test.go @@ -15,7 +15,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) var meter = otel.Meter("my-service-meter") @@ -110,7 +110,7 @@ func ExampleMeter_counter() { if err != nil { panic(err) } - http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + http.HandleFunc("/", func(_ http.ResponseWriter, r *http.Request) { apiCounter.Add(r.Context(), 1) // do some work in an API call @@ -166,7 +166,7 @@ func ExampleMeter_gauge() { go func() { defer close(fanSpeedSubscription) - for idx := 0; idx < 5; idx++ { + for range 5 { // Synchronous gauges are used when the measurement cycle is // synchronous to an external change. // Simulate that external cycle here. @@ -195,7 +195,7 @@ func ExampleMeter_histogram() { if err != nil { panic(err) } - http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + http.HandleFunc("/", func(_ http.ResponseWriter, r *http.Request) { start := time.Now() // do some work in an API call @@ -231,7 +231,7 @@ func ExampleMeter_observableCounter() { func ExampleMeter_observableUpDownCounter() { // The function registers asynchronous metrics for the provided db. 
// Make sure to unregister metric.Registration before closing the provided db. - _ = func(db *sql.DB, meter metric.Meter, poolName string) (metric.Registration, error) { + _ = func(db *sql.DB, meter metric.Meter, _ string) (metric.Registration, error) { m, err := meter.Int64ObservableUpDownCounter( "db.client.connections.max", metric.WithDescription("The maximum number of open connections allowed."), @@ -301,7 +301,7 @@ func ExampleMeter_attributes() { if err != nil { panic(err) } - http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + http.HandleFunc("/", func(_ http.ResponseWriter, r *http.Request) { // do some work in an API call and set the response HTTP status code statusCode := http.StatusOK diff --git a/metric/go.mod b/metric/go.mod index 120aa10a0c1..120b7a53353 100644 --- a/metric/go.mod +++ b/metric/go.mod @@ -3,8 +3,8 @@ module go.opentelemetry.io/otel/metric go 1.23.0 require ( - github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/otel v1.37.0 + github.com/stretchr/testify v1.11.1 + go.opentelemetry.io/otel v1.38.0 ) require ( @@ -13,7 +13,7 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/otel/trace v1.37.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/metric/go.sum b/metric/go.sum index ff74960592c..416e4cb1a62 100644 --- a/metric/go.sum +++ b/metric/go.sum @@ -15,8 +15,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify 
v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/metric/instrument_test.go b/metric/instrument_test.go index 15a6e9e9c71..f7fc295d65d 100644 --- a/metric/instrument_test.go +++ b/metric/instrument_test.go @@ -90,7 +90,7 @@ func testConfAttr(newConf func(...MeasurementOption) attrConf) func(t *testing.T } } -func TestWithAttributesConcurrentSafe(t *testing.T) { +func TestWithAttributesConcurrentSafe(*testing.T) { attrs := []attribute.KeyValue{ attribute.String("user", "Alice"), attribute.Bool("admin", true), diff --git a/metric_test.go b/metric_test.go index 6454dc07ff6..c98e0e4a631 100644 --- a/metric_test.go +++ b/metric_test.go @@ -17,7 +17,7 @@ type testMeterProvider struct{ embedded.MeterProvider } var _ metric.MeterProvider = &testMeterProvider{} -func (*testMeterProvider) Meter(_ string, _ ...metric.MeterOption) metric.Meter { +func (*testMeterProvider) Meter(string, ...metric.MeterOption) metric.Meter { return noop.NewMeterProvider().Meter("") } diff --git a/propagation/baggage.go b/propagation/baggage.go index ebda5026d6b..0518826020e 100644 --- a/propagation/baggage.go +++ b/propagation/baggage.go @@ -20,7 +20,7 @@ type Baggage struct{} var _ TextMapPropagator = Baggage{} // Inject sets baggage key-values from ctx into the carrier. 
-func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) { +func (Baggage) Inject(ctx context.Context, carrier TextMapCarrier) { bStr := baggage.FromContext(ctx).String() if bStr != "" { carrier.Set(baggageHeader, bStr) @@ -30,7 +30,7 @@ func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) { // Extract returns a copy of parent with the baggage from the carrier added. // If carrier implements [ValuesGetter] (e.g. [HeaderCarrier]), Values is invoked // for multiple values extraction. Otherwise, Get is called. -func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context { +func (Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context { if multiCarrier, ok := carrier.(ValuesGetter); ok { return extractMultiBaggage(parent, multiCarrier) } @@ -38,7 +38,7 @@ func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context } // Fields returns the keys who's values are set with Inject. -func (b Baggage) Fields() []string { +func (Baggage) Fields() []string { return []string{baggageHeader} } diff --git a/propagation/baggage_test.go b/propagation/baggage_test.go index 8ce34bdec5c..4aeb29648c1 100644 --- a/propagation/baggage_test.go +++ b/propagation/baggage_test.go @@ -124,7 +124,7 @@ func TestExtractValidBaggage(t *testing.T) { t.Run(tt.name, func(t *testing.T) { mapCarr := propagation.MapCarrier{} mapCarr["baggage"] = tt.header - req, _ := http.NewRequest(http.MethodGet, "http://example.com/", nil) + req, _ := http.NewRequest(http.MethodGet, "http://example.com/", http.NoBody) req.Header.Set("baggage", tt.header) // test with http header carrier (which implements ValuesGetter) @@ -183,7 +183,7 @@ func TestExtractValidMultipleBaggageHeaders(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - req, _ := http.NewRequest(http.MethodGet, "http://example.com/", nil) + req, _ := http.NewRequest(http.MethodGet, 
"http://example.com/", http.NoBody) req.Header["Baggage"] = tt.headers ctx := context.Background() @@ -239,7 +239,7 @@ func TestExtractInvalidDistributedContextFromHTTPReq(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - req, _ := http.NewRequest(http.MethodGet, "http://example.com/", nil) + req, _ := http.NewRequest(http.MethodGet, "http://example.com/", http.NoBody) req.Header.Set("baggage", tt.header) expected := tt.has.Baggage(t) @@ -292,7 +292,7 @@ func TestInjectBaggageToHTTPReq(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - req, _ := http.NewRequest(http.MethodGet, "http://example.com/", nil) + req, _ := http.NewRequest(http.MethodGet, "http://example.com/", http.NoBody) ctx := baggage.ContextWithBaggage(context.Background(), tt.mems.Baggage(t)) propagator.Inject(ctx, propagation.HeaderCarrier(req.Header)) @@ -339,7 +339,7 @@ func TestBaggageInjectExtractRoundtrip(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { b := tt.mems.Baggage(t) - req, _ := http.NewRequest(http.MethodGet, "http://example.com/", nil) + req, _ := http.NewRequest(http.MethodGet, "http://example.com/", http.NoBody) ctx := baggage.ContextWithBaggage(context.Background(), b) propagator.Inject(ctx, propagation.HeaderCarrier(req.Header)) diff --git a/propagation/propagation.go b/propagation/propagation.go index 5c8c26ea2eb..0a32c59aa3b 100644 --- a/propagation/propagation.go +++ b/propagation/propagation.go @@ -20,7 +20,7 @@ type TextMapCarrier interface { // must never be done outside of a new major release. // Set stores the key-value pair. - Set(key string, value string) + Set(key, value string) // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. @@ -88,7 +88,7 @@ func (hc HeaderCarrier) Values(key string) []string { } // Set stores the key-value pair. 
-func (hc HeaderCarrier) Set(key string, value string) { +func (hc HeaderCarrier) Set(key, value string) { http.Header(hc).Set(key, value) } diff --git a/propagation/propagation_test.go b/propagation/propagation_test.go index 47d49cfa3a8..e8482dc4ca0 100644 --- a/propagation/propagation_test.go +++ b/propagation/propagation_test.go @@ -20,9 +20,9 @@ var ctxKey ctxKeyType type carrier []string -func (c *carrier) Keys() []string { return nil } +func (*carrier) Keys() []string { return nil } -func (c *carrier) Get(string) string { return "" } +func (*carrier) Get(string) string { return "" } func (c *carrier) Set(setter, _ string) { *c = append(*c, setter) @@ -32,11 +32,11 @@ type propagator struct { Name string } -func (p propagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) { +func (p propagator) Inject(_ context.Context, carrier propagation.TextMapCarrier) { carrier.Set(p.Name, "") } -func (p propagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context { +func (p propagator) Extract(ctx context.Context, _ propagation.TextMapCarrier) context.Context { v := ctx.Value(ctxKey) if v == nil { ctx = context.WithValue(ctx, ctxKey, []string{p.Name}) diff --git a/propagation/propagators_test.go b/propagation/propagators_test.go index 15deba3263f..b41ebf394e1 100644 --- a/propagation/propagators_test.go +++ b/propagation/propagators_test.go @@ -48,7 +48,7 @@ type outOfThinAirPropagator struct { var _ propagation.TextMapPropagator = outOfThinAirPropagator{} -func (p outOfThinAirPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context { +func (p outOfThinAirPropagator) Extract(ctx context.Context, _ propagation.TextMapCarrier) context.Context { sc := trace.NewSpanContext(trace.SpanContextConfig{ TraceID: traceID, SpanID: spanID, @@ -72,11 +72,11 @@ func (nilCarrier) Keys() []string { return nil } -func (nilCarrier) Get(key string) string { +func (nilCarrier) Get(string) string { return "" 
} -func (nilCarrier) Set(key string, value string) {} +func (nilCarrier) Set(string, string) {} func TestMultiplePropagators(t *testing.T) { ootaProp := outOfThinAirPropagator{t: t} diff --git a/propagation/trace_context.go b/propagation/trace_context.go index 6870e316dc0..6692d2665d2 100644 --- a/propagation/trace_context.go +++ b/propagation/trace_context.go @@ -36,7 +36,7 @@ var ( ) // Inject injects the trace context from ctx into carrier. -func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { +func (TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { sc := trace.SpanContextFromContext(ctx) if !sc.IsValid() { return @@ -77,7 +77,7 @@ func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) cont return trace.ContextWithRemoteSpanContext(ctx, sc) } -func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext { +func (TraceContext) extract(carrier TextMapCarrier) trace.SpanContext { h := carrier.Get(traceparentHeader) if h == "" { return trace.SpanContext{} @@ -151,6 +151,6 @@ func extractPart(dst []byte, h *string, n int) bool { } // Fields returns the keys who's values are set with Inject. 
-func (tc TraceContext) Fields() []string { +func (TraceContext) Fields() []string { return []string{traceparentHeader, tracestateHeader} } diff --git a/propagation/trace_context_benchmark_test.go b/propagation/trace_context_benchmark_test.go index 041d2f664ee..711e82aeb7f 100644 --- a/propagation/trace_context_benchmark_test.go +++ b/propagation/trace_context_benchmark_test.go @@ -56,7 +56,7 @@ func BenchmarkExtract(b *testing.B) { func extractSubBenchmarks(b *testing.B, fn func(*testing.B, *http.Request)) { b.Run("Sampled", func(b *testing.B) { - req, _ := http.NewRequest(http.MethodGet, "http://example.com/", nil) + req, _ := http.NewRequest(http.MethodGet, "http://example.com/", http.NoBody) req.Header.Set("traceparent", "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01") b.ReportAllocs() @@ -64,14 +64,14 @@ func extractSubBenchmarks(b *testing.B, fn func(*testing.B, *http.Request)) { }) b.Run("BogusVersion", func(b *testing.B) { - req, _ := http.NewRequest(http.MethodGet, "http://example.com/", nil) + req, _ := http.NewRequest(http.MethodGet, "http://example.com/", http.NoBody) req.Header.Set("traceparent", "qw-00000000000000000000000000000000-0000000000000000-01") b.ReportAllocs() fn(b, req) }) b.Run("FutureAdditionalData", func(b *testing.B) { - req, _ := http.NewRequest(http.MethodGet, "http://example.com/", nil) + req, _ := http.NewRequest(http.MethodGet, "http://example.com/", http.NoBody) req.Header.Set("traceparent", "02-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-09-XYZxsf09") b.ReportAllocs() fn(b, req) diff --git a/schema/go.mod b/schema/go.mod index f770377fc3f..5fb28923c9d 100644 --- a/schema/go.mod +++ b/schema/go.mod @@ -3,8 +3,8 @@ module go.opentelemetry.io/otel/schema go 1.23.0 require ( - github.com/Masterminds/semver/v3 v3.3.1 - github.com/stretchr/testify v1.10.0 + github.com/Masterminds/semver/v3 v3.4.0 + github.com/stretchr/testify v1.11.1 gopkg.in/yaml.v3 v3.0.1 ) diff 
--git a/schema/go.sum b/schema/go.sum index 3a1efbd1552..c6cdffb79a4 100644 --- a/schema/go.sum +++ b/schema/go.sum @@ -1,11 +1,11 @@ -github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= -github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/schema/v1.1/types/types.go b/schema/v1.1/types/types.go index b26e3052cac..a6b4444c4fc 100644 --- a/schema/v1.1/types/types.go +++ b/schema/v1.1/types/types.go @@ -13,4 +13,4 @@ type TelemetryVersion types10.TelemetryVersion type AttributeName string // AttributeValue is an attribute value. 
-type AttributeValue interface{} +type AttributeValue any diff --git a/sdk/go.mod b/sdk/go.mod index 880bb1c2787..9861653453b 100644 --- a/sdk/go.mod +++ b/sdk/go.mod @@ -8,11 +8,13 @@ require ( github.com/go-logr/logr v1.4.3 github.com/google/go-cmp v0.7.0 github.com/google/uuid v1.6.0 - github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/otel v1.37.0 - go.opentelemetry.io/otel/trace v1.37.0 + github.com/stretchr/testify v1.11.1 + go.opentelemetry.io/otel v1.38.0 + go.opentelemetry.io/otel/metric v1.38.0 + go.opentelemetry.io/otel/sdk/metric v1.38.0 + go.opentelemetry.io/otel/trace v1.38.0 go.uber.org/goleak v1.3.0 - golang.org/x/sys v0.33.0 + golang.org/x/sys v0.35.0 ) require ( @@ -20,10 +22,11 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/otel/metric v1.37.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) replace go.opentelemetry.io/otel/trace => ../trace replace go.opentelemetry.io/otel/metric => ../metric + +replace go.opentelemetry.io/otel/sdk/metric => ./metric diff --git a/sdk/go.sum b/sdk/go.sum index e7f05f47d7f..144fc543a73 100644 --- a/sdk/go.sum +++ b/sdk/go.sum @@ -17,14 +17,14 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= go.opentelemetry.io/auto/sdk v1.1.0 
h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/sdk/internal/x/x.go b/sdk/internal/x/x.go index 68d296cbed3..1be472e917a 100644 --- a/sdk/internal/x/x.go +++ b/sdk/internal/x/x.go @@ -19,7 +19,7 @@ import ( // to the case-insensitive string value of "true" (i.e. "True" and "TRUE" // will also enable this). var Resource = newFeature("RESOURCE", func(v string) (string, bool) { - if strings.ToLower(v) == "true" { + if strings.EqualFold(v, "true") { return v, true } return "", false @@ -59,7 +59,7 @@ func (f Feature[T]) Lookup() (v T, ok bool) { return f.parse(vRaw) } -// Enabled returns if the feature is enabled. +// Enabled reports whether the feature is enabled. 
func (f Feature[T]) Enabled() bool { _, ok := f.Lookup() return ok diff --git a/sdk/log/batch.go b/sdk/log/batch.go index b91741d5882..462eb1c3afc 100644 --- a/sdk/log/batch.go +++ b/sdk/log/batch.go @@ -329,7 +329,7 @@ func (q *queue) TryDequeue(buf []Record, write func([]Record) bool) int { origRead := q.read n := min(len(buf), q.len) - for i := 0; i < n; i++ { + for i := range n { buf[i] = q.read.Value q.read = q.read.Next() } diff --git a/sdk/log/batch_test.go b/sdk/log/batch_test.go index b2e993a5bfa..83c32470104 100644 --- a/sdk/log/batch_test.go +++ b/sdk/log/batch_test.go @@ -207,7 +207,7 @@ func TestBatchProcessor(t *testing.T) { WithExportInterval(time.Nanosecond), WithExportTimeout(time.Hour), ) - for i := 0; i < size; i++ { + for range size { assert.NoError(t, b.OnEmit(ctx, new(Record))) } var got []Record @@ -230,7 +230,7 @@ func TestBatchProcessor(t *testing.T) { WithExportInterval(time.Hour), WithExportTimeout(time.Hour), ) - for i := 0; i < 10*batch; i++ { + for range 10 * batch { assert.NoError(t, b.OnEmit(ctx, new(Record))) } assert.Eventually(t, func() bool { @@ -253,7 +253,7 @@ func TestBatchProcessor(t *testing.T) { WithExportInterval(time.Hour), WithExportTimeout(time.Hour), ) - for i := 0; i < 2*batch; i++ { + for range 2 * batch { assert.NoError(t, b.OnEmit(ctx, new(Record))) } @@ -293,7 +293,7 @@ func TestBatchProcessor(t *testing.T) { b := NewBatchProcessor(e) const shutdowns = 3 - for i := 0; i < shutdowns; i++ { + for range shutdowns { assert.NoError(t, b.Shutdown(ctx)) } assert.Equal(t, 1, e.ShutdownN(), "exporter Shutdown calls") @@ -382,7 +382,7 @@ func TestBatchProcessor(t *testing.T) { ) // Enqueue 10 x "batch size" amount of records. 
- for i := 0; i < 10*batch; i++ { + for range 10 * batch { require.NoError(t, b.OnEmit(ctx, new(Record))) } assert.Eventually(t, func() bool { @@ -490,7 +490,7 @@ func TestBatchProcessor(t *testing.T) { ctx, cancel := context.WithCancel(ctx) var wg sync.WaitGroup - for i := 0; i < goRoutines-1; i++ { + for range goRoutines - 1 { wg.Add(1) go func() { defer wg.Done() @@ -582,7 +582,7 @@ func TestQueue(t *testing.T) { t.Run("TryFlush", func(t *testing.T) { const size = 3 q := newQueue(size) - for i := 0; i < size-1; i++ { + for range size - 1 { q.write.Value = r q.write = q.write.Next() q.len++ @@ -627,7 +627,7 @@ func TestQueue(t *testing.T) { wg.Add(goRoutines) b := newQueue(goRoutines) - for i := 0; i < goRoutines; i++ { + for range goRoutines { go func() { defer wg.Done() b.Enqueue(Record{}) diff --git a/sdk/log/bench_test.go b/sdk/log/bench_test.go index 74689d7f387..8b12dc2e882 100644 --- a/sdk/log/bench_test.go +++ b/sdk/log/bench_test.go @@ -8,9 +8,9 @@ import ( "testing" "time" - "go.opentelemetry.io/otel/log" - "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/otel/log" ) type mockDelayExporter struct{} @@ -128,57 +128,57 @@ func BenchmarkProcessor(b *testing.B) { type timestampProcessor struct{} -func (p timestampProcessor) OnEmit(ctx context.Context, r *Record) error { +func (timestampProcessor) OnEmit(_ context.Context, r *Record) error { r.SetObservedTimestamp(time.Date(1988, time.November, 17, 0, 0, 0, 0, time.UTC)) return nil } -func (p timestampProcessor) Enabled(context.Context, Record) bool { +func (timestampProcessor) Enabled(context.Context, Record) bool { return true } -func (p timestampProcessor) Shutdown(ctx context.Context) error { +func (timestampProcessor) Shutdown(context.Context) error { return nil } -func (p timestampProcessor) ForceFlush(ctx context.Context) error { +func (timestampProcessor) ForceFlush(context.Context) error { return nil } type attrAddProcessor struct{} -func (p attrAddProcessor) OnEmit(ctx context.Context, 
r *Record) error { +func (attrAddProcessor) OnEmit(_ context.Context, r *Record) error { r.AddAttributes(log.String("add", "me")) return nil } -func (p attrAddProcessor) Enabled(context.Context, Record) bool { +func (attrAddProcessor) Enabled(context.Context, Record) bool { return true } -func (p attrAddProcessor) Shutdown(ctx context.Context) error { +func (attrAddProcessor) Shutdown(context.Context) error { return nil } -func (p attrAddProcessor) ForceFlush(ctx context.Context) error { +func (attrAddProcessor) ForceFlush(context.Context) error { return nil } type attrSetDecorator struct{} -func (p attrSetDecorator) OnEmit(ctx context.Context, r *Record) error { +func (attrSetDecorator) OnEmit(_ context.Context, r *Record) error { r.SetAttributes(log.String("replace", "me")) return nil } -func (p attrSetDecorator) Enabled(context.Context, Record) bool { +func (attrSetDecorator) Enabled(context.Context, Record) bool { return true } -func (p attrSetDecorator) Shutdown(ctx context.Context) error { +func (attrSetDecorator) Shutdown(context.Context) error { return nil } -func (p attrSetDecorator) ForceFlush(ctx context.Context) error { +func (attrSetDecorator) ForceFlush(context.Context) error { return nil } diff --git a/sdk/log/doc.go b/sdk/log/doc.go index 78935de6368..a27834a5b32 100644 --- a/sdk/log/doc.go +++ b/sdk/log/doc.go @@ -30,6 +30,9 @@ should be used to describe the unique runtime environment instrumented code is being run on. That way when multiple instances of the code are collected at a single endpoint their origin is decipherable. +See [go.opentelemetry.io/otel/sdk/log/internal/x] for information about +the experimental features. + See [go.opentelemetry.io/otel/log] for more information about the OpenTelemetry Logs API. 
*/ diff --git a/sdk/log/example_test.go b/sdk/log/example_test.go index 5cfd75e4b8b..c8a43a166f8 100644 --- a/sdk/log/example_test.go +++ b/sdk/log/example_test.go @@ -112,8 +112,56 @@ func ignoreLogs(ctx context.Context) bool { return ok } +// Use a processor which sets EventName on log records having "event.name" string attribute. +// This is useful for users of logging libraries that do not support +// setting the event name on log records, but do support attributes. +func ExampleProcessor_eventName() { + // Existing processor that emits telemetry. + var processor log.Processor = log.NewBatchProcessor(nil) + + // Add a processor so that it sets EventName on log records. + eventNameProcessor := &EventNameProcessor{} + + // The created processor can then be registered with + // the OpenTelemetry Logs SDK using the WithProcessor option. + _ = log.NewLoggerProvider( + // Order is important here. Set EventName before handing to the processor. + log.WithProcessor(eventNameProcessor), + log.WithProcessor(processor), + ) +} + +// EventNameProcessor is a [log.Processor] that sets the EventName +// on log records having "event.name" string attribute. +// It is useful for logging libraries that do not support +// setting the event name on log records, +// but do support attributes. +type EventNameProcessor struct{} + +// OnEmit sets the EventName on log records having an "event.name" string attribute. +// The original attribute is not removed. +func (*EventNameProcessor) OnEmit(_ context.Context, record *log.Record) error { + record.WalkAttributes(func(kv logapi.KeyValue) bool { + if kv.Key == "event.name" && kv.Value.Kind() == logapi.KindString { + record.SetEventName(kv.Value.AsString()) + } + return true + }) + return nil +} + +// Shutdown returns nil. +func (*EventNameProcessor) Shutdown(context.Context) error { + return nil +} + +// ForceFlush returns nil. 
+func (*EventNameProcessor) ForceFlush(context.Context) error { + return nil +} + // Use a processor which redacts sensitive data from some attributes. -func ExampleProcessor() { +func ExampleProcessor_redact() { // Existing processor that emits telemetry. var processor log.Processor = log.NewBatchProcessor(nil) @@ -135,7 +183,7 @@ type RedactTokensProcessor struct{} // OnEmit redacts values from attributes containing "token" in the key // by replacing them with a REDACTED value. -func (p *RedactTokensProcessor) OnEmit(ctx context.Context, record *log.Record) error { +func (*RedactTokensProcessor) OnEmit(_ context.Context, record *log.Record) error { record.WalkAttributes(func(kv logapi.KeyValue) bool { if strings.Contains(strings.ToLower(kv.Key), "token") { record.AddAttributes(logapi.String(kv.Key, "REDACTED")) @@ -146,11 +194,11 @@ func (p *RedactTokensProcessor) OnEmit(ctx context.Context, record *log.Record) } // Shutdown returns nil. -func (p *RedactTokensProcessor) Shutdown(ctx context.Context) error { +func (*RedactTokensProcessor) Shutdown(context.Context) error { return nil } // ForceFlush returns nil. 
-func (p *RedactTokensProcessor) ForceFlush(ctx context.Context) error { +func (*RedactTokensProcessor) ForceFlush(context.Context) error { return nil } diff --git a/sdk/log/exporter_test.go b/sdk/log/exporter_test.go index 25f05832087..7d348574275 100644 --- a/sdk/log/exporter_test.go +++ b/sdk/log/exporter_test.go @@ -112,7 +112,7 @@ func (e *testExporter) Stop() { <-e.done } -func (e *testExporter) Shutdown(ctx context.Context) error { +func (e *testExporter) Shutdown(context.Context) error { atomic.AddInt32(e.shutdownN, 1) return e.Err } @@ -121,7 +121,7 @@ func (e *testExporter) ShutdownN() int { return int(atomic.LoadInt32(e.shutdownN)) } -func (e *testExporter) ForceFlush(ctx context.Context) error { +func (e *testExporter) ForceFlush(context.Context) error { atomic.AddInt32(e.forceFlushN, 1) return e.Err } @@ -245,7 +245,7 @@ func TestExportSync(t *testing.T) { const goRoutines = 10 var wg sync.WaitGroup wg.Add(goRoutines) - for i := 0; i < goRoutines; i++ { + for i := range goRoutines { go func(n int) { defer wg.Done() @@ -338,7 +338,7 @@ func TestBufferExporter(t *testing.T) { stop := make(chan struct{}) var wg sync.WaitGroup - for i := 0; i < goRoutines; i++ { + for range goRoutines { wg.Add(1) go func() { defer wg.Done() @@ -382,7 +382,7 @@ func TestBufferExporter(t *testing.T) { defer func(orig otel.ErrorHandler) { otel.SetErrorHandler(orig) }(otel.GetErrorHandler()) - handler := otel.ErrorHandlerFunc(func(err error) {}) + handler := otel.ErrorHandlerFunc(func(error) {}) otel.SetErrorHandler(handler) exp := newTestExporter(assert.AnError) diff --git a/sdk/log/filter_processor.go b/sdk/log/filter_processor.go index 682f2eb2c4f..283133aba5a 100644 --- a/sdk/log/filter_processor.go +++ b/sdk/log/filter_processor.go @@ -30,7 +30,7 @@ import ( // It provides a Processor used to filter out [Record] // that has a [log.Severity] below a threshold. 
type FilterProcessor interface { - // Enabled returns whether the Processor will process for the given context + // Enabled reports whether the Processor will process for the given context // and param. // // The passed param is likely to be a partial record information being diff --git a/sdk/log/go.mod b/sdk/log/go.mod index c87f73f0067..c250d3b3cdb 100644 --- a/sdk/log/go.mod +++ b/sdk/log/go.mod @@ -6,11 +6,13 @@ require ( github.com/go-logr/logr v1.4.3 github.com/go-logr/stdr v1.2.2 github.com/google/go-cmp v0.7.0 - github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/otel v1.37.0 - go.opentelemetry.io/otel/log v0.13.0 - go.opentelemetry.io/otel/sdk v1.37.0 - go.opentelemetry.io/otel/trace v1.37.0 + github.com/stretchr/testify v1.11.1 + go.opentelemetry.io/otel v1.38.0 + go.opentelemetry.io/otel/log v0.14.0 + go.opentelemetry.io/otel/metric v1.38.0 + go.opentelemetry.io/otel/sdk v1.38.0 + go.opentelemetry.io/otel/sdk/metric v1.38.0 + go.opentelemetry.io/otel/trace v1.38.0 ) require ( @@ -18,8 +20,7 @@ require ( github.com/google/uuid v1.6.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/otel/metric v1.37.0 // indirect - golang.org/x/sys v0.33.0 // indirect + golang.org/x/sys v0.35.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) @@ -32,3 +33,5 @@ replace go.opentelemetry.io/otel/sdk => ../ replace go.opentelemetry.io/otel/log => ../../log replace go.opentelemetry.io/otel => ../.. 
+ +replace go.opentelemetry.io/otel/sdk/metric => ../metric diff --git a/sdk/log/go.sum b/sdk/log/go.sum index 47deaf0ed85..c241c13d6e5 100644 --- a/sdk/log/go.sum +++ b/sdk/log/go.sum @@ -17,12 +17,12 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/sdk/log/internal/x/README.md b/sdk/log/internal/x/README.md new file mode 100644 index 00000000000..83e9e7b4cef --- /dev/null +++ b/sdk/log/internal/x/README.md @@ -0,0 +1,34 @@ +# Experimental Features + +The Logs SDK contains features that have not yet stabilized in 
the OpenTelemetry specification. +These features are added to the OpenTelemetry Go Logs SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback. + +These features may change in backwards incompatible ways as feedback is applied. +See the [Compatibility and Stability](#compatibility-and-stability) section for more information. + +## Features + +- [Self-Observability](#self-observability) + +### Self-Observability + +The Logs SDK provides a self-observability feature that allows you to monitor the SDK itself. + +To opt in, set the environment variable `OTEL_GO_X_SELF_OBSERVABILITY` to `true`. + +When enabled, the SDK will create the following metrics using the global `MeterProvider`: + +- `otel.sdk.log.created` + +Please see the [Semantic conventions for OpenTelemetry SDK metrics] documentation for more details on these metrics. + +[Semantic conventions for OpenTelemetry SDK metrics]: https://github.com/open-telemetry/semantic-conventions/blob/v1.36.0/docs/otel/sdk-metrics.md + +## Compatibility and Stability + +Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md). +These features may be removed or modified in successive version releases, including patch versions. + +When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release. +There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version. +If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support. 
diff --git a/sdk/log/internal/x/x.go b/sdk/log/internal/x/x.go new file mode 100644 index 00000000000..5f01b275df9 --- /dev/null +++ b/sdk/log/internal/x/x.go @@ -0,0 +1,63 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package x documents experimental features for [go.opentelemetry.io/otel/sdk/log]. +package x // import "go.opentelemetry.io/otel/sdk/log/internal/x" + +import ( + "os" + "strings" +) + +// SelfObservability is an experimental feature flag that determines if SDK +// self-observability metrics are enabled. +// +// To enable this feature set the OTEL_GO_X_SELF_OBSERVABILITY environment variable +// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" +// will also enable this). +var SelfObservability = newFeature("SELF_OBSERVABILITY", func(v string) (string, bool) { + if strings.EqualFold(v, "true") { + return v, true + } + return "", false +}) + +// Feature is an experimental feature control flag. It provides a uniform way +// to interact with these feature flags and parse their values. +type Feature[T any] struct { + key string + parse func(v string) (T, bool) +} + +func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { + const envKeyRoot = "OTEL_GO_X_" + return Feature[T]{ + key: envKeyRoot + suffix, + parse: parse, + } +} + +// Key returns the environment variable key that needs to be set to enable the +// feature. +func (f Feature[T]) Key() string { return f.key } + +// Lookup returns the user configured value for the feature and true if the +// user has enabled the feature. Otherwise, if the feature is not enabled, a +// zero-value and false are returned. 
+func (f Feature[T]) Lookup() (v T, ok bool) { + // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value + // + // > The SDK MUST interpret an empty value of an environment variable the + // > same way as when the variable is unset. + vRaw := os.Getenv(f.key) + if vRaw == "" { + return v, ok + } + return f.parse(vRaw) +} + +// Enabled reports whether the feature is enabled. +func (f Feature[T]) Enabled() bool { + _, ok := f.Lookup() + return ok +} diff --git a/sdk/log/internal/x/x_test.go b/sdk/log/internal/x/x_test.go new file mode 100644 index 00000000000..15124ca91d1 --- /dev/null +++ b/sdk/log/internal/x/x_test.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package x + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSelfObservability(t *testing.T) { + const key = "OTEL_GO_X_SELF_OBSERVABILITY" + require.Equal(t, key, SelfObservability.Key()) + + t.Run("100", run(setenv(key, "100"), assertDisabled(SelfObservability))) + t.Run("true", run(setenv(key, "true"), assertEnabled(SelfObservability, "true"))) + t.Run("True", run(setenv(key, "True"), assertEnabled(SelfObservability, "True"))) + t.Run("false", run(setenv(key, "false"), assertDisabled(SelfObservability))) + t.Run("empty", run(assertDisabled(SelfObservability))) +} + +func run(steps ...func(*testing.T)) func(*testing.T) { + return func(t *testing.T) { + t.Helper() + for _, step := range steps { + step(t) + } + } +} + +func setenv(k, v string) func(t *testing.T) { //nolint:unparam // This is a reusable test utility function. 
+ return func(t *testing.T) { t.Setenv(k, v) } +} + +func assertEnabled[T any](f Feature[T], want T) func(*testing.T) { + return func(t *testing.T) { + t.Helper() + assert.True(t, f.Enabled(), "not enabled") + + v, ok := f.Lookup() + assert.True(t, ok, "Lookup state") + assert.Equal(t, want, v, "Lookup value") + } +} + +func assertDisabled[T any](f Feature[T]) func(*testing.T) { + var zero T + return func(t *testing.T) { + t.Helper() + + assert.False(t, f.Enabled(), "enabled") + + v, ok := f.Lookup() + assert.False(t, ok, "Lookup state") + assert.Equal(t, zero, v, "Lookup value") + } +} diff --git a/sdk/log/logger.go b/sdk/log/logger.go index 1ec8ff88381..7dad98c92dd 100644 --- a/sdk/log/logger.go +++ b/sdk/log/logger.go @@ -5,12 +5,18 @@ package log // import "go.opentelemetry.io/otel/sdk/log" import ( "context" + "fmt" "time" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/log" "go.opentelemetry.io/otel/log/embedded" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/log/internal/x" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" "go.opentelemetry.io/otel/trace" ) @@ -24,13 +30,31 @@ type logger struct { provider *LoggerProvider instrumentationScope instrumentation.Scope + + selfObservabilityEnabled bool + logCreatedMetric otelconv.SDKLogCreated } func newLogger(p *LoggerProvider, scope instrumentation.Scope) *logger { - return &logger{ + l := &logger{ provider: p, instrumentationScope: scope, } + if !x.SelfObservability.Enabled() { + return l + } + l.selfObservabilityEnabled = true + mp := otel.GetMeterProvider() + m := mp.Meter("go.opentelemetry.io/otel/sdk/log", + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(semconv.SchemaURL)) + + var err error + if l.logCreatedMetric, err = otelconv.NewSDKLogCreated(m); err != nil { + err = fmt.Errorf("failed to create log created metric: 
%w", err) + otel.Handle(err) + } + return l } func (l *logger) Emit(ctx context.Context, r log.Record) { @@ -84,7 +108,6 @@ func (l *logger) newRecord(ctx context.Context, r log.Record) Record { observedTimestamp: r.ObservedTimestamp(), severity: r.Severity(), severityText: r.SeverityText(), - body: r.Body(), traceID: sc.TraceID(), spanID: sc.SpanID(), @@ -94,7 +117,14 @@ func (l *logger) newRecord(ctx context.Context, r log.Record) Record { scope: &l.instrumentationScope, attributeValueLengthLimit: l.provider.attributeValueLengthLimit, attributeCountLimit: l.provider.attributeCountLimit, + allowDupKeys: l.provider.allowDupKeys, } + if l.selfObservabilityEnabled { + l.logCreatedMetric.Add(ctx, 1) + } + + // This ensures we deduplicate key-value collections in the log body + newRecord.SetBody(r.Body()) // This field SHOULD be set once the event is observed by OpenTelemetry. if newRecord.observedTimestamp.IsZero() { diff --git a/sdk/log/logger_test.go b/sdk/log/logger_test.go index fa22cd31aff..0d6b2d136ef 100644 --- a/sdk/log/logger_test.go +++ b/sdk/log/logger_test.go @@ -6,15 +6,25 @@ package log // import "go.opentelemetry.io/otel/sdk/log" import ( "context" "errors" + "strconv" "testing" "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/log" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/instrumentation" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest" "go.opentelemetry.io/otel/sdk/resource" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" "go.opentelemetry.io/otel/trace" ) @@ -47,6 +57,21 @@ func TestLoggerEmit(t *testing.T) { rWithNoObservedTimestamp := r rWithNoObservedTimestamp.SetObservedTimestamp(time.Time{}) 
+ rWithAllowKeyDuplication := r + rWithAllowKeyDuplication.AddAttributes( + log.String("k1", "str1"), + ) + rWithAllowKeyDuplication.SetBody(log.MapValue( + log.Int64("1", 2), + log.Int64("1", 3), + )) + + rWithDuplicatesInBody := r + rWithDuplicatesInBody.SetBody(log.MapValue( + log.Int64("1", 2), + log.Int64("1", 3), + )) + contextWithSpanContext := trace.ContextWithSpanContext( context.Background(), trace.NewSpanContext(trace.SpanContextConfig{ @@ -206,6 +231,73 @@ func TestLoggerEmit(t *testing.T) { }, }, }, + { + name: "WithAllowKeyDuplication", + logger: newLogger(NewLoggerProvider( + WithProcessor(p0), + WithProcessor(p1), + WithAttributeValueLengthLimit(5), + WithAttributeCountLimit(5), + WithResource(resource.NewSchemaless(attribute.String("key", "value"))), + WithAllowKeyDuplication(), + ), instrumentation.Scope{Name: "scope"}), + ctx: context.Background(), + record: rWithAllowKeyDuplication, + expectedRecords: []Record{ + { + eventName: rWithAllowKeyDuplication.EventName(), + timestamp: rWithAllowKeyDuplication.Timestamp(), + body: rWithAllowKeyDuplication.Body(), + severity: rWithAllowKeyDuplication.Severity(), + severityText: rWithAllowKeyDuplication.SeverityText(), + observedTimestamp: rWithAllowKeyDuplication.ObservedTimestamp(), + resource: resource.NewSchemaless(attribute.String("key", "value")), + attributeValueLengthLimit: 5, + attributeCountLimit: 5, + scope: &instrumentation.Scope{Name: "scope"}, + front: [attributesInlineCount]log.KeyValue{ + log.String("k1", "str"), + log.Float64("k2", 1.0), + log.String("k1", "str1"), + }, + nFront: 3, + allowDupKeys: true, + }, + }, + }, + { + name: "WithDuplicatesInBody", + logger: newLogger(NewLoggerProvider( + WithProcessor(p0), + WithProcessor(p1), + WithAttributeValueLengthLimit(5), + WithAttributeCountLimit(5), + WithResource(resource.NewSchemaless(attribute.String("key", "value"))), + ), instrumentation.Scope{Name: "scope"}), + ctx: context.Background(), + record: rWithDuplicatesInBody, + 
expectedRecords: []Record{ + { + eventName: rWithDuplicatesInBody.EventName(), + timestamp: rWithDuplicatesInBody.Timestamp(), + body: log.MapValue( + log.Int64("1", 3), + ), + severity: rWithDuplicatesInBody.Severity(), + severityText: rWithDuplicatesInBody.SeverityText(), + observedTimestamp: rWithDuplicatesInBody.ObservedTimestamp(), + resource: resource.NewSchemaless(attribute.String("key", "value")), + attributeValueLengthLimit: 5, + attributeCountLimit: 5, + scope: &instrumentation.Scope{Name: "scope"}, + front: [attributesInlineCount]log.KeyValue{ + log.String("k1", "str"), + log.Float64("k2", 1.0), + }, + nFront: 2, + }, + }, + }, } for _, tc := range testCases { @@ -309,3 +401,115 @@ func TestLoggerEnabled(t *testing.T) { }) } } + +func TestLoggerSelfObservability(t *testing.T) { + testCases := []struct { + name string + selfObservabilityEnabled bool + records []log.Record + wantLogRecordCount int64 + }{ + { + name: "Disabled", + selfObservabilityEnabled: false, + records: []log.Record{{}, {}}, + wantLogRecordCount: 0, + }, + { + name: "Enabled", + selfObservabilityEnabled: true, + records: []log.Record{{}, {}, {}, {}, {}}, + wantLogRecordCount: 5, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + t.Setenv("OTEL_GO_X_SELF_OBSERVABILITY", strconv.FormatBool(tc.selfObservabilityEnabled)) + prev := otel.GetMeterProvider() + t.Cleanup(func() { + otel.SetMeterProvider(prev) + }) + r := sdkmetric.NewManualReader() + mp := sdkmetric.NewMeterProvider(sdkmetric.WithReader(r)) + otel.SetMeterProvider(mp) + l := newLogger(NewLoggerProvider(), instrumentation.Scope{}) + + for _, record := range tc.records { + l.Emit(context.Background(), record) + } + + gotMetrics := new(metricdata.ResourceMetrics) + assert.NoError(t, r.Collect(context.Background(), gotMetrics)) + if tc.wantLogRecordCount == 0 { + assert.Empty(t, gotMetrics.ScopeMetrics) + return + } + + require.Len(t, gotMetrics.ScopeMetrics, 1) + sm := gotMetrics.ScopeMetrics[0] + 
assert.Equal(t, instrumentation.Scope{ + Name: "go.opentelemetry.io/otel/sdk/log", + Version: sdk.Version(), + SchemaURL: semconv.SchemaURL, + }, sm.Scope) + + wantMetric := metricdata.Metrics{ + Name: otelconv.SDKLogCreated{}.Name(), + Description: otelconv.SDKLogCreated{}.Description(), + Unit: otelconv.SDKLogCreated{}.Unit(), + Data: metricdata.Sum[int64]{ + DataPoints: []metricdata.DataPoint[int64]{{Value: tc.wantLogRecordCount}}, + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + }, + } + metricdatatest.AssertEqual(t, wantMetric, sm.Metrics[0], metricdatatest.IgnoreTimestamp()) + }) + } +} + +func TestNewLoggerSelfObservabilityErrorHandled(t *testing.T) { + errHandler := otel.GetErrorHandler() + t.Cleanup(func() { + otel.SetErrorHandler(errHandler) + }) + + var errs []error + eh := otel.ErrorHandlerFunc(func(e error) { errs = append(errs, e) }) + otel.SetErrorHandler(eh) + + orig := otel.GetMeterProvider() + t.Cleanup(func() { otel.SetMeterProvider(orig) }) + otel.SetMeterProvider(&errMeterProvider{err: assert.AnError}) + + t.Setenv("OTEL_GO_X_SELF_OBSERVABILITY", "true") + l := newLogger(NewLoggerProvider(), instrumentation.Scope{}) + _ = l + require.Len(t, errs, 1) + assert.ErrorIs(t, errs[0], assert.AnError) +} + +type errMeterProvider struct { + metric.MeterProvider + + err error +} + +func (mp *errMeterProvider) Meter(string, ...metric.MeterOption) metric.Meter { + return &errMeter{err: mp.err} +} + +type errMeter struct { + metric.Meter + + err error +} + +func (m *errMeter) Int64Counter(string, ...metric.Int64CounterOption) (metric.Int64Counter, error) { + return nil, m.err +} + +func (m *errMeter) Int64UpDownCounter(string, ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { + return nil, m.err +} diff --git a/sdk/log/logtest/example_test.go b/sdk/log/logtest/example_test.go index 7bfdb38df3b..8b3ac0c8ff5 100644 --- a/sdk/log/logtest/example_test.go +++ b/sdk/log/logtest/example_test.go @@ -40,7 +40,7 @@ var _ 
log.Exporter = exporter{} type exporter struct{ io.Writer } -func (e exporter) Export(ctx context.Context, records []log.Record) error { +func (e exporter) Export(_ context.Context, records []log.Record) error { for i, r := range records { if i != 0 { if _, err := e.Write([]byte("\n")); err != nil { @@ -54,11 +54,11 @@ func (e exporter) Export(ctx context.Context, records []log.Record) error { return nil } -func (e exporter) Shutdown(context.Context) error { +func (exporter) Shutdown(context.Context) error { return nil } // appropriate error should be returned in these situations. -func (e exporter) ForceFlush(context.Context) error { +func (exporter) ForceFlush(context.Context) error { return nil } diff --git a/sdk/log/logtest/go.mod b/sdk/log/logtest/go.mod index dcbdc78bdaa..157543bc326 100644 --- a/sdk/log/logtest/go.mod +++ b/sdk/log/logtest/go.mod @@ -3,12 +3,12 @@ module go.opentelemetry.io/otel/sdk/log/logtest go 1.23.0 require ( - github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/otel v1.37.0 - go.opentelemetry.io/otel/log v0.13.0 - go.opentelemetry.io/otel/sdk v1.37.0 - go.opentelemetry.io/otel/sdk/log v0.13.0 - go.opentelemetry.io/otel/trace v1.37.0 + github.com/stretchr/testify v1.11.1 + go.opentelemetry.io/otel v1.38.0 + go.opentelemetry.io/otel/log v0.14.0 + go.opentelemetry.io/otel/sdk v1.38.0 + go.opentelemetry.io/otel/sdk/log v0.14.0 + go.opentelemetry.io/otel/trace v1.38.0 ) require ( @@ -18,8 +18,8 @@ require ( github.com/google/uuid v1.6.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/otel/metric v1.37.0 // indirect - golang.org/x/sys v0.33.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + golang.org/x/sys v0.35.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) @@ -34,3 +34,5 @@ replace go.opentelemetry.io/otel/sdk/log => ../ replace go.opentelemetry.io/otel/log => ../../../log replace go.opentelemetry.io/otel => ../../.. 
+ +replace go.opentelemetry.io/otel/sdk/metric => ../../metric diff --git a/sdk/log/logtest/go.sum b/sdk/log/logtest/go.sum index 47deaf0ed85..c241c13d6e5 100644 --- a/sdk/log/logtest/go.sum +++ b/sdk/log/logtest/go.sum @@ -17,12 +17,12 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/sdk/log/provider.go b/sdk/log/provider.go index 359357b7e89..c69422e12d4 100644 --- a/sdk/log/provider.go +++ b/sdk/log/provider.go @@ -32,6 +32,7 @@ type providerConfig struct { fltrProcessors []FilterProcessor attrCntLim 
setting[int] attrValLenLim setting[int] + allowDupKeys setting[bool] } func newProviderConfig(opts []LoggerProviderOption) providerConfig { @@ -67,6 +68,7 @@ type LoggerProvider struct { fltrProcessors []FilterProcessor attributeCountLimit int attributeValueLengthLimit int + allowDupKeys bool loggersMu sync.Mutex loggers map[instrumentation.Scope]*logger @@ -93,6 +95,7 @@ func NewLoggerProvider(opts ...LoggerProviderOption) *LoggerProvider { fltrProcessors: cfg.fltrProcessors, attributeCountLimit: cfg.attrCntLim.Value, attributeValueLengthLimit: cfg.attrValLenLim.Value, + allowDupKeys: cfg.allowDupKeys.Value, } } @@ -254,3 +257,21 @@ func WithAttributeValueLengthLimit(limit int) LoggerProviderOption { return cfg }) } + +// WithAllowKeyDuplication sets whether deduplication is skipped for log attributes or other key-value collections. +// +// By default, the key-value collections within a log record are deduplicated to comply with the OpenTelemetry Specification. +// Deduplication means that if multiple key-value pairs with the same key are present, only a single pair +// is retained and others are discarded. +// +// Disabling deduplication with this option can improve performance, e.g. when adding attributes to the log record. +// +// Note that if you disable deduplication, you are responsible for ensuring that duplicate +// key-value pairs within a single collection are not emitted, +// or that the telemetry receiver can handle such duplicates.
+func WithAllowKeyDuplication() LoggerProviderOption { + return loggerProviderOptionFunc(func(cfg providerConfig) providerConfig { + cfg.allowDupKeys = newSetting(true) + return cfg + }) +} diff --git a/sdk/log/provider_test.go b/sdk/log/provider_test.go index 9497baa735c..23a74630cea 100644 --- a/sdk/log/provider_test.go +++ b/sdk/log/provider_test.go @@ -40,7 +40,7 @@ func newProcessor(name string) *processor { return &processor{Name: name} } -func (p *processor) OnEmit(ctx context.Context, r *Record) error { +func (p *processor) OnEmit(_ context.Context, r *Record) error { if p.Err != nil { return p.Err } @@ -115,12 +115,14 @@ func TestNewLoggerProviderConfiguration(t *testing.T) { WithProcessor(p1), WithAttributeCountLimit(attrCntLim), WithAttributeValueLengthLimit(attrValLenLim), + WithAllowKeyDuplication(), }, want: &LoggerProvider{ resource: res, processors: []Processor{p0, p1}, attributeCountLimit: attrCntLim, attributeValueLengthLimit: attrValLenLim, + allowDupKeys: true, }, }, { @@ -237,7 +239,6 @@ func TestWithResource(t *testing.T) { }, } for _, tc := range cases { - tc := tc t.Run(tc.name, func(t *testing.T) { got := newProviderConfig(tc.options).resource if diff := cmp.Diff(got, tc.want); diff != "" { @@ -247,7 +248,7 @@ func TestWithResource(t *testing.T) { } } -func TestLoggerProviderConcurrentSafe(t *testing.T) { +func TestLoggerProviderConcurrentSafe(*testing.T) { const goRoutineN = 10 var wg sync.WaitGroup @@ -256,7 +257,7 @@ func TestLoggerProviderConcurrentSafe(t *testing.T) { p := NewLoggerProvider(WithProcessor(newProcessor("0"))) const name = "testLogger" ctx := context.Background() - for i := 0; i < goRoutineN; i++ { + for range goRoutineN { go func() { defer wg.Done() @@ -274,10 +275,10 @@ type logSink struct { level int msg string - keysAndValues []interface{} + keysAndValues []any } -func (l *logSink) Enabled(int) bool { return true } +func (*logSink) Enabled(int) bool { return true } func (l *logSink) Info(level int, msg string, 
keysAndValues ...any) { l.level, l.msg, l.keysAndValues = level, msg, keysAndValues diff --git a/sdk/log/record.go b/sdk/log/record.go index 38fd6507943..9dfd69b645b 100644 --- a/sdk/log/record.go +++ b/sdk/log/record.go @@ -93,6 +93,9 @@ type Record struct { attributeValueLengthLimit int attributeCountLimit int + // specifies whether we should deduplicate any key value collections or not + allowDupKeys bool + noCmp [0]func() //nolint: unused // This is indeed used. } @@ -167,7 +170,11 @@ func (r *Record) Body() log.Value { // SetBody sets the body of the log record. func (r *Record) SetBody(v log.Value) { - r.body = v + if !r.allowDupKeys { + r.body = r.dedupeBodyCollections(v) + } else { + r.body = v + } } // WalkAttributes walks all attributes the log record holds by calling f for @@ -192,56 +199,60 @@ func (r *Record) AddAttributes(attrs ...log.KeyValue) { if n == 0 { // Avoid the more complex duplicate map lookups below. var drop int - attrs, drop = dedup(attrs) - r.setDropped(drop) + if !r.allowDupKeys { + attrs, drop = dedup(attrs) + r.setDropped(drop) + } - attrs, drop = head(attrs, r.attributeCountLimit) + attrs, drop := head(attrs, r.attributeCountLimit) r.addDropped(drop) r.addAttrs(attrs) return } - // Used to find duplicates between attrs and existing attributes in r. - rIndex := r.attrIndex() - defer putIndex(rIndex) + if !r.allowDupKeys { + // Used to find duplicates between attrs and existing attributes in r. + rIndex := r.attrIndex() + defer putIndex(rIndex) - // Unique attrs that need to be added to r. This uses the same underlying - // array as attrs. - // - // Note, do not iterate attrs twice by just calling dedup(attrs) here. - unique := attrs[:0] - // Used to find duplicates within attrs itself. The index value is the - // index of the element in unique. - uIndex := getIndex() - defer putIndex(uIndex) - - // Deduplicate attrs within the scope of all existing attributes. 
- for _, a := range attrs { - // Last-value-wins for any duplicates in attrs. - idx, found := uIndex[a.Key] - if found { - r.addDropped(1) - unique[idx] = a - continue - } + // Unique attrs that need to be added to r. This uses the same underlying + // array as attrs. + // + // Note, do not iterate attrs twice by just calling dedup(attrs) here. + unique := attrs[:0] + // Used to find duplicates within attrs itself. The index value is the + // index of the element in unique. + uIndex := getIndex() + defer putIndex(uIndex) + + // Deduplicate attrs within the scope of all existing attributes. + for _, a := range attrs { + // Last-value-wins for any duplicates in attrs. + idx, found := uIndex[a.Key] + if found { + r.addDropped(1) + unique[idx] = a + continue + } - idx, found = rIndex[a.Key] - if found { - // New attrs overwrite any existing with the same key. - r.addDropped(1) - if idx < 0 { - r.front[-(idx + 1)] = a + idx, found = rIndex[a.Key] + if found { + // New attrs overwrite any existing with the same key. + r.addDropped(1) + if idx < 0 { + r.front[-(idx + 1)] = a + } else { + r.back[idx] = a + } } else { - r.back[idx] = a + // Unique attribute. + unique = append(unique, a) + uIndex[a.Key] = len(unique) - 1 } - } else { - // Unique attribute. - unique = append(unique, a) - uIndex[a.Key] = len(unique) - 1 } + attrs = unique } - attrs = unique if r.attributeCountLimit > 0 && n+len(attrs) > r.attributeCountLimit { // Truncate the now unique attributes to comply with limit. @@ -297,8 +308,11 @@ func (r *Record) addAttrs(attrs []log.KeyValue) { // SetAttributes sets (and overrides) attributes to the log record. 
func (r *Record) SetAttributes(attrs ...log.KeyValue) { var drop int - attrs, drop = dedup(attrs) - r.setDropped(drop) + r.setDropped(0) + if !r.allowDupKeys { + attrs, drop = dedup(attrs) + r.setDropped(drop) + } attrs, drop = head(attrs, r.attributeCountLimit) r.addDropped(drop) @@ -426,10 +440,14 @@ func (r *Record) applyValueLimits(val log.Value) log.Value { } val = log.SliceValue(sl...) case log.KindMap: - // Deduplicate then truncate. Do not do at the same time to avoid - // wasted truncation operations. - kvs, dropped := dedup(val.AsMap()) - r.addDropped(dropped) + kvs := val.AsMap() + if !r.allowDupKeys { + // Deduplicate then truncate. Do not do at the same time to avoid + // wasted truncation operations. + var dropped int + kvs, dropped = dedup(kvs) + r.addDropped(dropped) + } for i := range kvs { kvs[i] = r.applyAttrLimits(kvs[i]) } @@ -438,6 +456,24 @@ func (r *Record) applyValueLimits(val log.Value) log.Value { return val } +func (r *Record) dedupeBodyCollections(val log.Value) log.Value { + switch val.Kind() { + case log.KindSlice: + sl := val.AsSlice() + for i := range sl { + sl[i] = r.dedupeBodyCollections(sl[i]) + } + val = log.SliceValue(sl...) + case log.KindMap: + kvs, _ := dedup(val.AsMap()) + for i := range kvs { + kvs[i].Value = r.dedupeBodyCollections(kvs[i].Value) + } + val = log.MapValue(kvs...) + } + return val +} + // truncate returns a truncated version of s such that it contains less than // the limit number of characters. Truncation is applied by returning the limit // number of valid characters contained in s. 
diff --git a/sdk/log/record_test.go b/sdk/log/record_test.go index 411a8a3e41a..103e673a15f 100644 --- a/sdk/log/record_test.go +++ b/sdk/log/record_test.go @@ -56,10 +56,82 @@ func TestRecordSeverityText(t *testing.T) { } func TestRecordBody(t *testing.T) { - v := log.BoolValue(true) - r := new(Record) - r.SetBody(v) - assert.True(t, v.Equal(r.Body())) + testcases := []struct { + name string + allowDuplicates bool + body log.Value + want log.Value + }{ + { + name: "Bool", + body: log.BoolValue(true), + want: log.BoolValue(true), + }, + { + name: "slice", + body: log.SliceValue(log.BoolValue(true), log.BoolValue(false)), + want: log.SliceValue(log.BoolValue(true), log.BoolValue(false)), + }, + { + name: "map", + body: log.MapValue( + log.Bool("0", true), + log.Int64("1", 2), // This should be removed + log.Float64("2", 3.0), + log.String("3", "forth"), + log.Slice("4", log.Int64Value(1)), + log.Map("5", log.Int("key", 2)), + log.Bytes("6", []byte("six")), + log.Int64("1", 3), + ), + want: log.MapValue( + log.Bool("0", true), + log.Float64("2", 3.0), + log.String("3", "forth"), + log.Slice("4", log.Int64Value(1)), + log.Map("5", log.Int("key", 2)), + log.Bytes("6", []byte("six")), + log.Int64("1", 3), + ), + }, + { + name: "nestedMap", + body: log.MapValue( + log.Map("key", + log.Int64("key", 1), + log.Int64("key", 2), + ), + ), + want: log.MapValue( + log.Map("key", + log.Int64("key", 2), + ), + ), + }, + { + name: "map - allow duplicates", + allowDuplicates: true, + body: log.MapValue( + log.Int64("1", 2), + log.Int64("1", 3), + ), + want: log.MapValue( + log.Int64("1", 2), + log.Int64("1", 3), + ), + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + r := new(Record) + r.allowDupKeys = tc.allowDuplicates + r.SetBody(tc.body) + got := r.Body() + if !got.Equal(tc.want) { + t.Errorf("r.Body() = %v, want %v", got, tc.want) + } + }) + } } func TestRecordAttributes(t *testing.T) { @@ -230,6 +302,158 @@ func TestRecordDroppedAttributes(t 
*testing.T) { } } +func TestRecordAttrAllowDuplicateAttributes(t *testing.T) { + testcases := []struct { + name string + attrs []log.KeyValue + want []log.KeyValue + }{ + { + name: "EmptyKey", + attrs: make([]log.KeyValue, 10), + want: make([]log.KeyValue, 10), + }, + { + name: "MapKey", + attrs: []log.KeyValue{ + log.Map("key", log.Int("key", 5), log.Int("key", 10)), + }, + want: []log.KeyValue{ + log.Map("key", log.Int("key", 5), log.Int("key", 10)), + }, + }, + { + name: "NonEmptyKey", + attrs: []log.KeyValue{ + log.Bool("key", true), + log.Int64("key", 1), + log.Bool("key", false), + log.Float64("key", 2.), + log.String("key", "3"), + log.Slice("key", log.Int64Value(4)), + log.Map("key", log.Int("key", 5)), + log.Bytes("key", []byte("six")), + log.Bool("key", false), + }, + want: []log.KeyValue{ + log.Bool("key", true), + log.Int64("key", 1), + log.Bool("key", false), + log.Float64("key", 2.), + log.String("key", "3"), + log.Slice("key", log.Int64Value(4)), + log.Map("key", log.Int("key", 5)), + log.Bytes("key", []byte("six")), + log.Bool("key", false), + }, + }, + { + name: "Multiple", + attrs: []log.KeyValue{ + log.Bool("a", true), + log.Int64("b", 1), + log.Bool("a", false), + log.Float64("c", 2.), + log.String("b", "3"), + log.Slice("d", log.Int64Value(4)), + log.Map("a", log.Int("key", 5)), + log.Bytes("d", []byte("six")), + log.Bool("e", true), + log.Int("f", 1), + log.Int("f", 2), + log.Int("f", 3), + log.Float64("b", 0.0), + log.Float64("b", 0.0), + log.String("g", "G"), + log.String("h", "H"), + log.String("g", "GG"), + log.Bool("a", false), + }, + want: []log.KeyValue{ + // Order is important here. 
+ log.Bool("a", true), + log.Int64("b", 1), + log.Bool("a", false), + log.Float64("c", 2.), + log.String("b", "3"), + log.Slice("d", log.Int64Value(4)), + log.Map("a", log.Int("key", 5)), + log.Bytes("d", []byte("six")), + log.Bool("e", true), + log.Int("f", 1), + log.Int("f", 2), + log.Int("f", 3), + log.Float64("b", 0.0), + log.Float64("b", 0.0), + log.String("g", "G"), + log.String("h", "H"), + log.String("g", "GG"), + log.Bool("a", false), + }, + }, + { + name: "NoDuplicate", + attrs: func() []log.KeyValue { + out := make([]log.KeyValue, attributesInlineCount*2) + for i := range out { + out[i] = log.Bool(strconv.Itoa(i), true) + } + return out + }(), + want: func() []log.KeyValue { + out := make([]log.KeyValue, attributesInlineCount*2) + for i := range out { + out[i] = log.Bool(strconv.Itoa(i), true) + } + return out + }(), + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + validate := func(t *testing.T, r *Record, want []log.KeyValue) { + t.Helper() + + var i int + r.WalkAttributes(func(kv log.KeyValue) bool { + if assert.Lessf(t, i, len(want), "additional: %v", kv) { + want := want[i] + assert.Truef(t, kv.Equal(want), "%d: want %v, got %v", i, want, kv) + } + i++ + return true + }) + } + + t.Run("SetAttributes", func(t *testing.T) { + r := new(Record) + r.allowDupKeys = true + r.attributeValueLengthLimit = -1 + r.SetAttributes(tc.attrs...) + validate(t, r, tc.want) + }) + + t.Run("AddAttributes/Empty", func(t *testing.T) { + r := new(Record) + r.allowDupKeys = true + r.attributeValueLengthLimit = -1 + r.AddAttributes(tc.attrs...) + validate(t, r, tc.want) + }) + + t.Run("AddAttributes/Twice", func(t *testing.T) { + r := new(Record) + r.allowDupKeys = true + r.attributeValueLengthLimit = -1 + r.AddAttributes(tc.attrs...) + r.AddAttributes(tc.attrs...) + want := append(tc.want, tc.want...) 
+ validate(t, r, want) + }) + }) + } +} + func TestRecordAttrDeduplication(t *testing.T) { testcases := []struct { name string @@ -763,6 +987,19 @@ func BenchmarkSetAddAttributes(b *testing.B) { } }) + b.Run("SetAttributes/AllowDuplicates", func(b *testing.B) { + records := make([]Record, b.N) + for i := range records { + records[i].allowDupKeys = true + } + + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + records[i].SetAttributes(kv) + } + }) + b.Run("AddAttributes", func(b *testing.B) { records := make([]Record, b.N) @@ -772,4 +1009,37 @@ func BenchmarkSetAddAttributes(b *testing.B) { records[i].AddAttributes(kv) } }) + + b.Run("AddAttributes/AllowDuplicates", func(b *testing.B) { + records := make([]Record, b.N) + for i := range records { + records[i].allowDupKeys = true + } + + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + records[i].AddAttributes(kv) + } + }) +} + +func BenchmarkSetBody(b *testing.B) { + b.Run("SetBody", func(b *testing.B) { + records := make([]Record, b.N) + + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + records[i].SetBody(log.MapValue( + log.Bool("0", true), + log.Float64("2", 3.0), + log.String("3", "forth"), + log.Slice("4", log.Int64Value(1)), + log.Map("5", log.Int("key", 2)), + log.Bytes("6", []byte("six")), + log.Int64("1", 3), + )) + } + }) } diff --git a/sdk/log/ring_test.go b/sdk/log/ring_test.go index c4e0be79df6..ba55b57700a 100644 --- a/sdk/log/ring_test.go +++ b/sdk/log/ring_test.go @@ -15,9 +15,9 @@ import ( "go.opentelemetry.io/otel/log" ) -func verifyRing(t *testing.T, r *ring, N int, sum int) { +func verifyRing(t *testing.T, r *ring, num, sum int) { // Length. - assert.Equal(t, N, r.Len(), "r.Len()") + assert.Equal(t, num, r.Len(), "r.Len()") // Iteration. 
var n, s int @@ -28,7 +28,7 @@ func verifyRing(t *testing.T, r *ring, N int, sum int) { s += int(body.AsInt64()) } }) - assert.Equal(t, N, n, "number of forward iterations") + assert.Equal(t, num, n, "number of forward iterations") if sum >= 0 { assert.Equal(t, sum, s, "forward ring sum") } @@ -55,13 +55,13 @@ func verifyRing(t *testing.T, r *ring, N int, sum int) { } func TestNewRing(t *testing.T) { - for i := 0; i < 10; i++ { + for i := range 10 { // Empty value. r := newRing(i) verifyRing(t, r, i, -1) } - for n := 0; n < 10; n++ { + for n := range 10 { r := newRing(n) for i := 1; i <= n; i++ { var rec Record diff --git a/sdk/log/simple_test.go b/sdk/log/simple_test.go index f8719bfe551..394e4b7968f 100644 --- a/sdk/log/simple_test.go +++ b/sdk/log/simple_test.go @@ -77,11 +77,11 @@ func (e *writerExporter) Export(_ context.Context, records []log.Record) error { return nil } -func (e *writerExporter) Shutdown(context.Context) error { +func (*writerExporter) Shutdown(context.Context) error { return nil } -func (e *writerExporter) ForceFlush(context.Context) error { +func (*writerExporter) ForceFlush(context.Context) error { return nil } @@ -96,7 +96,7 @@ func TestSimpleProcessorEmpty(t *testing.T) { }) } -func TestSimpleProcessorConcurrentSafe(t *testing.T) { +func TestSimpleProcessorConcurrentSafe(*testing.T) { const goRoutineN = 10 var wg sync.WaitGroup @@ -107,7 +107,7 @@ func TestSimpleProcessorConcurrentSafe(t *testing.T) { ctx := context.Background() e := &writerExporter{new(strings.Builder)} s := log.NewSimpleProcessor(e) - for i := 0; i < goRoutineN; i++ { + for range goRoutineN { go func() { defer wg.Done() diff --git a/sdk/metric/benchmark_test.go b/sdk/metric/benchmark_test.go index b0eaa457c6b..d73759cbde2 100644 --- a/sdk/metric/benchmark_test.go +++ b/sdk/metric/benchmark_test.go @@ -113,19 +113,19 @@ func benchMeasAttrs(meas measF) func(*testing.B) { return func(b *testing.B) { b.Run("Attributes/0", func(b *testing.B) { f := 
meas(*attribute.EmptySet()) - b.ReportAllocs() - b.ResetTimer() - for n := 0; n < b.N; n++ { - f() - } + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + f() + } + }) }) b.Run("Attributes/1", func(b *testing.B) { f := meas(attribute.NewSet(attribute.Bool("K", true))) - b.ReportAllocs() - b.ResetTimer() - for n := 0; n < b.N; n++ { - f() - } + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + f() + } + }) }) b.Run("Attributes/10", func(b *testing.B) { n := 10 @@ -135,11 +135,11 @@ func benchMeasAttrs(meas measF) func(*testing.B) { attrs = append(attrs, attribute.Int(strconv.Itoa(i), i)) } f := meas(attribute.NewSet(attrs...)) - b.ReportAllocs() - b.ResetTimer() - for n := 0; n < b.N; n++ { - f() - } + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + f() + } + }) }) } } @@ -169,7 +169,7 @@ func benchCollectViews(views ...View) func(*testing.B) { m, r := setup("benchCollectViews/Int64Counter") i, err := m.Int64Counter("int64-counter") assert.NoError(b, err) - for n := 0; n < 10; n++ { + for range 10 { i.Add(ctx, 1, metric.WithAttributeSet(s)) } return r @@ -186,7 +186,7 @@ func benchCollectViews(views ...View) func(*testing.B) { m, r := setup("benchCollectViews/Float64Counter") i, err := m.Float64Counter("float64-counter") assert.NoError(b, err) - for n := 0; n < 10; n++ { + for range 10 { i.Add(ctx, 1, metric.WithAttributeSet(s)) } return r @@ -203,7 +203,7 @@ func benchCollectViews(views ...View) func(*testing.B) { m, r := setup("benchCollectViews/Int64UpDownCounter") i, err := m.Int64UpDownCounter("int64-up-down-counter") assert.NoError(b, err) - for n := 0; n < 10; n++ { + for range 10 { i.Add(ctx, 1, metric.WithAttributeSet(s)) } return r @@ -220,7 +220,7 @@ func benchCollectViews(views ...View) func(*testing.B) { m, r := setup("benchCollectViews/Float64UpDownCounter") i, err := m.Float64UpDownCounter("float64-up-down-counter") assert.NoError(b, err) - for n := 0; n < 10; n++ { + for range 10 { i.Add(ctx, 1, metric.WithAttributeSet(s)) } 
return r @@ -237,7 +237,7 @@ func benchCollectViews(views ...View) func(*testing.B) { m, r := setup("benchCollectViews/Int64Histogram") i, err := m.Int64Histogram("int64-histogram") assert.NoError(b, err) - for n := 0; n < 10; n++ { + for range 10 { i.Record(ctx, 1, metric.WithAttributeSet(s)) } return r @@ -254,7 +254,7 @@ func benchCollectViews(views ...View) func(*testing.B) { m, r := setup("benchCollectViews/Float64Histogram") i, err := m.Float64Histogram("float64-histogram") assert.NoError(b, err) - for n := 0; n < 10; n++ { + for range 10 { i.Record(ctx, 1, metric.WithAttributeSet(s)) } return r diff --git a/sdk/metric/cache_test.go b/sdk/metric/cache_test.go index c5ddbc055d4..8e0e4aa8db4 100644 --- a/sdk/metric/cache_test.go +++ b/sdk/metric/cache_test.go @@ -37,7 +37,7 @@ func TestCacheConcurrentSafe(t *testing.T) { c := cache[string, int]{} var wg sync.WaitGroup - for n := 0; n < goroutines; n++ { + for n := range goroutines { wg.Add(1) go func(i int) { defer wg.Done() diff --git a/sdk/metric/config.go b/sdk/metric/config.go index 203cd9d6508..c6440a1346c 100644 --- a/sdk/metric/config.go +++ b/sdk/metric/config.go @@ -7,6 +7,7 @@ import ( "context" "errors" "os" + "strconv" "strings" "sync" @@ -17,12 +18,15 @@ import ( // config contains configuration options for a MeterProvider. type config struct { - res *resource.Resource - readers []Reader - views []View - exemplarFilter exemplar.Filter + res *resource.Resource + readers []Reader + views []View + exemplarFilter exemplar.Filter + cardinalityLimit int } +const defaultCardinalityLimit = 0 + // readerSignals returns a force-flush and shutdown function for a // MeterProvider to call in their respective options. All Readers c contains // will have their force-flush and shutdown methods unified into returned @@ -69,8 +73,9 @@ func unifyShutdown(funcs []func(context.Context) error) func(context.Context) er // newConfig returns a config configured with options. 
func newConfig(options []Option) config { conf := config{ - res: resource.Default(), - exemplarFilter: exemplar.TraceBasedFilter, + res: resource.Default(), + exemplarFilter: exemplar.TraceBasedFilter, + cardinalityLimit: cardinalityLimitFromEnv(), } for _, o := range meterProviderOptionsFromEnv() { conf = o.apply(conf) @@ -155,6 +160,21 @@ func WithExemplarFilter(filter exemplar.Filter) Option { }) } +// WithCardinalityLimit sets the cardinality limit for the MeterProvider. +// +// The cardinality limit is the hard limit on the number of metric datapoints +// that can be collected for a single instrument in a single collect cycle. +// +// Setting this to a zero or negative value means no limit is applied. +func WithCardinalityLimit(limit int) Option { + // For backward compatibility, the environment variable `OTEL_GO_X_CARDINALITY_LIMIT` + // can also be used to set this value. + return optionFunc(func(cfg config) config { + cfg.cardinalityLimit = limit + return cfg + }) +} + func meterProviderOptionsFromEnv() []Option { var opts []Option // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/configuration/sdk-environment-variables.md#exemplar @@ -170,3 +190,17 @@ func meterProviderOptionsFromEnv() []Option { } return opts } + +func cardinalityLimitFromEnv() int { + const cardinalityLimitKey = "OTEL_GO_X_CARDINALITY_LIMIT" + v := strings.TrimSpace(os.Getenv(cardinalityLimitKey)) + if v == "" { + return defaultCardinalityLimit + } + n, err := strconv.Atoi(v) + if err != nil { + otel.Handle(err) + return defaultCardinalityLimit + } + return n +} diff --git a/sdk/metric/config_test.go b/sdk/metric/config_test.go index 307c3e598e0..ab7bebbe067 100644 --- a/sdk/metric/config_test.go +++ b/sdk/metric/config_test.go @@ -66,11 +66,11 @@ func TestConfigReaderSignalsEmpty(t *testing.T) { func TestConfigReaderSignalsForwarded(t *testing.T) { var flush, sdown int r := &reader{ - forceFlushFunc: func(ctx 
context.Context) error { + forceFlushFunc: func(context.Context) error { flush++ return nil }, - shutdownFunc: func(ctx context.Context) error { + shutdownFunc: func(context.Context) error { sdown++ return nil }, @@ -93,8 +93,8 @@ func TestConfigReaderSignalsForwarded(t *testing.T) { func TestConfigReaderSignalsForwardedErrors(t *testing.T) { r := &reader{ - forceFlushFunc: func(ctx context.Context) error { return assert.AnError }, - shutdownFunc: func(ctx context.Context) error { return assert.AnError }, + forceFlushFunc: func(context.Context) error { return assert.AnError }, + shutdownFunc: func(context.Context) error { return assert.AnError }, } c := newConfig([]Option{WithReader(r)}) f, s := c.readerSignals() @@ -115,9 +115,9 @@ func TestUnifyMultiError(t *testing.T) { e2 = errors.New("2") ) err := unify([]func(context.Context) error{ - func(ctx context.Context) error { return e0 }, - func(ctx context.Context) error { return e1 }, - func(ctx context.Context) error { return e2 }, + func(context.Context) error { return e0 }, + func(context.Context) error { return e1 }, + func(context.Context) error { return e2 }, })(context.Background()) assert.ErrorIs(t, err, e0) assert.ErrorIs(t, err, e1) @@ -185,7 +185,6 @@ func TestWithResource(t *testing.T) { }, } for _, tc := range cases { - tc := tc t.Run(tc.name, func(t *testing.T) { got := newConfig(tc.options).res if diff := cmp.Diff(got, tc.want); diff != "" { @@ -306,6 +305,54 @@ func TestWithExemplarFilterOff(t *testing.T) { } } +func TestWithCardinalityLimit(t *testing.T) { + cases := []struct { + name string + envValue string + options []Option + expectedLimit int + }{ + { + name: "only cardinality limit from option", + envValue: "", + options: []Option{WithCardinalityLimit(1000)}, + expectedLimit: 1000, + }, + { + name: "cardinality limit from option overrides env", + envValue: "500", + options: []Option{WithCardinalityLimit(1000)}, + expectedLimit: 1000, + }, + { + name: "cardinality limit from env", + envValue: 
"1234", + options: []Option{}, + expectedLimit: 1234, + }, + { + name: "invalid env value uses default", + envValue: "not-a-number", + options: []Option{}, + expectedLimit: defaultCardinalityLimit, + }, + { + name: "empty env and no option uses default", + envValue: "", + options: []Option{}, + expectedLimit: defaultCardinalityLimit, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Setenv("OTEL_GO_X_CARDINALITY_LIMIT", tc.envValue) + c := newConfig(tc.options) + assert.Equal(t, tc.expectedLimit, c.cardinalityLimit) + }) + } +} + func sample(parent context.Context) context.Context { sc := trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{0x01}, diff --git a/sdk/metric/doc.go b/sdk/metric/doc.go index 90a4ae16c1a..0f3b9d623f7 100644 --- a/sdk/metric/doc.go +++ b/sdk/metric/doc.go @@ -39,6 +39,30 @@ // Meter.RegisterCallback and Registration.Unregister to add and remove // callbacks without leaking memory. // +// # Cardinality Limits +// +// Cardinality refers to the number of unique attributes collected. High cardinality can lead to +// excessive memory usage, increased storage costs, and backend performance issues. +// +// Currently, the OpenTelemetry Go Metric SDK does not enforce a cardinality limit by default +// (note that this may change in a future release). Use [WithCardinalityLimit] to set the +// cardinality limit as desired. +// +// New attribute sets are dropped when the cardinality limit is reached. The measurement of +// these sets are aggregated into +// a special attribute set containing attribute.Bool("otel.metric.overflow", true). +// This ensures total metric values (e.g., Sum, Count) remain correct for the +// collection cycle, but information about the specific dropped sets +// is not preserved. +// +// Recommendations: +// +// - Set the limit based on the theoretical maximum combinations or expected +// active combinations. The OpenTelemetry Specification recommends a default of 2000. 
+// - Too high a limit increases worst-case memory overhead in the SDK and may cause downstream +// issues for databases that cannot handle high cardinality. +// - Too low a limit causes loss of attribute detail as more data falls into overflow. +// // See [go.opentelemetry.io/otel/metric] for more information about // the metric API. // diff --git a/sdk/metric/example_test.go b/sdk/metric/example_test.go index 234bfd65945..499b4cef8e2 100644 --- a/sdk/metric/example_test.go +++ b/sdk/metric/example_test.go @@ -15,7 +15,7 @@ import ( "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/exemplar" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) // To enable metrics in your application using the SDK, @@ -45,6 +45,7 @@ func Example() { meterProvider := metric.NewMeterProvider( metric.WithResource(res), metric.WithReader(reader), + metric.WithCardinalityLimit(2000), ) // Handle shutdown properly so that nothing leaks. diff --git a/sdk/metric/exemplar.go b/sdk/metric/exemplar.go index 549d3bd5f95..38b8745e676 100644 --- a/sdk/metric/exemplar.go +++ b/sdk/metric/exemplar.go @@ -58,10 +58,7 @@ func DefaultExemplarReservoirProviderSelector(agg Aggregation) exemplar.Reservoi // SimpleFixedSizeExemplarReservoir with a reservoir equal to the // smaller of the maximum number of buckets configured on the // aggregation or twenty (e.g. min(20, max_buckets)).
- n = int(a.MaxSize) - if n > 20 { - n = 20 - } + n = min(int(a.MaxSize), 20) } else { // https://github.com/open-telemetry/opentelemetry-specification/blob/e94af89e3d0c01de30127a0f423e912f6cda7bed/specification/metrics/sdk.md#simplefixedsizeexemplarreservoir // This Exemplar reservoir MAY take a configuration parameter for @@ -69,11 +66,11 @@ func DefaultExemplarReservoirProviderSelector(agg Aggregation) exemplar.Reservoi // provided, the default size MAY be the number of possible // concurrent threads (e.g. number of CPUs) to help reduce // contention. Otherwise, a default size of 1 SHOULD be used. - n = runtime.NumCPU() - if n < 1 { - // Should never be the case, but be defensive. - n = 1 - } + // + // Use runtime.GOMAXPROCS instead of runtime.NumCPU to support + // containerized environments that may have less than the total number + // of logical CPUs available on the local machine allocated to it. + n = max(runtime.GOMAXPROCS(0), 1) } return exemplar.FixedSizeReservoirProvider(n) diff --git a/sdk/metric/exemplar/filter.go b/sdk/metric/exemplar/filter.go index b595e2acef3..b50f5c1531c 100644 --- a/sdk/metric/exemplar/filter.go +++ b/sdk/metric/exemplar/filter.go @@ -24,11 +24,11 @@ func TraceBasedFilter(ctx context.Context) bool { } // AlwaysOnFilter is a [Filter] that always offers measurements. -func AlwaysOnFilter(ctx context.Context) bool { +func AlwaysOnFilter(context.Context) bool { return true } // AlwaysOffFilter is a [Filter] that never offers measurements. -func AlwaysOffFilter(ctx context.Context) bool { +func AlwaysOffFilter(context.Context) bool { return false } diff --git a/sdk/metric/exemplar/fixed_size_reservoir.go b/sdk/metric/exemplar/fixed_size_reservoir.go index 1fb1e0095fb..08e8f68fe73 100644 --- a/sdk/metric/exemplar/fixed_size_reservoir.go +++ b/sdk/metric/exemplar/fixed_size_reservoir.go @@ -14,7 +14,7 @@ import ( // FixedSizeReservoirProvider returns a provider of [FixedSizeReservoir]. 
func FixedSizeReservoirProvider(k int) ReservoirProvider { - return func(_ attribute.Set) Reservoir { + return func(attribute.Set) Reservoir { return NewFixedSizeReservoir(k) } } @@ -56,7 +56,7 @@ func newFixedSizeReservoir(s *storage) *FixedSizeReservoir { // randomFloat64 returns, as a float64, a uniform pseudo-random number in the // open interval (0.0,1.0). -func (r *FixedSizeReservoir) randomFloat64() float64 { +func (*FixedSizeReservoir) randomFloat64() float64 { // TODO: Use an algorithm that avoids rejection sampling. For example: // // const precision = 1 << 53 // 2^53 @@ -125,13 +125,11 @@ func (r *FixedSizeReservoir) Offer(ctx context.Context, t time.Time, n Value, a if int(r.count) < cap(r.store) { r.store[r.count] = newMeasurement(ctx, t, n, a) - } else { - if r.count == r.next { - // Overwrite a random existing measurement with the one offered. - idx := int(rand.Int64N(int64(cap(r.store)))) - r.store[idx] = newMeasurement(ctx, t, n, a) - r.advance() - } + } else if r.count == r.next { + // Overwrite a random existing measurement with the one offered. 
+ idx := int(rand.Int64N(int64(cap(r.store)))) + r.store[idx] = newMeasurement(ctx, t, n, a) + r.advance() } r.count++ } diff --git a/sdk/metric/exemplar/histogram_reservoir.go b/sdk/metric/exemplar/histogram_reservoir.go index 3b76cf305a4..decab613e77 100644 --- a/sdk/metric/exemplar/histogram_reservoir.go +++ b/sdk/metric/exemplar/histogram_reservoir.go @@ -16,7 +16,7 @@ import ( func HistogramReservoirProvider(bounds []float64) ReservoirProvider { cp := slices.Clone(bounds) slices.Sort(cp) - return func(_ attribute.Set) Reservoir { + return func(attribute.Set) Reservoir { return NewHistogramReservoir(cp) } } diff --git a/sdk/metric/exemplar_test.go b/sdk/metric/exemplar_test.go index 8a0529e9d93..9c3f5c16ceb 100644 --- a/sdk/metric/exemplar_test.go +++ b/sdk/metric/exemplar_test.go @@ -37,7 +37,7 @@ func TestFixedSizeExemplarConcurrentSafe(t *testing.T) { goRoutines := max(10, runtime.NumCPU()) var wg sync.WaitGroup - for n := 0; n < goRoutines; n++ { + for range goRoutines { wg.Add(1) go func() { defer wg.Done() @@ -54,7 +54,7 @@ func TestFixedSizeExemplarConcurrentSafe(t *testing.T) { const collections = 100 var rm metricdata.ResourceMetrics - for c := 0; c < collections; c++ { + for range collections { require.NotPanics(t, func() { _ = r.Collect(ctx, &rm) }) } diff --git a/sdk/metric/go.mod b/sdk/metric/go.mod index 628b8daccd0..b5036e933a5 100644 --- a/sdk/metric/go.mod +++ b/sdk/metric/go.mod @@ -6,11 +6,11 @@ require ( github.com/go-logr/logr v1.4.3 github.com/go-logr/stdr v1.2.2 github.com/google/go-cmp v0.7.0 - github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/otel v1.37.0 - go.opentelemetry.io/otel/metric v1.37.0 - go.opentelemetry.io/otel/sdk v1.37.0 - go.opentelemetry.io/otel/trace v1.37.0 + github.com/stretchr/testify v1.11.1 + go.opentelemetry.io/otel v1.38.0 + go.opentelemetry.io/otel/metric v1.38.0 + go.opentelemetry.io/otel/sdk v1.38.0 + go.opentelemetry.io/otel/trace v1.38.0 ) require ( @@ -18,7 +18,7 @@ require ( github.com/google/uuid 
v1.6.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - golang.org/x/sys v0.33.0 // indirect + golang.org/x/sys v0.35.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/sdk/metric/go.sum b/sdk/metric/go.sum index 47deaf0ed85..c241c13d6e5 100644 --- a/sdk/metric/go.sum +++ b/sdk/metric/go.sum @@ -17,12 +17,12 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/sdk/metric/instrument.go b/sdk/metric/instrument.go index 18891ed5b1a..63cccc508f4 
100644 --- a/sdk/metric/instrument.go +++ b/sdk/metric/instrument.go @@ -75,7 +75,7 @@ type Instrument struct { nonComparable // nolint: unused } -// IsEmpty returns if all Instrument fields are their zero-value. +// IsEmpty reports whether all Instrument fields are their zero-value. func (i Instrument) IsEmpty() bool { return i.Name == "" && i.Description == "" && @@ -204,7 +204,7 @@ func (i *int64Inst) Record(ctx context.Context, val int64, opts ...metric.Record i.aggregate(ctx, val, c.Attributes()) } -func (i *int64Inst) Enabled(_ context.Context) bool { +func (i *int64Inst) Enabled(context.Context) bool { return len(i.measures) != 0 } @@ -245,7 +245,7 @@ func (i *float64Inst) Record(ctx context.Context, val float64, opts ...metric.Re i.aggregate(ctx, val, c.Attributes()) } -func (i *float64Inst) Enabled(_ context.Context) bool { +func (i *float64Inst) Enabled(context.Context) bool { return len(i.measures) != 0 } diff --git a/sdk/metric/internal/aggregate/drop.go b/sdk/metric/internal/aggregate/drop.go index 8396faaa4ae..129920cbdd3 100644 --- a/sdk/metric/internal/aggregate/drop.go +++ b/sdk/metric/internal/aggregate/drop.go @@ -18,10 +18,10 @@ func dropReservoir[N int64 | float64](attribute.Set) FilteredExemplarReservoir[N type dropRes[N int64 | float64] struct{} // Offer does nothing, all measurements offered will be dropped. -func (r *dropRes[N]) Offer(context.Context, N, []attribute.KeyValue) {} +func (*dropRes[N]) Offer(context.Context, N, []attribute.KeyValue) {} // Collect resets dest. No exemplars will ever be returned. 
-func (r *dropRes[N]) Collect(dest *[]exemplar.Exemplar) { +func (*dropRes[N]) Collect(dest *[]exemplar.Exemplar) { clear(*dest) // Erase elements to let GC collect objects *dest = (*dest)[:0] } diff --git a/sdk/metric/internal/aggregate/exponential_histogram.go b/sdk/metric/internal/aggregate/exponential_histogram.go index ae1f5934401..857eddf305f 100644 --- a/sdk/metric/internal/aggregate/exponential_histogram.go +++ b/sdk/metric/internal/aggregate/exponential_histogram.go @@ -183,8 +183,8 @@ func (p *expoHistogramDataPoint[N]) scaleChange(bin, startBin int32, length int) var count int32 for high-low >= p.maxSize { - low = low >> 1 - high = high >> 1 + low >>= 1 + high >>= 1 count++ if count > expoMaxScale-expoMinScale { return count @@ -225,7 +225,7 @@ func (b *expoBuckets) record(bin int32) { b.counts = append(b.counts, make([]uint64, newLength-len(b.counts))...) } - copy(b.counts[shift:origLen+int(shift)], b.counts[:]) + copy(b.counts[shift:origLen+int(shift)], b.counts) b.counts = b.counts[:newLength] for i := 1; i < int(shift); i++ { b.counts[i] = 0 @@ -264,7 +264,7 @@ func (b *expoBuckets) downscale(delta int32) { // new Counts: [4, 14, 30, 10] if len(b.counts) <= 1 || delta < 1 { - b.startBin = b.startBin >> delta + b.startBin >>= delta return } @@ -282,7 +282,7 @@ func (b *expoBuckets) downscale(delta int32) { lastIdx := (len(b.counts) - 1 + int(offset)) / int(steps) b.counts = b.counts[:lastIdx+1] - b.startBin = b.startBin >> delta + b.startBin >>= delta } // newExponentialHistogram returns an Aggregator that summarizes a set of @@ -350,7 +350,9 @@ func (e *expoHistogram[N]) measure( v.res.Offer(ctx, value, droppedAttr) } -func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int { +func (e *expoHistogram[N]) delta( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // If *dest is not a metricdata.ExponentialHistogram, memory reuse is missed. 
@@ -411,7 +413,9 @@ func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int { return n } -func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int { +func (e *expoHistogram[N]) cumulative( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // If *dest is not a metricdata.ExponentialHistogram, memory reuse is missed. diff --git a/sdk/metric/internal/aggregate/exponential_histogram_test.go b/sdk/metric/internal/aggregate/exponential_histogram_test.go index a262e5a7736..c0ad6d53e0f 100644 --- a/sdk/metric/internal/aggregate/exponential_histogram_test.go +++ b/sdk/metric/internal/aggregate/exponential_histogram_test.go @@ -654,9 +654,9 @@ func BenchmarkPrepend(b *testing.B) { for i := 0; i < b.N; i++ { agg := newExpoHistogramDataPoint[float64](alice, 1024, 20, false, false) n := math.MaxFloat64 - for j := 0; j < 1024; j++ { + for range 1024 { agg.record(n) - n = n / 2 + n /= 2 } } } @@ -665,9 +665,9 @@ func BenchmarkAppend(b *testing.B) { for i := 0; i < b.N; i++ { agg := newExpoHistogramDataPoint[float64](alice, 1024, 20, false, false) n := smallestNonZeroNormalFloat64 - for j := 0; j < 1024; j++ { + for range 1024 { agg.record(n) - n = n * 2 + n *= 2 } } } @@ -1061,7 +1061,7 @@ func FuzzGetBin(f *testing.F) { f.Fuzz(func(t *testing.T, v float64, scale int32) { // GetBin only works on positive values. if math.Signbit(v) { - v = v * -1 + v *= -1 } // GetBin Doesn't work on zero. 
if v == 0.0 { diff --git a/sdk/metric/internal/aggregate/histogram.go b/sdk/metric/internal/aggregate/histogram.go index d3068484cf1..736287e736f 100644 --- a/sdk/metric/internal/aggregate/histogram.go +++ b/sdk/metric/internal/aggregate/histogram.go @@ -140,7 +140,9 @@ type histogram[N int64 | float64] struct { start time.Time } -func (s *histogram[N]) delta(dest *metricdata.Aggregation) int { +func (s *histogram[N]) delta( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // If *dest is not a metricdata.Histogram, memory reuse is missed. In that @@ -190,7 +192,9 @@ func (s *histogram[N]) delta(dest *metricdata.Aggregation) int { return n } -func (s *histogram[N]) cumulative(dest *metricdata.Aggregation) int { +func (s *histogram[N]) cumulative( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // If *dest is not a metricdata.Histogram, memory reuse is missed. In that diff --git a/sdk/metric/internal/aggregate/lastvalue.go b/sdk/metric/internal/aggregate/lastvalue.go index 350ccebdcb1..4bbe624c77c 100644 --- a/sdk/metric/internal/aggregate/lastvalue.go +++ b/sdk/metric/internal/aggregate/lastvalue.go @@ -55,7 +55,9 @@ func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute. s.values[attr.Equivalent()] = d } -func (s *lastValue[N]) delta(dest *metricdata.Aggregation) int { +func (s *lastValue[N]) delta( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of // the DataPoints is missed (better luck next time). 
@@ -75,7 +77,9 @@ func (s *lastValue[N]) delta(dest *metricdata.Aggregation) int { return n } -func (s *lastValue[N]) cumulative(dest *metricdata.Aggregation) int { +func (s *lastValue[N]) cumulative( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of // the DataPoints is missed (better luck next time). @@ -126,7 +130,9 @@ type precomputedLastValue[N int64 | float64] struct { *lastValue[N] } -func (s *precomputedLastValue[N]) delta(dest *metricdata.Aggregation) int { +func (s *precomputedLastValue[N]) delta( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of // the DataPoints is missed (better luck next time). @@ -146,7 +152,9 @@ func (s *precomputedLastValue[N]) delta(dest *metricdata.Aggregation) int { return n } -func (s *precomputedLastValue[N]) cumulative(dest *metricdata.Aggregation) int { +func (s *precomputedLastValue[N]) cumulative( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of // the DataPoints is missed (better luck next time). diff --git a/sdk/metric/internal/aggregate/sum.go b/sdk/metric/internal/aggregate/sum.go index 612cde43277..1b4b2304c0b 100644 --- a/sdk/metric/internal/aggregate/sum.go +++ b/sdk/metric/internal/aggregate/sum.go @@ -70,7 +70,9 @@ type sum[N int64 | float64] struct { start time.Time } -func (s *sum[N]) delta(dest *metricdata.Aggregation) int { +func (s *sum[N]) delta( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // If *dest is not a metricdata.Sum, memory reuse is missed. 
In that case, @@ -105,7 +107,9 @@ func (s *sum[N]) delta(dest *metricdata.Aggregation) int { return n } -func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int { +func (s *sum[N]) cumulative( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // If *dest is not a metricdata.Sum, memory reuse is missed. In that case, @@ -165,7 +169,9 @@ type precomputedSum[N int64 | float64] struct { reported map[attribute.Distinct]N } -func (s *precomputedSum[N]) delta(dest *metricdata.Aggregation) int { +func (s *precomputedSum[N]) delta( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() newReported := make(map[attribute.Distinct]N) @@ -206,7 +212,9 @@ func (s *precomputedSum[N]) delta(dest *metricdata.Aggregation) int { return n } -func (s *precomputedSum[N]) cumulative(dest *metricdata.Aggregation) int { +func (s *precomputedSum[N]) cumulative( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { t := now() // If *dest is not a metricdata.Sum, memory reuse is missed. In that case, diff --git a/sdk/metric/internal/x/README.md b/sdk/metric/internal/x/README.md index 59f736b733f..be0714a5f44 100644 --- a/sdk/metric/internal/x/README.md +++ b/sdk/metric/internal/x/README.md @@ -1,47 +1,16 @@ # Experimental Features -The metric SDK contains features that have not yet stabilized in the OpenTelemetry specification. -These features are added to the OpenTelemetry Go metric SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback. +The Metric SDK contains features that have not yet stabilized in the OpenTelemetry specification. 
+These features are added to the OpenTelemetry Go Metric SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback. These feature may change in backwards incompatible ways as feedback is applied. See the [Compatibility and Stability](#compatibility-and-stability) section for more information. ## Features -- [Cardinality Limit](#cardinality-limit) - [Exemplars](#exemplars) - [Instrument Enabled](#instrument-enabled) -### Cardinality Limit - -The cardinality limit is the hard limit on the number of metric streams that can be collected for a single instrument. - -This experimental feature can be enabled by setting the `OTEL_GO_X_CARDINALITY_LIMIT` environment value. -The value must be an integer value. -All other values are ignored. - -If the value set is less than or equal to `0`, no limit will be applied. - -#### Examples - -Set the cardinality limit to 2000. - -```console -export OTEL_GO_X_CARDINALITY_LIMIT=2000 -``` - -Set an infinite cardinality limit (functionally equivalent to disabling the feature). - -```console -export OTEL_GO_X_CARDINALITY_LIMIT=-1 -``` - -Disable the cardinality limit. - -```console -unset OTEL_GO_X_CARDINALITY_LIMIT -``` - ### Exemplars A sample of measurements made may be exported directly as a set of exemplars. diff --git a/sdk/metric/internal/x/x.go b/sdk/metric/internal/x/x.go index a98606238ad..294dcf8469e 100644 --- a/sdk/metric/internal/x/x.go +++ b/sdk/metric/internal/x/x.go @@ -10,25 +10,8 @@ package x // import "go.opentelemetry.io/otel/sdk/metric/internal/x" import ( "context" "os" - "strconv" ) -// CardinalityLimit is an experimental feature flag that defines if -// cardinality limits should be applied to the recorded metric data-points. -// -// To enable this feature set the OTEL_GO_X_CARDINALITY_LIMIT environment -// variable to the integer limit value you want to use. 
-// -// Setting OTEL_GO_X_CARDINALITY_LIMIT to a value less than or equal to 0 -// will disable the cardinality limits. -var CardinalityLimit = newFeature("CARDINALITY_LIMIT", func(v string) (int, bool) { - n, err := strconv.Atoi(v) - if err != nil { - return 0, false - } - return n, true -}) - // Feature is an experimental feature control flag. It provides a uniform way // to interact with these feature flags and parse their values. type Feature[T any] struct { @@ -36,6 +19,7 @@ type Feature[T any] struct { parse func(v string) (T, bool) } +//nolint:unused func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { const envKeyRoot = "OTEL_GO_X_" return Feature[T]{ @@ -63,7 +47,7 @@ func (f Feature[T]) Lookup() (v T, ok bool) { return f.parse(vRaw) } -// Enabled returns if the feature is enabled. +// Enabled reports whether the feature is enabled. func (f Feature[T]) Enabled() bool { _, ok := f.Lookup() return ok @@ -73,7 +57,7 @@ func (f Feature[T]) Enabled() bool { // // EnabledInstrument interface is implemented by synchronous instruments. type EnabledInstrument interface { - // Enabled returns whether the instrument will process measurements for the given context. + // Enabled reports whether the instrument will process measurements for the given context. // // This function can be used in places where measuring an instrument // would result in computationally expensive operations. 
diff --git a/sdk/metric/internal/x/x_test.go b/sdk/metric/internal/x/x_test.go index 257ca76137d..d7d5e41ce4a 100644 --- a/sdk/metric/internal/x/x_test.go +++ b/sdk/metric/internal/x/x_test.go @@ -7,19 +7,9 @@ import ( "testing" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) -func TestCardinalityLimit(t *testing.T) { - const key = "OTEL_GO_X_CARDINALITY_LIMIT" - require.Equal(t, key, CardinalityLimit.Key()) - - t.Run("100", run(setenv(key, "100"), assertEnabled(CardinalityLimit, 100))) - t.Run("-1", run(setenv(key, "-1"), assertEnabled(CardinalityLimit, -1))) - t.Run("false", run(setenv(key, "false"), assertDisabled(CardinalityLimit))) - t.Run("empty", run(assertDisabled(CardinalityLimit))) -} - +//nolint:unused func run(steps ...func(*testing.T)) func(*testing.T) { return func(t *testing.T) { t.Helper() @@ -29,10 +19,12 @@ func run(steps ...func(*testing.T)) func(*testing.T) { } } +//nolint:unused func setenv(k, v string) func(t *testing.T) { return func(t *testing.T) { t.Setenv(k, v) } } +//nolint:unused func assertEnabled[T any](f Feature[T], want T) func(*testing.T) { return func(t *testing.T) { t.Helper() @@ -44,6 +36,7 @@ func assertEnabled[T any](f Feature[T], want T) func(*testing.T) { } } +//nolint:unused func assertDisabled[T any](f Feature[T]) func(*testing.T) { var zero T return func(t *testing.T) { diff --git a/sdk/metric/manual_reader.go b/sdk/metric/manual_reader.go index 96e77908665..85d3dc20768 100644 --- a/sdk/metric/manual_reader.go +++ b/sdk/metric/manual_reader.go @@ -129,7 +129,7 @@ func (mr *ManualReader) Collect(ctx context.Context, rm *metricdata.ResourceMetr } // MarshalLog returns logging data about the ManualReader. 
-func (r *ManualReader) MarshalLog() interface{} { +func (r *ManualReader) MarshalLog() any { r.mu.Lock() down := r.isShutdown r.mu.Unlock() diff --git a/sdk/metric/manual_reader_test.go b/sdk/metric/manual_reader_test.go index 9583d0396e9..b6b196f720c 100644 --- a/sdk/metric/manual_reader_test.go +++ b/sdk/metric/manual_reader_test.go @@ -102,7 +102,7 @@ func TestManualReaderCollect(t *testing.T) { // Ensure the pipeline has a callback setup testM, err := meter.Int64ObservableCounter("test") assert.NoError(t, err) - _, err = meter.RegisterCallback(func(_ context.Context, o metric.Observer) error { + _, err = meter.RegisterCallback(func(context.Context, metric.Observer) error { return nil }, testM) assert.NoError(t, err) diff --git a/sdk/metric/meter.go b/sdk/metric/meter.go index c500fd9f2ac..e0a1e90e778 100644 --- a/sdk/metric/meter.go +++ b/sdk/metric/meter.go @@ -12,7 +12,6 @@ import ( "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/embedded" "go.opentelemetry.io/otel/sdk/instrumentation" - "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" ) @@ -423,7 +422,7 @@ func (m *meter) Float64ObservableGauge( } func validateInstrumentName(name string) error { - if len(name) == 0 { + if name == "" { return fmt.Errorf("%w: %s: is empty", ErrInstrumentName, name) } if len(name) > 255 { diff --git a/sdk/metric/meter_test.go b/sdk/metric/meter_test.go index b51ffdb5186..10d7f26b39b 100644 --- a/sdk/metric/meter_test.go +++ b/sdk/metric/meter_test.go @@ -31,7 +31,7 @@ import ( ) // A meter should be able to make instruments concurrently. -func TestMeterInstrumentConcurrentSafe(t *testing.T) { +func TestMeterInstrumentConcurrentSafe(*testing.T) { wg := &sync.WaitGroup{} wg.Add(12) @@ -92,7 +92,7 @@ func TestMeterInstrumentConcurrentSafe(t *testing.T) { var emptyCallback metric.Callback = func(context.Context, metric.Observer) error { return nil } // A Meter Should be able register Callbacks Concurrently. 
-func TestMeterCallbackCreationConcurrency(t *testing.T) { +func TestMeterCallbackCreationConcurrency(*testing.T) { wg := &sync.WaitGroup{} wg.Add(2) @@ -1080,12 +1080,12 @@ func newLogSink(t *testing.T) *logSink { return &logSink{LogSink: testr.New(t).GetSink()} } -func (l *logSink) Info(level int, msg string, keysAndValues ...interface{}) { +func (l *logSink) Info(level int, msg string, keysAndValues ...any) { l.messages = append(l.messages, msg) l.LogSink.Info(level, msg, keysAndValues...) } -func (l *logSink) Error(err error, msg string, keysAndValues ...interface{}) { +func (l *logSink) Error(err error, msg string, keysAndValues ...any) { l.messages = append(l.messages, fmt.Sprintf("%s: %s", err, msg)) l.LogSink.Error(err, msg, keysAndValues...) } @@ -1368,7 +1368,7 @@ func testAttributeFilter(temporality metricdata.Temporality) func(*testing.T) { }{ { name: "ObservableFloat64Counter", - register: func(t *testing.T, mtr metric.Meter) error { + register: func(_ *testing.T, mtr metric.Meter) error { ctr, err := mtr.Float64ObservableCounter("afcounter") if err != nil { return err @@ -1394,7 +1394,7 @@ func testAttributeFilter(temporality metricdata.Temporality) func(*testing.T) { }, { name: "ObservableFloat64UpDownCounter", - register: func(t *testing.T, mtr metric.Meter) error { + register: func(_ *testing.T, mtr metric.Meter) error { ctr, err := mtr.Float64ObservableUpDownCounter("afupdowncounter") if err != nil { return err @@ -1423,7 +1423,7 @@ func testAttributeFilter(temporality metricdata.Temporality) func(*testing.T) { }, { name: "ObservableFloat64Gauge", - register: func(t *testing.T, mtr metric.Meter) error { + register: func(_ *testing.T, mtr metric.Meter) error { ctr, err := mtr.Float64ObservableGauge("afgauge") if err != nil { return err @@ -1446,7 +1446,7 @@ func testAttributeFilter(temporality metricdata.Temporality) func(*testing.T) { }, { name: "ObservableInt64Counter", - register: func(t *testing.T, mtr metric.Meter) error { + register: func(_ 
*testing.T, mtr metric.Meter) error { ctr, err := mtr.Int64ObservableCounter("aicounter") if err != nil { return err @@ -1472,7 +1472,7 @@ func testAttributeFilter(temporality metricdata.Temporality) func(*testing.T) { }, { name: "ObservableInt64UpDownCounter", - register: func(t *testing.T, mtr metric.Meter) error { + register: func(_ *testing.T, mtr metric.Meter) error { ctr, err := mtr.Int64ObservableUpDownCounter("aiupdowncounter") if err != nil { return err @@ -1498,7 +1498,7 @@ func testAttributeFilter(temporality metricdata.Temporality) func(*testing.T) { }, { name: "ObservableInt64Gauge", - register: func(t *testing.T, mtr metric.Meter) error { + register: func(_ *testing.T, mtr metric.Meter) error { ctr, err := mtr.Int64ObservableGauge("aigauge") if err != nil { return err @@ -1521,7 +1521,7 @@ func testAttributeFilter(temporality metricdata.Temporality) func(*testing.T) { }, { name: "SyncFloat64Counter", - register: func(t *testing.T, mtr metric.Meter) error { + register: func(_ *testing.T, mtr metric.Meter) error { ctr, err := mtr.Float64Counter("sfcounter") if err != nil { return err @@ -1544,7 +1544,7 @@ func testAttributeFilter(temporality metricdata.Temporality) func(*testing.T) { }, { name: "SyncFloat64UpDownCounter", - register: func(t *testing.T, mtr metric.Meter) error { + register: func(_ *testing.T, mtr metric.Meter) error { ctr, err := mtr.Float64UpDownCounter("sfupdowncounter") if err != nil { return err @@ -1567,7 +1567,7 @@ func testAttributeFilter(temporality metricdata.Temporality) func(*testing.T) { }, { name: "SyncFloat64Histogram", - register: func(t *testing.T, mtr metric.Meter) error { + register: func(_ *testing.T, mtr metric.Meter) error { ctr, err := mtr.Float64Histogram("sfhistogram") if err != nil { return err @@ -1600,7 +1600,7 @@ func testAttributeFilter(temporality metricdata.Temporality) func(*testing.T) { }, { name: "SyncInt64Counter", - register: func(t *testing.T, mtr metric.Meter) error { + register: func(_ *testing.T, 
mtr metric.Meter) error { ctr, err := mtr.Int64Counter("sicounter") if err != nil { return err @@ -1623,7 +1623,7 @@ func testAttributeFilter(temporality metricdata.Temporality) func(*testing.T) { }, { name: "SyncInt64UpDownCounter", - register: func(t *testing.T, mtr metric.Meter) error { + register: func(_ *testing.T, mtr metric.Meter) error { ctr, err := mtr.Int64UpDownCounter("siupdowncounter") if err != nil { return err @@ -1646,7 +1646,7 @@ func testAttributeFilter(temporality metricdata.Temporality) func(*testing.T) { }, { name: "SyncInt64Histogram", - register: func(t *testing.T, mtr metric.Meter) error { + register: func(_ *testing.T, mtr metric.Meter) error { ctr, err := mtr.Int64Histogram("sihistogram") if err != nil { return err @@ -2127,7 +2127,7 @@ func TestMalformedSelectors(t *testing.T) { sfHistogram, err := meter.Float64Histogram("sync.float64.histogram") require.NoError(t, err) - callback := func(ctx context.Context, obs metric.Observer) error { + callback := func(_ context.Context, obs metric.Observer) error { obs.ObserveInt64(aiCounter, 1) obs.ObserveInt64(aiUpDownCounter, 1) obs.ObserveInt64(aiGauge, 1) @@ -2274,7 +2274,7 @@ func TestObservableDropAggregation(t *testing.T) { { name: "drop all metrics", views: []View{ - func(i Instrument) (Stream, bool) { + func(Instrument) (Stream, bool) { return Stream{Aggregation: AggregationDrop{}}, true }, }, @@ -2352,7 +2352,7 @@ func TestObservableDropAggregation(t *testing.T) { otel.SetLogger( funcr.NewJSON( func(obj string) { - var entry map[string]interface{} + var entry map[string]any _ = json.Unmarshal([]byte(obj), &entry) // All unregistered observables should log `errUnregObserver` error. 
@@ -2393,7 +2393,7 @@ func TestObservableDropAggregation(t *testing.T) { require.NoError(t, err) _, err = meter.RegisterCallback( - func(ctx context.Context, obs metric.Observer) error { + func(_ context.Context, obs metric.Observer) error { obs.ObserveInt64(intCnt, 1) obs.ObserveInt64(intUDCnt, 1) obs.ObserveInt64(intGaugeCnt, 1) @@ -2530,7 +2530,7 @@ func TestDuplicateInstrumentCreation(t *testing.T) { }() m := NewMeterProvider(WithReader(reader)).Meter("TestDuplicateInstrumentCreation") - for i := 0; i < 3; i++ { + for range 3 { require.NoError(t, tt.createInstrument(m)) } internalMeter, ok := m.(*meter) @@ -2553,7 +2553,7 @@ func TestDuplicateInstrumentCreation(t *testing.T) { func TestMeterProviderDelegation(t *testing.T) { meter := otel.Meter("go.opentelemetry.io/otel/metric/internal/global/meter_test") otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) { require.NoError(t, err) })) - for i := 0; i < 5; i++ { + for range 5 { int64Counter, err := meter.Int64ObservableCounter("observable.int64.counter") require.NoError(t, err) int64UpDownCounter, err := meter.Int64ObservableUpDownCounter("observable.int64.up.down.counter") @@ -2566,7 +2566,7 @@ func TestMeterProviderDelegation(t *testing.T) { require.NoError(t, err) floatGauge, err := meter.Float64ObservableGauge("observable.float.gauge") require.NoError(t, err) - _, err = meter.RegisterCallback(func(ctx context.Context, o metric.Observer) error { + _, err = meter.RegisterCallback(func(_ context.Context, o metric.Observer) error { o.ObserveInt64(int64Counter, int64(10)) o.ObserveInt64(int64UpDownCounter, int64(10)) o.ObserveInt64(int64Gauge, int64(10)) diff --git a/sdk/metric/metricdata/metricdatatest/assertion.go b/sdk/metric/metricdata/metricdatatest/assertion.go index d140d1f4057..7935d9e99cd 100644 --- a/sdk/metric/metricdata/metricdatatest/assertion.go +++ b/sdk/metric/metricdata/metricdatatest/assertion.go @@ -121,10 +121,10 @@ func AssertEqual[T Datatypes](t TestingT, expected, actual T, opts 
...Option) bo cfg := newConfig(opts) // Generic types cannot be type asserted. Use an interface instead. - aIface := interface{}(actual) + aIface := any(actual) var r []string - switch e := interface{}(expected).(type) { + switch e := any(expected).(type) { case metricdata.Exemplar[int64]: r = equalExemplars(e, aIface.(metricdata.Exemplar[int64]), cfg) case metricdata.Exemplar[float64]: @@ -206,7 +206,7 @@ func AssertHasAttributes[T Datatypes](t TestingT, actual T, attrs ...attribute.K var reasons []string - switch e := interface{}(actual).(type) { + switch e := any(actual).(type) { case metricdata.Exemplar[int64]: reasons = hasAttributesExemplars(e, attrs...) case metricdata.Exemplar[float64]: diff --git a/sdk/metric/metricdata/metricdatatest/comparisons.go b/sdk/metric/metricdata/metricdatatest/comparisons.go index 68f20d9669b..f80ce853d84 100644 --- a/sdk/metric/metricdata/metricdatatest/comparisons.go +++ b/sdk/metric/metricdata/metricdatatest/comparisons.go @@ -497,7 +497,7 @@ func equalQuantileValue(a, b metricdata.QuantileValue, _ config) (reasons []stri return reasons } -func notEqualStr(prefix string, expected, actual interface{}) string { +func notEqualStr(prefix string, expected, actual any) string { return fmt.Sprintf("%s not equal:\nexpected: %v\nactual: %v", prefix, expected, actual) } @@ -591,9 +591,9 @@ func equalExemplars[N int64 | float64](a, b metricdata.Exemplar[N], cfg config) func diffSlices[T any](a, b []T, equal func(T, T) bool) (extraA, extraB []T) { visited := make([]bool, len(b)) - for i := 0; i < len(a); i++ { + for i := range a { found := false - for j := 0; j < len(b); j++ { + for j := range b { if visited[j] { continue } @@ -608,7 +608,7 @@ func diffSlices[T any](a, b []T, equal func(T, T) bool) (extraA, extraB []T) { } } - for j := 0; j < len(b); j++ { + for j := range b { if visited[j] { continue } diff --git a/sdk/metric/periodic_reader.go b/sdk/metric/periodic_reader.go index 0a48aed74dd..f08c771a68f 100644 --- 
a/sdk/metric/periodic_reader.go +++ b/sdk/metric/periodic_reader.go @@ -114,7 +114,7 @@ func NewPeriodicReader(exporter Exporter, options ...PeriodicReaderOption) *Peri cancel: cancel, done: make(chan struct{}), rmPool: sync.Pool{ - New: func() interface{} { + New: func() any { return &metricdata.ResourceMetrics{} }, }, @@ -234,7 +234,7 @@ func (r *PeriodicReader) Collect(ctx context.Context, rm *metricdata.ResourceMet } // collect unwraps p as a produceHolder and returns its produce results. -func (r *PeriodicReader) collect(ctx context.Context, p interface{}, rm *metricdata.ResourceMetrics) error { +func (r *PeriodicReader) collect(ctx context.Context, p any, rm *metricdata.ResourceMetrics) error { if p == nil { return ErrReaderNotRegistered } @@ -349,7 +349,7 @@ func (r *PeriodicReader) Shutdown(ctx context.Context) error { } // MarshalLog returns logging data about the PeriodicReader. -func (r *PeriodicReader) MarshalLog() interface{} { +func (r *PeriodicReader) MarshalLog() any { r.mu.Lock() down := r.isShutdown r.mu.Unlock() diff --git a/sdk/metric/periodic_reader_test.go b/sdk/metric/periodic_reader_test.go index be67b2b5ac0..c2a0fa56b84 100644 --- a/sdk/metric/periodic_reader_test.go +++ b/sdk/metric/periodic_reader_test.go @@ -489,7 +489,7 @@ func TestPeriodicReaderCollect(t *testing.T) { // Ensure the pipeline has a callback setup testM, err := meter.Int64ObservableCounter("test") assert.NoError(t, err) - _, err = meter.RegisterCallback(func(_ context.Context, o metric.Observer) error { + _, err = meter.RegisterCallback(func(context.Context, metric.Observer) error { return nil }, testM) assert.NoError(t, err) diff --git a/sdk/metric/pipeline.go b/sdk/metric/pipeline.go index 7bdb699cae0..408fddc8d4e 100644 --- a/sdk/metric/pipeline.go +++ b/sdk/metric/pipeline.go @@ -17,7 +17,6 @@ import ( "go.opentelemetry.io/otel/sdk/metric/exemplar" "go.opentelemetry.io/otel/sdk/metric/internal" "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" - 
"go.opentelemetry.io/otel/sdk/metric/internal/x" "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/resource" ) @@ -37,17 +36,24 @@ type instrumentSync struct { compAgg aggregate.ComputeAggregation } -func newPipeline(res *resource.Resource, reader Reader, views []View, exemplarFilter exemplar.Filter) *pipeline { +func newPipeline( + res *resource.Resource, + reader Reader, + views []View, + exemplarFilter exemplar.Filter, + cardinalityLimit int, +) *pipeline { if res == nil { res = resource.Empty() } return &pipeline{ - resource: res, - reader: reader, - views: views, - int64Measures: map[observableID[int64]][]aggregate.Measure[int64]{}, - float64Measures: map[observableID[float64]][]aggregate.Measure[float64]{}, - exemplarFilter: exemplarFilter, + resource: res, + reader: reader, + views: views, + int64Measures: map[observableID[int64]][]aggregate.Measure[int64]{}, + float64Measures: map[observableID[float64]][]aggregate.Measure[float64]{}, + exemplarFilter: exemplarFilter, + cardinalityLimit: cardinalityLimit, // aggregations is lazy allocated when needed. } } @@ -65,12 +71,13 @@ type pipeline struct { views []View sync.Mutex - int64Measures map[observableID[int64]][]aggregate.Measure[int64] - float64Measures map[observableID[float64]][]aggregate.Measure[float64] - aggregations map[instrumentation.Scope][]instrumentSync - callbacks []func(context.Context) error - multiCallbacks list.List - exemplarFilter exemplar.Filter + int64Measures map[observableID[int64]][]aggregate.Measure[int64] + float64Measures map[observableID[float64]][]aggregate.Measure[float64] + aggregations map[instrumentation.Scope][]instrumentSync + callbacks []func(context.Context) error + multiCallbacks list.List + exemplarFilter exemplar.Filter + cardinalityLimit int } // addInt64Measure adds a new int64 measure to the pipeline for each observer. 
@@ -388,10 +395,9 @@ func (i *inserter[N]) cachedAggregator( b.Filter = stream.AttributeFilter // A value less than or equal to zero will disable the aggregation // limits for the builder (an all the created aggregates). - // CardinalityLimit.Lookup returns 0 by default if unset (or + // cardinalityLimit will be 0 by default if unset (or // unrecognized input). Use that value directly. - b.AggregationLimit, _ = x.CardinalityLimit.Lookup() - + b.AggregationLimit = i.pipeline.cardinalityLimit in, out, err := i.aggregateFunc(b, stream.Aggregation, kind) if err != nil { return aggVal[N]{0, nil, err} @@ -426,7 +432,7 @@ func (i *inserter[N]) logConflict(id instID) { } const msg = "duplicate metric stream definitions" - args := []interface{}{ + args := []any{ "names", fmt.Sprintf("%q, %q", existing.Name, id.Name), "descriptions", fmt.Sprintf("%q, %q", existing.Description, id.Description), "kinds", fmt.Sprintf("%s, %s", existing.Kind, id.Kind), @@ -460,7 +466,7 @@ func (i *inserter[N]) logConflict(id instID) { global.Warn(msg, args...) } -func (i *inserter[N]) instID(kind InstrumentKind, stream Stream) instID { +func (*inserter[N]) instID(kind InstrumentKind, stream Stream) instID { var zero N return instID{ Name: stream.Name, @@ -590,10 +596,16 @@ func isAggregatorCompatible(kind InstrumentKind, agg Aggregation) error { // measurement. 
type pipelines []*pipeline -func newPipelines(res *resource.Resource, readers []Reader, views []View, exemplarFilter exemplar.Filter) pipelines { +func newPipelines( + res *resource.Resource, + readers []Reader, + views []View, + exemplarFilter exemplar.Filter, + cardinalityLimit int, +) pipelines { pipes := make([]*pipeline, 0, len(readers)) for _, r := range readers { - p := newPipeline(res, r, views, exemplarFilter) + p := newPipeline(res, r, views, exemplarFilter, cardinalityLimit) r.register(p) pipes = append(pipes, p) } diff --git a/sdk/metric/pipeline_registry_test.go b/sdk/metric/pipeline_registry_test.go index 7f248735b24..64bcf70784e 100644 --- a/sdk/metric/pipeline_registry_test.go +++ b/sdk/metric/pipeline_registry_test.go @@ -57,7 +57,7 @@ func assertSum[N int64 | float64]( t.Helper() requireN[N](t, n, meas, comps, err) - for m := 0; m < n; m++ { + for m := range n { t.Logf("input/output number: %d", m) in, out := meas[m], comps[m] in(context.Background(), 1, *attribute.EmptySet()) @@ -184,7 +184,7 @@ func testCreateAggregators[N int64 | float64](t *testing.T) { { name: "Default/Drop", reader: NewManualReader( - WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationDrop{} }), + WithAggregationSelector(func(InstrumentKind) Aggregation { return AggregationDrop{} }), ), inst: instruments[InstrumentKindCounter], validate: func(t *testing.T, meas []aggregate.Measure[N], comps []aggregate.ComputeAggregation, err error) { @@ -326,7 +326,7 @@ func testCreateAggregators[N int64 | float64](t *testing.T) { { name: "Reader/Default/Cumulative/Sum/Monotonic", reader: NewManualReader( - WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationDefault{} }), + WithAggregationSelector(func(InstrumentKind) Aggregation { return AggregationDefault{} }), ), inst: instruments[InstrumentKindCounter], validate: assertSum[N](1, metricdata.CumulativeTemporality, true, [2]N{1, 4}), @@ -334,7 +334,7 @@ func testCreateAggregators[N 
int64 | float64](t *testing.T) { { name: "Reader/Default/Cumulative/Sum/NonMonotonic", reader: NewManualReader( - WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationDefault{} }), + WithAggregationSelector(func(InstrumentKind) Aggregation { return AggregationDefault{} }), ), inst: instruments[InstrumentKindUpDownCounter], validate: assertSum[N](1, metricdata.CumulativeTemporality, false, [2]N{1, 4}), @@ -342,7 +342,7 @@ func testCreateAggregators[N int64 | float64](t *testing.T) { { name: "Reader/Default/Cumulative/ExplicitBucketHistogram", reader: NewManualReader( - WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationDefault{} }), + WithAggregationSelector(func(InstrumentKind) Aggregation { return AggregationDefault{} }), ), inst: instruments[InstrumentKindHistogram], validate: assertHist[N](metricdata.CumulativeTemporality), @@ -350,7 +350,7 @@ func testCreateAggregators[N int64 | float64](t *testing.T) { { name: "Reader/Default/Cumulative/Gauge", reader: NewManualReader( - WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationDefault{} }), + WithAggregationSelector(func(InstrumentKind) Aggregation { return AggregationDefault{} }), ), inst: instruments[InstrumentKindGauge], validate: assertLastValue[N], @@ -358,7 +358,7 @@ func testCreateAggregators[N int64 | float64](t *testing.T) { { name: "Reader/Default/Cumulative/PrecomputedSum/Monotonic", reader: NewManualReader( - WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationDefault{} }), + WithAggregationSelector(func(InstrumentKind) Aggregation { return AggregationDefault{} }), ), inst: instruments[InstrumentKindObservableCounter], validate: assertSum[N](1, metricdata.CumulativeTemporality, true, [2]N{1, 3}), @@ -366,7 +366,7 @@ func testCreateAggregators[N int64 | float64](t *testing.T) { { name: "Reader/Default/Cumulative/PrecomputedSum/NonMonotonic", reader: NewManualReader( - 
WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationDefault{} }), + WithAggregationSelector(func(InstrumentKind) Aggregation { return AggregationDefault{} }), ), inst: instruments[InstrumentKindObservableUpDownCounter], validate: assertSum[N](1, metricdata.CumulativeTemporality, false, [2]N{1, 3}), @@ -374,7 +374,7 @@ func testCreateAggregators[N int64 | float64](t *testing.T) { { name: "Reader/Default/Gauge", reader: NewManualReader( - WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationDefault{} }), + WithAggregationSelector(func(InstrumentKind) Aggregation { return AggregationDefault{} }), ), inst: instruments[InstrumentKindObservableGauge], validate: assertLastValue[N], @@ -392,7 +392,7 @@ func testCreateAggregators[N int64 | float64](t *testing.T) { for _, tt := range testcases { t.Run(tt.name, func(t *testing.T) { var c cache[string, instID] - p := newPipeline(nil, tt.reader, tt.views, exemplar.AlwaysOffFilter) + p := newPipeline(nil, tt.reader, tt.views, exemplar.AlwaysOffFilter, 0) i := newInserter[N](p, &c) readerAggregation := i.readerDefaultAggregation(tt.inst.Kind) input, err := i.Instrument(tt.inst, readerAggregation) @@ -414,7 +414,7 @@ func TestCreateAggregators(t *testing.T) { func testInvalidInstrumentShouldPanic[N int64 | float64]() { var c cache[string, instID] - i := newInserter[N](newPipeline(nil, NewManualReader(), []View{defaultView}, exemplar.AlwaysOffFilter), &c) + i := newInserter[N](newPipeline(nil, NewManualReader(), []View{defaultView}, exemplar.AlwaysOffFilter, 0), &c) inst := Instrument{ Name: "foo", Kind: InstrumentKind(255), @@ -430,7 +430,7 @@ func TestInvalidInstrumentShouldPanic(t *testing.T) { func TestPipelinesAggregatorForEachReader(t *testing.T) { r0, r1 := NewManualReader(), NewManualReader() - pipes := newPipelines(resource.Empty(), []Reader{r0, r1}, nil, exemplar.AlwaysOffFilter) + pipes := newPipelines(resource.Empty(), []Reader{r0, r1}, nil, 
exemplar.AlwaysOffFilter, 0) require.Len(t, pipes, 2, "created pipelines") inst := Instrument{Name: "foo", Kind: InstrumentKindCounter} @@ -453,7 +453,7 @@ func TestPipelineRegistryCreateAggregators(t *testing.T) { renameView := NewView(Instrument{Name: "foo"}, Stream{Name: "bar"}) testRdr := NewManualReader() testRdrHistogram := NewManualReader( - WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationExplicitBucketHistogram{} }), + WithAggregationSelector(func(InstrumentKind) Aggregation { return AggregationExplicitBucketHistogram{} }), ) testCases := []struct { @@ -504,7 +504,7 @@ func TestPipelineRegistryCreateAggregators(t *testing.T) { for _, tt := range testCases { t.Run(tt.name, func(t *testing.T) { - p := newPipelines(resource.Empty(), tt.readers, tt.views, exemplar.AlwaysOffFilter) + p := newPipelines(resource.Empty(), tt.readers, tt.views, exemplar.AlwaysOffFilter, 0) testPipelineRegistryResolveIntAggregators(t, p, tt.wantCount) testPipelineRegistryResolveFloatAggregators(t, p, tt.wantCount) testPipelineRegistryResolveIntHistogramAggregators(t, p, tt.wantCount) @@ -558,7 +558,7 @@ func TestPipelineRegistryResource(t *testing.T) { readers := []Reader{NewManualReader()} views := []View{defaultView, v} res := resource.NewSchemaless(attribute.String("key", "val")) - pipes := newPipelines(res, readers, views, exemplar.AlwaysOffFilter) + pipes := newPipelines(res, readers, views, exemplar.AlwaysOffFilter, 0) for _, p := range pipes { assert.True(t, res.Equal(p.resource), "resource not set") } @@ -566,12 +566,12 @@ func TestPipelineRegistryResource(t *testing.T) { func TestPipelineRegistryCreateAggregatorsIncompatibleInstrument(t *testing.T) { testRdrHistogram := NewManualReader( - WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationSum{} }), + WithAggregationSelector(func(InstrumentKind) Aggregation { return AggregationSum{} }), ) readers := []Reader{testRdrHistogram} views := []View{defaultView} - p := 
newPipelines(resource.Empty(), readers, views, exemplar.AlwaysOffFilter) + p := newPipelines(resource.Empty(), readers, views, exemplar.AlwaysOffFilter, 0) inst := Instrument{Name: "foo", Kind: InstrumentKindObservableGauge} var vc cache[string, instID] @@ -601,7 +601,7 @@ type logCounter struct { infoN uint32 } -func (l *logCounter) Info(level int, msg string, keysAndValues ...interface{}) { +func (l *logCounter) Info(level int, msg string, keysAndValues ...any) { atomic.AddUint32(&l.infoN, 1) l.LogSink.Info(level, msg, keysAndValues...) } @@ -610,7 +610,7 @@ func (l *logCounter) InfoN() int { return int(atomic.SwapUint32(&l.infoN, 0)) } -func (l *logCounter) Error(err error, msg string, keysAndValues ...interface{}) { +func (l *logCounter) Error(err error, msg string, keysAndValues ...any) { atomic.AddUint32(&l.errN, 1) l.LogSink.Error(err, msg, keysAndValues...) } @@ -631,7 +631,7 @@ func TestResolveAggregatorsDuplicateErrors(t *testing.T) { fooInst := Instrument{Name: "foo", Kind: InstrumentKindCounter} barInst := Instrument{Name: "bar", Kind: InstrumentKindCounter} - p := newPipelines(resource.Empty(), readers, views, exemplar.AlwaysOffFilter) + p := newPipelines(resource.Empty(), readers, views, exemplar.AlwaysOffFilter, 0) var vc cache[string, instID] ri := newResolver[int64](p, &vc) diff --git a/sdk/metric/pipeline_test.go b/sdk/metric/pipeline_test.go index 57d9f2901b5..1f90a6431b6 100644 --- a/sdk/metric/pipeline_test.go +++ b/sdk/metric/pipeline_test.go @@ -32,7 +32,9 @@ import ( "go.opentelemetry.io/otel/trace" ) -func testSumAggregateOutput(dest *metricdata.Aggregation) int { +func testSumAggregateOutput( + dest *metricdata.Aggregation, //nolint:gocritic // The pointer is needed for the ComputeAggregation interface +) int { *dest = metricdata.Sum[int64]{ Temporality: metricdata.CumulativeTemporality, IsMonotonic: false, @@ -42,7 +44,7 @@ func testSumAggregateOutput(dest *metricdata.Aggregation) int { } func TestNewPipeline(t *testing.T) { - pipe := 
newPipeline(nil, nil, nil, exemplar.AlwaysOffFilter) + pipe := newPipeline(nil, nil, nil, exemplar.AlwaysOffFilter, 0) output := metricdata.ResourceMetrics{} err := pipe.produce(context.Background(), &output) @@ -68,7 +70,7 @@ func TestNewPipeline(t *testing.T) { func TestPipelineUsesResource(t *testing.T) { res := resource.NewWithAttributes("noSchema", attribute.String("test", "resource")) - pipe := newPipeline(res, nil, nil, exemplar.AlwaysOffFilter) + pipe := newPipeline(res, nil, nil, exemplar.AlwaysOffFilter, 0) output := metricdata.ResourceMetrics{} err := pipe.produce(context.Background(), &output) @@ -76,14 +78,14 @@ func TestPipelineUsesResource(t *testing.T) { assert.Equal(t, res, output.Resource) } -func TestPipelineConcurrentSafe(t *testing.T) { - pipe := newPipeline(nil, nil, nil, exemplar.AlwaysOffFilter) +func TestPipelineConcurrentSafe(*testing.T) { + pipe := newPipeline(nil, nil, nil, exemplar.AlwaysOffFilter, 0) ctx := context.Background() var output metricdata.ResourceMetrics var wg sync.WaitGroup const threads = 2 - for i := 0; i < threads; i++ { + for i := range threads { wg.Add(1) go func() { defer wg.Done() @@ -142,13 +144,13 @@ func testDefaultViewImplicit[N int64 | float64]() func(t *testing.T) { }{ { name: "NoView", - pipe: newPipeline(nil, reader, nil, exemplar.AlwaysOffFilter), + pipe: newPipeline(nil, reader, nil, exemplar.AlwaysOffFilter, 0), }, { name: "NoMatchingView", pipe: newPipeline(nil, reader, []View{ NewView(Instrument{Name: "foo"}, Stream{Name: "bar"}), - }, exemplar.AlwaysOffFilter), + }, exemplar.AlwaysOffFilter, 0), }, } @@ -233,7 +235,7 @@ func TestLogConflictName(t *testing.T) { return instID{Name: tc.existing} }) - i := newInserter[int64](newPipeline(nil, nil, nil, exemplar.AlwaysOffFilter), &vc) + i := newInserter[int64](newPipeline(nil, nil, nil, exemplar.AlwaysOffFilter, 0), &vc) i.logConflict(instID{Name: tc.name}) if tc.conflict { @@ -275,7 +277,7 @@ func TestLogConflictSuggestView(t *testing.T) { var vc 
cache[string, instID] name := strings.ToLower(orig.Name) _ = vc.Lookup(name, func() instID { return orig }) - i := newInserter[int64](newPipeline(nil, nil, nil, exemplar.AlwaysOffFilter), &vc) + i := newInserter[int64](newPipeline(nil, nil, nil, exemplar.AlwaysOffFilter, 0), &vc) viewSuggestion := func(inst instID, stream string) string { return `"NewView(Instrument{` + @@ -380,7 +382,7 @@ func TestInserterCachedAggregatorNameConflict(t *testing.T) { } var vc cache[string, instID] - pipe := newPipeline(nil, NewManualReader(), nil, exemplar.AlwaysOffFilter) + pipe := newPipeline(nil, NewManualReader(), nil, exemplar.AlwaysOffFilter, 0) i := newInserter[int64](pipe, &vc) readerAggregation := i.readerDefaultAggregation(kind) @@ -511,7 +513,7 @@ func TestExemplars(t *testing.T) { t.Run("Custom reservoir", func(t *testing.T) { r := NewManualReader() - reservoirProviderSelector := func(agg Aggregation) exemplar.ReservoirProvider { + reservoirProviderSelector := func(Aggregation) exemplar.ReservoirProvider { return exemplar.FixedSizeReservoirProvider(2) } v1 := NewView(Instrument{Name: "int64-expo-histogram"}, Stream{ @@ -621,7 +623,7 @@ func TestPipelineWithMultipleReaders(t *testing.T) { func TestPipelineProduceErrors(t *testing.T) { // Create a test pipeline with aggregations pipeReader := NewManualReader() - pipe := newPipeline(nil, pipeReader, nil, exemplar.AlwaysOffFilter) + pipe := newPipeline(nil, pipeReader, nil, exemplar.AlwaysOffFilter, 0) // Set up an observable with callbacks var testObsID observableID[int64] @@ -653,32 +655,31 @@ func TestPipelineProduceErrors(t *testing.T) { var shouldReturnError bool // When true, the third callback returns an error var callbackCounts [3]int - // Callback 1: cancels the context during execution but continues to populate data - pipe.callbacks = append(pipe.callbacks, func(ctx context.Context) error { - callbackCounts[0]++ - for _, m := range pipe.int64Measures[testObsID] { - m(ctx, 123, *attribute.EmptySet()) - } - return 
nil - }) - - // Callback 2: populates int64 observable data - pipe.callbacks = append(pipe.callbacks, func(ctx context.Context) error { - callbackCounts[1]++ - if shouldCancelContext { - cancelCtx() - } - return nil - }) - - // Callback 3: return an error - pipe.callbacks = append(pipe.callbacks, func(ctx context.Context) error { - callbackCounts[2]++ - if shouldReturnError { - return fmt.Errorf("test callback error") - } - return nil - }) + pipe.callbacks = append(pipe.callbacks, + // Callback 1: cancels the context during execution but continues to populate data + func(ctx context.Context) error { + callbackCounts[0]++ + for _, m := range pipe.int64Measures[testObsID] { + m(ctx, 123, *attribute.EmptySet()) + } + return nil + }, + // Callback 2: populates int64 observable data + func(context.Context) error { + callbackCounts[1]++ + if shouldCancelContext { + cancelCtx() + } + return nil + }, + // Callback 3: return an error + func(context.Context) error { + callbackCounts[2]++ + if shouldReturnError { + return fmt.Errorf("test callback error") + } + return nil + }) assertMetrics := func(rm *metricdata.ResourceMetrics, expectVal int64) { require.Len(t, rm.ScopeMetrics, 1) diff --git a/sdk/metric/provider.go b/sdk/metric/provider.go index 2fca89e5a8e..b0a6ec58085 100644 --- a/sdk/metric/provider.go +++ b/sdk/metric/provider.go @@ -42,7 +42,7 @@ func NewMeterProvider(options ...Option) *MeterProvider { flush, sdown := conf.readerSignals() mp := &MeterProvider{ - pipes: newPipelines(conf.res, conf.readers, conf.views, conf.exemplarFilter), + pipes: newPipelines(conf.res, conf.readers, conf.views, conf.exemplarFilter, conf.cardinalityLimit), forceFlush: flush, shutdown: sdown, } diff --git a/sdk/metric/provider_test.go b/sdk/metric/provider_test.go index 7e9361ee3d6..262e4ac594d 100644 --- a/sdk/metric/provider_test.go +++ b/sdk/metric/provider_test.go @@ -21,7 +21,7 @@ import ( "go.opentelemetry.io/otel/sdk/metric/metricdata" ) -func TestMeterConcurrentSafe(t 
*testing.T) { +func TestMeterConcurrentSafe(*testing.T) { const name = "TestMeterConcurrentSafe meter" mp := NewMeterProvider() @@ -35,7 +35,7 @@ func TestMeterConcurrentSafe(t *testing.T) { <-done } -func TestForceFlushConcurrentSafe(t *testing.T) { +func TestForceFlushConcurrentSafe(*testing.T) { mp := NewMeterProvider() done := make(chan struct{}) @@ -48,7 +48,7 @@ func TestForceFlushConcurrentSafe(t *testing.T) { <-done } -func TestShutdownConcurrentSafe(t *testing.T) { +func TestShutdownConcurrentSafe(*testing.T) { mp := NewMeterProvider() done := make(chan struct{}) @@ -61,7 +61,7 @@ func TestShutdownConcurrentSafe(t *testing.T) { <-done } -func TestMeterAndShutdownConcurrentSafe(t *testing.T) { +func TestMeterAndShutdownConcurrentSafe(*testing.T) { const name = "TestMeterAndShutdownConcurrentSafe meter" mp := NewMeterProvider() @@ -174,3 +174,73 @@ func TestMeterProviderMixingOnRegisterErrors(t *testing.T) { "Metrics produced for instrument collected by different MeterProvider", ) } + +func TestMeterProviderCardinalityLimit(t *testing.T) { + const uniqueAttributesCount = 10 + + tests := []struct { + name string + options []Option + wantDataPoints int + }{ + { + name: "no limit (default)", + options: nil, + wantDataPoints: uniqueAttributesCount, + }, + { + name: "no limit (limit=0)", + options: []Option{WithCardinalityLimit(0)}, + wantDataPoints: uniqueAttributesCount, + }, + { + name: "no limit (negative)", + options: []Option{WithCardinalityLimit(-5)}, + wantDataPoints: uniqueAttributesCount, + }, + { + name: "limit=5", + options: []Option{WithCardinalityLimit(5)}, + wantDataPoints: 5, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + reader := NewManualReader() + + opts := append(tt.options, WithReader(reader)) + mp := NewMeterProvider(opts...) 
+ + meter := mp.Meter("test-meter") + counter, err := meter.Int64Counter("metric") + require.NoError(t, err, "failed to create counter") + + for i := range uniqueAttributesCount { + counter.Add( + context.Background(), + 1, + api.WithAttributes(attribute.Int("key", i)), + ) + } + + var rm metricdata.ResourceMetrics + err = reader.Collect(context.Background(), &rm) + require.NoError(t, err, "failed to collect metrics") + + require.Len(t, rm.ScopeMetrics, 1, "expected 1 ScopeMetrics") + require.Len(t, rm.ScopeMetrics[0].Metrics, 1, "expected 1 Metric") + + data := rm.ScopeMetrics[0].Metrics[0].Data + require.IsType(t, metricdata.Sum[int64]{}, data, "expected metricdata.Sum[int64]") + + sumData := data.(metricdata.Sum[int64]) + assert.Len( + t, + sumData.DataPoints, + tt.wantDataPoints, + "unexpected number of data points", + ) + }) + } +} diff --git a/sdk/metric/reader.go b/sdk/metric/reader.go index c96e500a2bd..5c1cea8254e 100644 --- a/sdk/metric/reader.go +++ b/sdk/metric/reader.go @@ -117,7 +117,7 @@ type produceHolder struct { type shutdownProducer struct{} // produce returns an ErrReaderShutdown error. -func (p shutdownProducer) produce(context.Context, *metricdata.ResourceMetrics) error { +func (shutdownProducer) produce(context.Context, *metricdata.ResourceMetrics) error { return ErrReaderShutdown } diff --git a/sdk/metric/reader_test.go b/sdk/metric/reader_test.go index a84dd569c43..6c6c61a4cb4 100644 --- a/sdk/metric/reader_test.go +++ b/sdk/metric/reader_test.go @@ -83,7 +83,7 @@ func (ts *readerTestSuite) TestShutdownTwice() { func (ts *readerTestSuite) TestMultipleRegister() { ts.Reader = ts.Factory() p0 := testSDKProducer{ - produceFunc: func(ctx context.Context, rm *metricdata.ResourceMetrics) error { + produceFunc: func(_ context.Context, rm *metricdata.ResourceMetrics) error { // Differentiate this producer from the second by returning an // error. 
*rm = testResourceMetricsA @@ -103,12 +103,12 @@ func (ts *readerTestSuite) TestMultipleRegister() { func (ts *readerTestSuite) TestExternalProducerPartialSuccess() { ts.Reader = ts.Factory( WithProducer(testExternalProducer{ - produceFunc: func(ctx context.Context) ([]metricdata.ScopeMetrics, error) { + produceFunc: func(context.Context) ([]metricdata.ScopeMetrics, error) { return []metricdata.ScopeMetrics{}, assert.AnError }, }), WithProducer(testExternalProducer{ - produceFunc: func(ctx context.Context) ([]metricdata.ScopeMetrics, error) { + produceFunc: func(context.Context) ([]metricdata.ScopeMetrics, error) { return []metricdata.ScopeMetrics{testScopeMetricsB}, nil }, }), @@ -124,7 +124,7 @@ func (ts *readerTestSuite) TestExternalProducerPartialSuccess() { func (ts *readerTestSuite) TestSDKFailureBlocksExternalProducer() { ts.Reader = ts.Factory(WithProducer(testExternalProducer{})) ts.Reader.register(testSDKProducer{ - produceFunc: func(ctx context.Context, rm *metricdata.ResourceMetrics) error { + produceFunc: func(_ context.Context, rm *metricdata.ResourceMetrics) error { *rm = metricdata.ResourceMetrics{} return assert.AnError }, @@ -146,7 +146,7 @@ func (ts *readerTestSuite) TestMethodConcurrentSafe() { var wg sync.WaitGroup const threads = 2 - for i := 0; i < threads; i++ { + for range threads { wg.Add(1) go func() { defer wg.Done() diff --git a/sdk/metric/version.go b/sdk/metric/version.go index 0e5adc1a766..dd9051a76c5 100644 --- a/sdk/metric/version.go +++ b/sdk/metric/version.go @@ -5,5 +5,5 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric" // version is the current release version of the metric SDK in use. 
func version() string { - return "1.37.0" + return "1.38.0" } diff --git a/sdk/metric/view_test.go b/sdk/metric/view_test.go index 37bf37b7a4d..e66b3934678 100644 --- a/sdk/metric/view_test.go +++ b/sdk/metric/view_test.go @@ -400,7 +400,7 @@ func TestNewViewReplace(t *testing.T) { Unit: "1", Aggregation: AggregationLastValue{}, }, - want: func(i Instrument) Stream { + want: func(Instrument) Stream { return Stream{ Name: alt, Description: alt, diff --git a/sdk/resource/benchmark_test.go b/sdk/resource/benchmark_test.go index 94f89b2443a..5bd019513b4 100644 --- a/sdk/resource/benchmark_test.go +++ b/sdk/resource/benchmark_test.go @@ -18,7 +18,7 @@ func makeAttrs(n int) (_, _ *resource.Resource) { used := map[string]bool{} l1 := make([]attribute.KeyValue, n) l2 := make([]attribute.KeyValue, n) - for i := 0; i < n; i++ { + for i := range n { var k string for { k = fmt.Sprint("k", rand.IntN(1000000000)) diff --git a/sdk/resource/builtin.go b/sdk/resource/builtin.go index cefe4ab914a..3f20eb7a563 100644 --- a/sdk/resource/builtin.go +++ b/sdk/resource/builtin.go @@ -13,7 +13,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) type ( @@ -72,7 +72,7 @@ func StringDetector(schemaURL string, k attribute.Key, f func() (string, error)) // Detect returns a *Resource that describes the string as a value // corresponding to attribute.Key as well as the specific schemaURL. 
-func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) { +func (sd stringDetector) Detect(context.Context) (*Resource, error) { value, err := sd.F() if err != nil { return nil, fmt.Errorf("%s: %w", string(sd.K), err) diff --git a/sdk/resource/container.go b/sdk/resource/container.go index 0d8619715e6..bbe142d2031 100644 --- a/sdk/resource/container.go +++ b/sdk/resource/container.go @@ -11,7 +11,7 @@ import ( "os" "regexp" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) type containerIDProvider func() (string, error) @@ -27,7 +27,7 @@ const cgroupPath = "/proc/self/cgroup" // Detect returns a *Resource that describes the id of the container. // If no container id found, an empty resource will be returned. -func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error) { +func (cgroupContainerIDDetector) Detect(context.Context) (*Resource, error) { containerID, err := containerID() if err != nil { return nil, err diff --git a/sdk/resource/container_test.go b/sdk/resource/container_test.go index d46f178b372..832cb24cc0f 100644 --- a/sdk/resource/container_test.go +++ b/sdk/resource/container_test.go @@ -141,14 +141,14 @@ func TestGetContainerIDFromCGroup(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - osStat = func(name string) (os.FileInfo, error) { + osStat = func(string) (os.FileInfo, error) { if tc.cgroupFileNotExist { return nil, os.ErrNotExist } return nil, nil } - osOpen = func(name string) (io.ReadCloser, error) { + osOpen = func(string) (io.ReadCloser, error) { if tc.openFileError != nil { return nil, tc.openFileError } diff --git a/sdk/resource/env.go b/sdk/resource/env.go index 16a062ad8cb..4a1b017eea9 100644 --- a/sdk/resource/env.go +++ b/sdk/resource/env.go @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv 
"go.opentelemetry.io/otel/semconv/v1.37.0" ) const ( diff --git a/sdk/resource/env_test.go b/sdk/resource/env_test.go index 727d3124df1..6872bea9b27 100644 --- a/sdk/resource/env_test.go +++ b/sdk/resource/env_test.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) func TestDetectOnePair(t *testing.T) { diff --git a/sdk/resource/host_id.go b/sdk/resource/host_id.go index 78190392385..5fed33d4fb6 100644 --- a/sdk/resource/host_id.go +++ b/sdk/resource/host_id.go @@ -8,7 +8,7 @@ import ( "errors" "strings" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) type hostIDProvider func() (string, error) @@ -96,7 +96,7 @@ func (r *hostIDReaderLinux) read() (string, error) { type hostIDDetector struct{} // Detect returns a *Resource containing the platform specific host id. -func (hostIDDetector) Detect(ctx context.Context) (*Resource, error) { +func (hostIDDetector) Detect(context.Context) (*Resource, error) { hostID, err := hostID() if err != nil { return nil, err diff --git a/sdk/resource/host_id_test.go b/sdk/resource/host_id_test.go index 1a5e0f49f6d..b200bdbdb26 100644 --- a/sdk/resource/host_id_test.go +++ b/sdk/resource/host_id_test.go @@ -13,11 +13,11 @@ import ( var ( expectedHostID = "f2c668b579780554f70f72a063dc0864" - readFileNoError = func(filename string) (string, error) { + readFileNoError = func(string) (string, error) { return expectedHostID + "\n", nil } - readFileError = func(filename string) (string, error) { + readFileError = func(string) (string, error) { return "", errors.New("not found") } @@ -70,8 +70,6 @@ func TestHostIDReaderBSD(t *testing.T) { } for _, tc := range tt { - tc := tc - t.Run(tc.name, func(t *testing.T) { reader := hostIDReaderBSD{ readFile: tc.fileReader, @@ -119,8 +117,6 @@ func TestHostIDReaderLinux(t *testing.T) { 
} for _, tc := range tt { - tc := tc - t.Run(tc.name, func(t *testing.T) { reader := hostIDReaderLinux{ readFile: tc.fileReader, @@ -198,7 +194,6 @@ func TestHostIDReaderDarwin(t *testing.T) { } for _, tc := range tt { - tc := tc t.Run(tc.name, func(t *testing.T) { reader := hostIDReaderDarwin{ execCommand: tc.commandExecutor, diff --git a/sdk/resource/os.go b/sdk/resource/os.go index 01b4d27a038..51da76e807f 100644 --- a/sdk/resource/os.go +++ b/sdk/resource/os.go @@ -8,7 +8,7 @@ import ( "strings" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) type osDescriptionProvider func() (string, error) @@ -32,7 +32,7 @@ type ( // Detect returns a *Resource that describes the operating system type the // service is running on. -func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) { +func (osTypeDetector) Detect(context.Context) (*Resource, error) { osType := runtimeOS() osTypeAttribute := mapRuntimeOSToSemconvOSType(osType) @@ -45,7 +45,7 @@ func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) { // Detect returns a *Resource that describes the operating system the // service is running on. 
-func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { +func (osDescriptionDetector) Detect(context.Context) (*Resource, error) { description, err := osDescription() if err != nil { return nil, err diff --git a/sdk/resource/os_release_darwin_test.go b/sdk/resource/os_release_darwin_test.go index 12179632f5a..0f86b4e0dcb 100644 --- a/sdk/resource/os_release_darwin_test.go +++ b/sdk/resource/os_release_darwin_test.go @@ -84,8 +84,6 @@ func TestParsePlistFile(t *testing.T) { } for _, tc := range tt { - tc := tc - t.Run(tc.Name, func(t *testing.T) { result, err := resource.ParsePlistFile(tc.Plist) @@ -155,8 +153,6 @@ func TestBuildOSRelease(t *testing.T) { } for _, tc := range tt { - tc := tc - t.Run(tc.Name, func(t *testing.T) { result := resource.BuildOSRelease(tc.Properties) require.Equal(t, tc.OSRelease, result) diff --git a/sdk/resource/os_release_unix.go b/sdk/resource/os_release_unix.go index f537e5ca5c4..7252af79fc9 100644 --- a/sdk/resource/os_release_unix.go +++ b/sdk/resource/os_release_unix.go @@ -63,12 +63,12 @@ func parseOSReleaseFile(file io.Reader) map[string]string { return values } -// skip returns true if the line is blank or starts with a '#' character, and +// skip reports whether the line is blank or starts with a '#' character, and // therefore should be skipped from processing. 
func skip(line string) bool { line = strings.TrimSpace(line) - return len(line) == 0 || strings.HasPrefix(line, "#") + return line == "" || strings.HasPrefix(line, "#") } // parse attempts to split the provided line on the first '=' character, and then @@ -76,7 +76,7 @@ func skip(line string) bool { func parse(line string) (string, string, bool) { k, v, found := strings.Cut(line, "=") - if !found || len(k) == 0 { + if !found || k == "" { return "", "", false } diff --git a/sdk/resource/os_test.go b/sdk/resource/os_test.go index f3156e1e796..a51a448a5bf 100644 --- a/sdk/resource/os_test.go +++ b/sdk/resource/os_test.go @@ -10,7 +10,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) func mockRuntimeProviders() { @@ -45,8 +45,6 @@ func TestMapRuntimeOSToSemconvOSType(t *testing.T) { } for _, tc := range tt { - tc := tc - t.Run(tc.Name, func(t *testing.T) { osTypeAttribute := resource.MapRuntimeOSToSemconvOSType(tc.Goos) require.Equal(t, osTypeAttribute, tc.OSType) diff --git a/sdk/resource/os_unix_test.go b/sdk/resource/os_unix_test.go index bfa03da7623..f9ae12638bb 100644 --- a/sdk/resource/os_unix_test.go +++ b/sdk/resource/os_unix_test.go @@ -27,7 +27,7 @@ func fakeUnameProvider(buf *unix.Utsname) error { return nil } -func fakeUnameProviderWithError(buf *unix.Utsname) error { +func fakeUnameProviderWithError(*unix.Utsname) error { return fmt.Errorf("error invoking uname(2)") } @@ -92,8 +92,6 @@ func TestGetFirstAvailableFile(t *testing.T) { } for _, tc := range tt { - tc := tc - t.Run(tc.Name, func(t *testing.T) { file, err := resource.GetFirstAvailableFile(tc.Candidates) diff --git a/sdk/resource/process.go b/sdk/resource/process.go index 6712ce80d5c..138e57721b6 100644 --- a/sdk/resource/process.go +++ b/sdk/resource/process.go @@ -11,7 +11,7 @@ import ( "path/filepath" "runtime" - semconv 
"go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) type ( @@ -112,19 +112,19 @@ type ( // Detect returns a *Resource that describes the process identifier (PID) of the // executing process. -func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) { +func (processPIDDetector) Detect(context.Context) (*Resource, error) { return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPID(pid())), nil } // Detect returns a *Resource that describes the name of the process executable. -func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) { +func (processExecutableNameDetector) Detect(context.Context) (*Resource, error) { executableName := filepath.Base(commandArgs()[0]) return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableName(executableName)), nil } // Detect returns a *Resource that describes the full path of the process executable. -func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, error) { +func (processExecutablePathDetector) Detect(context.Context) (*Resource, error) { executablePath, err := executablePath() if err != nil { return nil, err @@ -135,13 +135,13 @@ func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, err // Detect returns a *Resource that describes all the command arguments as received // by the process. -func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) { +func (processCommandArgsDetector) Detect(context.Context) (*Resource, error) { return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgs(commandArgs()...)), nil } // Detect returns a *Resource that describes the username of the user that owns the // process. 
-func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) { +func (processOwnerDetector) Detect(context.Context) (*Resource, error) { owner, err := owner() if err != nil { return nil, err @@ -152,17 +152,17 @@ func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) { // Detect returns a *Resource that describes the name of the compiler used to compile // this process image. -func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) { +func (processRuntimeNameDetector) Detect(context.Context) (*Resource, error) { return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeName(runtimeName())), nil } // Detect returns a *Resource that describes the version of the runtime of this process. -func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) { +func (processRuntimeVersionDetector) Detect(context.Context) (*Resource, error) { return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersion(runtimeVersion())), nil } // Detect returns a *Resource that describes the runtime of this process. -func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { +func (processRuntimeDescriptionDetector) Detect(context.Context) (*Resource, error) { runtimeDescription := fmt.Sprintf( "go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch()) diff --git a/sdk/resource/resource.go b/sdk/resource/resource.go index 09b91e1e1b0..28e1e4f7ebd 100644 --- a/sdk/resource/resource.go +++ b/sdk/resource/resource.go @@ -112,7 +112,7 @@ func (r *Resource) String() string { } // MarshalLog is the marshaling function used by the logging system to represent this Resource. 
-func (r *Resource) MarshalLog() interface{} { +func (r *Resource) MarshalLog() any { return struct { Attributes attribute.Set SchemaURL string @@ -148,7 +148,7 @@ func (r *Resource) Iter() attribute.Iterator { return r.attrs.Iter() } -// Equal returns whether r and o represent the same resource. Two resources can +// Equal reports whether r and o represent the same resource. Two resources can // be equal even if they have different schema URLs. // // See the documentation on the [Resource] type for the pitfalls of using == diff --git a/sdk/resource/resource_experimental_test.go b/sdk/resource/resource_experimental_test.go index cbce6f48e83..fe2197928d2 100644 --- a/sdk/resource/resource_experimental_test.go +++ b/sdk/resource/resource_experimental_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/require" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) func TestDefaultExperimental(t *testing.T) { diff --git a/sdk/resource/resource_test.go b/sdk/resource/resource_test.go index c41a31a77bc..625e0b28b5b 100644 --- a/sdk/resource/resource_test.go +++ b/sdk/resource/resource_test.go @@ -20,7 +20,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) var ( @@ -782,7 +782,7 @@ func TestResourceConcurrentSafe(t *testing.T) { // Creating Resources should also be free of any data races, // because Resources are immutable. 
var wg sync.WaitGroup - for i := 0; i < 2; i++ { + for range 2 { wg.Add(1) go func() { defer wg.Done() @@ -796,7 +796,7 @@ func TestResourceConcurrentSafe(t *testing.T) { type fakeDetector struct{} -func (f fakeDetector) Detect(_ context.Context) (*resource.Resource, error) { +func (fakeDetector) Detect(context.Context) (*resource.Resource, error) { // A bit pedantic, but resource.NewWithAttributes returns an empty Resource when // no attributes specified. We want to make sure that this is concurrent-safe. return resource.NewWithAttributes("https://opentelemetry.io/schemas/1.21.0"), nil diff --git a/sdk/trace/batch_span_processor.go b/sdk/trace/batch_span_processor.go index 6966ed861e6..9bc3e525d19 100644 --- a/sdk/trace/batch_span_processor.go +++ b/sdk/trace/batch_span_processor.go @@ -6,24 +6,35 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" "errors" + "fmt" "sync" "sync/atomic" "time" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/internal/env" + "go.opentelemetry.io/otel/sdk/trace/internal/x" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" "go.opentelemetry.io/otel/trace" ) // Defaults for BatchSpanProcessorOptions. const ( - DefaultMaxQueueSize = 2048 - DefaultScheduleDelay = 5000 + DefaultMaxQueueSize = 2048 + // DefaultScheduleDelay is the delay interval between two consecutive exports, in milliseconds. + DefaultScheduleDelay = 5000 + // DefaultExportTimeout is the duration after which an export is cancelled, in milliseconds. DefaultExportTimeout = 30000 DefaultMaxExportBatchSize = 512 ) +var queueFull = otelconv.ErrorTypeAttr("queue_full") + // BatchSpanProcessorOption configures a BatchSpanProcessor. 
type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions) @@ -67,6 +78,11 @@ type batchSpanProcessor struct { queue chan ReadOnlySpan dropped uint32 + selfObservabilityEnabled bool + callbackRegistration metric.Registration + spansProcessedCounter otelconv.SDKProcessorSpanProcessed + componentNameAttr attribute.KeyValue + batch []ReadOnlySpan batchMutex sync.Mutex timer *time.Timer @@ -87,11 +103,7 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO maxExportBatchSize := env.BatchSpanProcessorMaxExportBatchSize(DefaultMaxExportBatchSize) if maxExportBatchSize > maxQueueSize { - if DefaultMaxExportBatchSize > maxQueueSize { - maxExportBatchSize = maxQueueSize - } else { - maxExportBatchSize = DefaultMaxExportBatchSize - } + maxExportBatchSize = min(DefaultMaxExportBatchSize, maxQueueSize) } o := BatchSpanProcessorOptions{ @@ -112,6 +124,21 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO stopCh: make(chan struct{}), } + if x.SelfObservability.Enabled() { + bsp.selfObservabilityEnabled = true + bsp.componentNameAttr = componentName() + + var err error + bsp.spansProcessedCounter, bsp.callbackRegistration, err = newBSPObs( + bsp.componentNameAttr, + func() int64 { return int64(len(bsp.queue)) }, + int64(bsp.o.MaxQueueSize), + ) + if err != nil { + otel.Handle(err) + } + } + bsp.stopWait.Add(1) go func() { defer bsp.stopWait.Done() @@ -122,8 +149,61 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO return bsp } +var processorIDCounter atomic.Int64 + +// nextProcessorID returns an identifier for this batch span processor, +// starting with 0 and incrementing by 1 each time it is called. 
+func nextProcessorID() int64 { + return processorIDCounter.Add(1) - 1 +} + +func componentName() attribute.KeyValue { + id := nextProcessorID() + name := fmt.Sprintf("%s/%d", otelconv.ComponentTypeBatchingSpanProcessor, id) + return semconv.OTelComponentName(name) +} + +// newBSPObs creates and returns a new set of metrics instruments and a +// registration for a BatchSpanProcessor. It is the caller's responsibility +// to unregister the registration when it is no longer needed. +func newBSPObs( + cmpnt attribute.KeyValue, + qLen func() int64, + qMax int64, +) (otelconv.SDKProcessorSpanProcessed, metric.Registration, error) { + meter := otel.GetMeterProvider().Meter( + selfObsScopeName, + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(semconv.SchemaURL), + ) + + qCap, err := otelconv.NewSDKProcessorSpanQueueCapacity(meter) + + qSize, e := otelconv.NewSDKProcessorSpanQueueSize(meter) + err = errors.Join(err, e) + + spansProcessed, e := otelconv.NewSDKProcessorSpanProcessed(meter) + err = errors.Join(err, e) + + cmpntT := semconv.OTelComponentTypeBatchingSpanProcessor + attrs := metric.WithAttributes(cmpnt, cmpntT) + + reg, e := meter.RegisterCallback( + func(_ context.Context, o metric.Observer) error { + o.ObserveInt64(qSize.Inst(), qLen(), attrs) + o.ObserveInt64(qCap.Inst(), qMax, attrs) + return nil + }, + qSize.Inst(), + qCap.Inst(), + ) + err = errors.Join(err, e) + + return spansProcessed, reg, err +} + // OnStart method does nothing. -func (bsp *batchSpanProcessor) OnStart(parent context.Context, s ReadWriteSpan) {} +func (*batchSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} // OnEnd method enqueues a ReadOnlySpan for later processing. 
func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) { @@ -162,6 +242,9 @@ func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error { case <-ctx.Done(): err = ctx.Err() } + if bsp.selfObservabilityEnabled { + err = errors.Join(err, bsp.callbackRegistration.Unregister()) + } }) return err } @@ -171,7 +254,7 @@ type forceFlushSpan struct { flushed chan struct{} } -func (f forceFlushSpan) SpanContext() trace.SpanContext { +func (forceFlushSpan) SpanContext() trace.SpanContext { return trace.NewSpanContext(trace.SpanContextConfig{TraceFlags: trace.FlagsSampled}) } @@ -274,6 +357,11 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { if l := len(bsp.batch); l > 0 { global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped)) + if bsp.selfObservabilityEnabled { + bsp.spansProcessedCounter.Add(ctx, int64(l), + bsp.componentNameAttr, + bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor)) + } err := bsp.e.ExportSpans(ctx, bsp.batch) // A new batch is always created after exporting, even if the batch failed to be exported. 
@@ -382,11 +470,17 @@ func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd R case bsp.queue <- sd: return true case <-ctx.Done(): + if bsp.selfObservabilityEnabled { + bsp.spansProcessedCounter.Add(ctx, 1, + bsp.componentNameAttr, + bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor), + bsp.spansProcessedCounter.AttrErrorType(queueFull)) + } return false } } -func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) bool { +func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) bool { if !sd.SpanContext().IsSampled() { return false } @@ -396,12 +490,18 @@ func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) b return true default: atomic.AddUint32(&bsp.dropped, 1) + if bsp.selfObservabilityEnabled { + bsp.spansProcessedCounter.Add(ctx, 1, + bsp.componentNameAttr, + bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor), + bsp.spansProcessedCounter.AttrErrorType(queueFull)) + } } return false } // MarshalLog is the marshaling function used by the logging system to represent this Span Processor. 
-func (bsp *batchSpanProcessor) MarshalLog() interface{} { +func (bsp *batchSpanProcessor) MarshalLog() any { return struct { Type string SpanExporter SpanExporter diff --git a/sdk/trace/batch_span_processor_test.go b/sdk/trace/batch_span_processor_test.go index 23e84138d11..cef4c8198d3 100644 --- a/sdk/trace/batch_span_processor_test.go +++ b/sdk/trace/batch_span_processor_test.go @@ -8,14 +8,25 @@ import ( "encoding/binary" "errors" "fmt" + "runtime" "sync" + "sync/atomic" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk" + "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/internal/env" + sdkmetric "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" "go.opentelemetry.io/otel/trace" ) @@ -505,7 +516,7 @@ func TestBatchSpanProcessorDropBatchIfFailed(t *testing.T) { func assertMaxSpanDiff(t *testing.T, want, got, maxDif int) { spanDifference := want - got if spanDifference < 0 { - spanDifference = spanDifference * -1 + spanDifference *= -1 } if spanDifference > maxDif { t.Errorf("number of exported span not equal to or within %d less than: got %+v, want %+v\n", @@ -514,7 +525,7 @@ func assertMaxSpanDiff(t *testing.T, want, got, maxDif int) { } type indefiniteExporter struct { - stop chan (struct{}) + stop chan struct{} } func newIndefiniteExporter(t *testing.T) indefiniteExporter { @@ -525,7 +536,7 @@ func newIndefiniteExporter(t *testing.T) indefiniteExporter { return e } -func (e indefiniteExporter) Shutdown(context.Context) error { +func (indefiniteExporter) Shutdown(context.Context) error { return nil } @@ -580,7 +591,7 @@ func TestBatchSpanProcessorForceFlushQueuedSpans(t *testing.T) { 
tracer := tp.Tracer("tracer") - for i := 0; i < 10; i++ { + for i := range 10 { _, span := tracer.Start(ctx, fmt.Sprintf("span%d", i)) span.End() @@ -633,3 +644,261 @@ func TestBatchSpanProcessorConcurrentSafe(t *testing.T) { wg.Wait() } + +// Drop metrics not being tested in this test. +var dropSpanMetricsView = sdkmetric.NewView( + sdkmetric.Instrument{ + Name: "otel.sdk.span.*", + }, + sdkmetric.Stream{Aggregation: sdkmetric.AggregationDrop{}}, +) + +func TestBatchSpanProcessorMetricsDisabled(t *testing.T) { + t.Setenv("OTEL_GO_X_SELF_OBSERVABILITY", "false") + tp := basicTracerProvider(t) + reader := sdkmetric.NewManualReader() + meterProvider := sdkmetric.NewMeterProvider( + sdkmetric.WithReader(reader), + sdkmetric.WithView(dropSpanMetricsView), + ) + otel.SetMeterProvider(meterProvider) + me := newBlockingExporter() + t.Cleanup(func() { assert.NoError(t, me.Shutdown(context.Background())) }) + bsp := NewBatchSpanProcessor( + me, + // Make sure timeout doesn't trigger during the test. + WithBatchTimeout(time.Hour), + WithMaxQueueSize(2), + WithMaxExportBatchSize(2), + ) + tp.RegisterSpanProcessor(bsp) + + tr := tp.Tracer("TestBatchSpanProcessorMetricsDisabled") + // Generate 2 spans, which export and block during the export call. + generateSpan(t, tr, testOption{genNumSpans: 2}) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + assert.NoError(t, me.waitForSpans(ctx, 2)) + + // Validate that there are no metrics produced. + gotMetrics := new(metricdata.ResourceMetrics) + assert.NoError(t, reader.Collect(context.Background(), gotMetrics)) + require.Empty(t, gotMetrics.ScopeMetrics) + // Generate 3 spans. 2 fill the queue, and 1 is dropped because the queue is full. + generateSpan(t, tr, testOption{genNumSpans: 3}) + // Validate that there are no metrics produced. 
+ gotMetrics = new(metricdata.ResourceMetrics) + assert.NoError(t, reader.Collect(context.Background(), gotMetrics)) + require.Empty(t, gotMetrics.ScopeMetrics) +} + +func TestBatchSpanProcessorMetrics(t *testing.T) { + t.Setenv("OTEL_GO_X_SELF_OBSERVABILITY", "true") + tp := basicTracerProvider(t) + reader := sdkmetric.NewManualReader() + meterProvider := sdkmetric.NewMeterProvider( + sdkmetric.WithReader(reader), + sdkmetric.WithView(dropSpanMetricsView), + ) + otel.SetMeterProvider(meterProvider) + me := newBlockingExporter() + t.Cleanup(func() { assert.NoError(t, me.Shutdown(context.Background())) }) + bsp := NewBatchSpanProcessor( + me, + // Make sure timeout doesn't trigger during the test. + WithBatchTimeout(time.Hour), + WithMaxQueueSize(2), + WithMaxExportBatchSize(2), + ) + internalBsp := bsp.(*batchSpanProcessor) + tp.RegisterSpanProcessor(bsp) + + tr := tp.Tracer("TestBatchSpanProcessorMetrics") + // Generate 2 spans, which export and block during the export call. + generateSpan(t, tr, testOption{genNumSpans: 2}) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + assert.NoError(t, me.waitForSpans(ctx, 2)) + assertSelfObsScopeMetrics(t, internalBsp.componentNameAttr, reader, + expectMetrics{queueCapacity: 2, queueSize: 0, successProcessed: 2}) + // Generate 3 spans. 2 fill the queue, and 1 is dropped because the queue is full. 
+ generateSpan(t, tr, testOption{genNumSpans: 3}) + assertSelfObsScopeMetrics(t, internalBsp.componentNameAttr, reader, + expectMetrics{queueCapacity: 2, queueSize: 2, queueFullProcessed: 1, successProcessed: 2}) +} + +func TestBatchSpanProcessorBlockingMetrics(t *testing.T) { + t.Setenv("OTEL_GO_X_SELF_OBSERVABILITY", "true") + tp := basicTracerProvider(t) + reader := sdkmetric.NewManualReader() + meterProvider := sdkmetric.NewMeterProvider( + sdkmetric.WithReader(reader), + sdkmetric.WithView(dropSpanMetricsView), + ) + otel.SetMeterProvider(meterProvider) + me := newBlockingExporter() + t.Cleanup(func() { assert.NoError(t, me.Shutdown(context.Background())) }) + bsp := NewBatchSpanProcessor( + me, + // Use WithBlocking so we can trigger a queueFull using ForceFlush. + WithBlocking(), + // Make sure timeout doesn't trigger during the test. + WithBatchTimeout(time.Hour), + WithMaxQueueSize(2), + WithMaxExportBatchSize(2), + ) + internalBsp := bsp.(*batchSpanProcessor) + tp.RegisterSpanProcessor(bsp) + + tr := tp.Tracer("TestBatchSpanProcessorBlockingMetrics") + // Generate 2 spans that are exported to the exporter, which blocks. + generateSpan(t, tr, testOption{genNumSpans: 2}) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + assert.NoError(t, me.waitForSpans(ctx, 2)) + assertSelfObsScopeMetrics(t, internalBsp.componentNameAttr, reader, + expectMetrics{queueCapacity: 2, queueSize: 0, successProcessed: 2}) + // Generate 2 spans to fill the queue. + generateSpan(t, tr, testOption{genNumSpans: 2}) + go func() { + // Generate a span which blocks because the queue is full. + generateSpan(t, tr, testOption{genNumSpans: 1}) + }() + assertSelfObsScopeMetrics(t, internalBsp.componentNameAttr, reader, + expectMetrics{queueCapacity: 2, queueSize: 2, successProcessed: 2}) + + // Use ForceFlush to force the span that is blocking on the full queue to be dropped. 
+ ctx, cancel = context.WithTimeout(context.Background(), 10*time.Millisecond) + defer cancel() + assert.Error(t, tp.ForceFlush(ctx)) + assertSelfObsScopeMetrics(t, internalBsp.componentNameAttr, reader, + expectMetrics{queueCapacity: 2, queueSize: 2, queueFullProcessed: 1, successProcessed: 2}) +} + +type expectMetrics struct { + queueCapacity int64 + queueSize int64 + successProcessed int64 + queueFullProcessed int64 +} + +func assertSelfObsScopeMetrics(t *testing.T, componentNameAttr attribute.KeyValue, reader sdkmetric.Reader, + expectation expectMetrics, +) { + t.Helper() + gotResourceMetrics := new(metricdata.ResourceMetrics) + assert.NoError(t, reader.Collect(context.Background(), gotResourceMetrics)) + + baseAttrs := attribute.NewSet( + semconv.OTelComponentTypeBatchingSpanProcessor, + componentNameAttr, + ) + wantMetrics := []metricdata.Metrics{ + { + Name: otelconv.SDKProcessorSpanQueueCapacity{}.Name(), + Description: otelconv.SDKProcessorSpanQueueCapacity{}.Description(), + Unit: otelconv.SDKProcessorSpanQueueCapacity{}.Unit(), + Data: metricdata.Sum[int64]{ + DataPoints: []metricdata.DataPoint[int64]{{Attributes: baseAttrs, Value: expectation.queueCapacity}}, + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: false, + }, + }, + { + Name: otelconv.SDKProcessorSpanQueueSize{}.Name(), + Description: otelconv.SDKProcessorSpanQueueSize{}.Description(), + Unit: otelconv.SDKProcessorSpanQueueSize{}.Unit(), + Data: metricdata.Sum[int64]{ + DataPoints: []metricdata.DataPoint[int64]{{Attributes: baseAttrs, Value: expectation.queueSize}}, + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: false, + }, + }, + } + + wantProcessedDataPoints := []metricdata.DataPoint[int64]{} + if expectation.successProcessed > 0 { + wantProcessedDataPoints = append(wantProcessedDataPoints, metricdata.DataPoint[int64]{ + Value: expectation.successProcessed, + Attributes: attribute.NewSet( + semconv.OTelComponentTypeBatchingSpanProcessor, + componentNameAttr, 
+ ), + }) + } + if expectation.queueFullProcessed > 0 { + wantProcessedDataPoints = append(wantProcessedDataPoints, metricdata.DataPoint[int64]{ + Value: expectation.queueFullProcessed, + Attributes: attribute.NewSet( + semconv.OTelComponentTypeBatchingSpanProcessor, + componentNameAttr, + semconv.ErrorTypeKey.String(string(queueFull)), + ), + }) + } + + if len(wantProcessedDataPoints) > 0 { + wantMetrics = append(wantMetrics, + metricdata.Metrics{ + Name: otelconv.SDKProcessorSpanProcessed{}.Name(), + Description: otelconv.SDKProcessorSpanProcessed{}.Description(), + Unit: otelconv.SDKProcessorSpanProcessed{}.Unit(), + Data: metricdata.Sum[int64]{ + DataPoints: wantProcessedDataPoints, + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + }, + }, + ) + } + + wantScopeMetric := metricdata.ScopeMetrics{ + Scope: instrumentation.Scope{ + Name: "go.opentelemetry.io/otel/sdk/trace", + Version: sdk.Version(), + SchemaURL: semconv.SchemaURL, + }, + Metrics: wantMetrics, + } + metricdatatest.AssertEqual(t, wantScopeMetric, gotResourceMetrics.ScopeMetrics[0], metricdatatest.IgnoreTimestamp()) +} + +// blockingExporter blocks until the exported span is removed from the channel. 
+type blockingExporter struct { + shutdown chan struct{} + total atomic.Int32 +} + +func newBlockingExporter() *blockingExporter { + e := &blockingExporter{shutdown: make(chan struct{})} + return e +} + +func (e *blockingExporter) Shutdown(ctx context.Context) error { + select { + case <-e.shutdown: + default: + close(e.shutdown) + } + return ctx.Err() +} + +func (e *blockingExporter) ExportSpans(ctx context.Context, s []ReadOnlySpan) error { + e.total.Add(int32(len(s))) + <-e.shutdown + return ctx.Err() +} + +func (e *blockingExporter) waitForSpans(ctx context.Context, n int32) error { + // Wait for all n spans to reach the export call + for e.total.Load() < n { + select { + case <-ctx.Done(): + return fmt.Errorf("timeout waiting for %d spans to be exported", n) + default: + // So the select will not block + } + runtime.Gosched() + } + return nil +} diff --git a/sdk/trace/benchmark_test.go b/sdk/trace/benchmark_test.go index 2e932ad7864..7e1ca41f1ce 100644 --- a/sdk/trace/benchmark_test.go +++ b/sdk/trace/benchmark_test.go @@ -293,6 +293,45 @@ func BenchmarkSpanWithEvents_WithTimestamp(b *testing.B) { }) } +func BenchmarkTraceIDFromHex(b *testing.B) { + want := trace.TraceID{ + 0xde, + 0xad, + 0xbe, + 0xef, + 0x01, + 0x23, + 0x45, + 0x67, + 0x89, + 0xab, + 0xcd, + 0xef, + 0x01, + 0x23, + 0x45, + 0x67, + } + b.ReportAllocs() + for i := 0; i < b.N; i++ { + got, _ := trace.TraceIDFromHex("deadbeef0123456789abcdef01234567") + if got != want { + b.Fatalf("got = %q want = %q", got.String(), want) + } + } +} + +func BenchmarkSpanIDFromHex(b *testing.B) { + want := trace.SpanID{0xde, 0xad, 0xbe, 0xef, 0x01, 0x23, 0x45, 0x67} + b.ReportAllocs() + for i := 0; i < b.N; i++ { + got, _ := trace.SpanIDFromHex("deadbeef01234567") + if got != want { + b.Fatalf("got = %q want = %q", got.String(), want) + } + } +} + func BenchmarkTraceID_DotString(b *testing.B) { t, _ := trace.TraceIDFromHex("0000000000000001000000000000002a") sc := 
trace.NewSpanContext(trace.SpanContextConfig{TraceID: t}) @@ -367,7 +406,7 @@ func BenchmarkSpanProcessorVerboseLogging(b *testing.B) { b.Cleanup(func(l logr.Logger) func() { return func() { global.SetLogger(l) } }(global.GetLogger())) - global.SetLogger(funcr.New(func(prefix, args string) {}, funcr.Options{Verbosity: 5})) + global.SetLogger(funcr.New(func(string, string) {}, funcr.Options{Verbosity: 5})) tp := sdktrace.NewTracerProvider( sdktrace.WithBatcher( tracetest.NewNoopExporter(), @@ -383,7 +422,7 @@ func BenchmarkSpanProcessorVerboseLogging(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { - for j := 0; j < 10; j++ { + for range 10 { _, span := tracer.Start(ctx, "bench") span.End() } diff --git a/sdk/trace/doc.go b/sdk/trace/doc.go index 1f60524e3ee..e58e7f6ed78 100644 --- a/sdk/trace/doc.go +++ b/sdk/trace/doc.go @@ -6,5 +6,8 @@ Package trace contains support for OpenTelemetry distributed tracing. The following assumes a basic familiarity with OpenTelemetry concepts. See https://opentelemetry.io. + +See [go.opentelemetry.io/otel/sdk/trace/internal/x] for information about +the experimental features. 
*/ package trace // import "go.opentelemetry.io/otel/sdk/trace" diff --git a/sdk/trace/evictedqueue_test.go b/sdk/trace/evictedqueue_test.go index 66269395817..f395659a2aa 100644 --- a/sdk/trace/evictedqueue_test.go +++ b/sdk/trace/evictedqueue_test.go @@ -45,7 +45,7 @@ func TestDropCount(t *testing.T) { t.Cleanup(func(l logr.Logger) func() { return func() { global.SetLogger(l) } }(global.GetLogger())) - global.SetLogger(funcr.New(func(prefix, args string) { + global.SetLogger(funcr.New(func(string, string) { called++ }, funcr.Options{Verbosity: 1})) diff --git a/sdk/trace/id_generator.go b/sdk/trace/id_generator.go index c8d3fb7e3cf..3649322a6e4 100644 --- a/sdk/trace/id_generator.go +++ b/sdk/trace/id_generator.go @@ -32,7 +32,7 @@ type randomIDGenerator struct{} var _ IDGenerator = &randomIDGenerator{} // NewSpanID returns a non-zero span ID from a randomly-chosen sequence. -func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID { +func (*randomIDGenerator) NewSpanID(context.Context, trace.TraceID) trace.SpanID { sid := trace.SpanID{} for { binary.NativeEndian.PutUint64(sid[:], rand.Uint64()) @@ -45,7 +45,7 @@ func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.Trace // NewIDs returns a non-zero trace ID and a non-zero span ID from a // randomly-chosen sequence. 
-func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) { +func (*randomIDGenerator) NewIDs(context.Context) (trace.TraceID, trace.SpanID) { tid := trace.TraceID{} sid := trace.SpanID{} for { diff --git a/sdk/trace/id_generator_test.go b/sdk/trace/id_generator_test.go index 29fe1edf7ac..42058a2202e 100644 --- a/sdk/trace/id_generator_test.go +++ b/sdk/trace/id_generator_test.go @@ -16,7 +16,7 @@ func TestNewIDs(t *testing.T) { gen := defaultIDGenerator() n := 1000 - for i := 0; i < n; i++ { + for range n { traceID, spanID := gen.NewIDs(context.Background()) assert.Truef(t, traceID.IsValid(), "trace id: %s", traceID.String()) assert.Truef(t, spanID.IsValid(), "span id: %s", spanID.String()) @@ -28,7 +28,7 @@ func TestNewSpanID(t *testing.T) { testTraceID := [16]byte{123, 123} n := 1000 - for i := 0; i < n; i++ { + for range n { spanID := gen.NewSpanID(context.Background(), testTraceID) assert.Truef(t, spanID.IsValid(), "span id: %s", spanID.String()) } diff --git a/sdk/trace/internal/x/README.md b/sdk/trace/internal/x/README.md new file mode 100644 index 00000000000..feec16fa64b --- /dev/null +++ b/sdk/trace/internal/x/README.md @@ -0,0 +1,35 @@ +# Experimental Features + +The Trace SDK contains features that have not yet stabilized in the OpenTelemetry specification. +These features are added to the OpenTelemetry Go Trace SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback. + +These features may change in backwards incompatible ways as feedback is applied. +See the [Compatibility and Stability](#compatibility-and-stability) section for more information. + +## Features + +- [Self-Observability](#self-observability) + +### Self-Observability + +The SDK provides a self-observability feature that allows you to monitor the SDK itself. + +To opt-in, set the environment variable `OTEL_GO_X_SELF_OBSERVABILITY` to `true`. 
+ +When enabled, the SDK will create the following metrics using the global `MeterProvider`: + +- `otel.sdk.span.live` +- `otel.sdk.span.started` + +Please see the [Semantic conventions for OpenTelemetry SDK metrics] documentation for more details on these metrics. + +[Semantic conventions for OpenTelemetry SDK metrics]: https://github.com/open-telemetry/semantic-conventions/blob/v1.36.0/docs/otel/sdk-metrics.md + +## Compatibility and Stability + +Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md). +These features may be removed or modified in successive version releases, including patch versions. + +When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release. +There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version. +If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support. diff --git a/sdk/trace/internal/x/x.go b/sdk/trace/internal/x/x.go new file mode 100644 index 00000000000..2fcbbcc66ec --- /dev/null +++ b/sdk/trace/internal/x/x.go @@ -0,0 +1,63 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package x documents experimental features for [go.opentelemetry.io/otel/sdk/trace]. +package x // import "go.opentelemetry.io/otel/sdk/trace/internal/x" + +import ( + "os" + "strings" +) + +// SelfObservability is an experimental feature flag that determines if SDK +// self-observability metrics are enabled. +// +// To enable this feature set the OTEL_GO_X_SELF_OBSERVABILITY environment variable +// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" +// will also enable this). 
+var SelfObservability = newFeature("SELF_OBSERVABILITY", func(v string) (string, bool) { + if strings.EqualFold(v, "true") { + return v, true + } + return "", false +}) + +// Feature is an experimental feature control flag. It provides a uniform way +// to interact with these feature flags and parse their values. +type Feature[T any] struct { + key string + parse func(v string) (T, bool) +} + +func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { + const envKeyRoot = "OTEL_GO_X_" + return Feature[T]{ + key: envKeyRoot + suffix, + parse: parse, + } +} + +// Key returns the environment variable key that needs to be set to enable the +// feature. +func (f Feature[T]) Key() string { return f.key } + +// Lookup returns the user configured value for the feature and true if the +// user has enabled the feature. Otherwise, if the feature is not enabled, a +// zero-value and false are returned. +func (f Feature[T]) Lookup() (v T, ok bool) { + // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value + // + // > The SDK MUST interpret an empty value of an environment variable the + // > same way as when the variable is unset. + vRaw := os.Getenv(f.key) + if vRaw == "" { + return v, ok + } + return f.parse(vRaw) +} + +// Enabled reports whether the feature is enabled. 
+func (f Feature[T]) Enabled() bool { + _, ok := f.Lookup() + return ok +} diff --git a/sdk/trace/internal/x/x_test.go b/sdk/trace/internal/x/x_test.go new file mode 100644 index 00000000000..15124ca91d1 --- /dev/null +++ b/sdk/trace/internal/x/x_test.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package x + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSelfObservability(t *testing.T) { + const key = "OTEL_GO_X_SELF_OBSERVABILITY" + require.Equal(t, key, SelfObservability.Key()) + + t.Run("100", run(setenv(key, "100"), assertDisabled(SelfObservability))) + t.Run("true", run(setenv(key, "true"), assertEnabled(SelfObservability, "true"))) + t.Run("True", run(setenv(key, "True"), assertEnabled(SelfObservability, "True"))) + t.Run("false", run(setenv(key, "false"), assertDisabled(SelfObservability))) + t.Run("empty", run(assertDisabled(SelfObservability))) +} + +func run(steps ...func(*testing.T)) func(*testing.T) { + return func(t *testing.T) { + t.Helper() + for _, step := range steps { + step(t) + } + } +} + +func setenv(k, v string) func(t *testing.T) { //nolint:unparam // This is a reusable test utility function. 
+ return func(t *testing.T) { t.Setenv(k, v) } +} + +func assertEnabled[T any](f Feature[T], want T) func(*testing.T) { + return func(t *testing.T) { + t.Helper() + assert.True(t, f.Enabled(), "not enabled") + + v, ok := f.Lookup() + assert.True(t, ok, "Lookup state") + assert.Equal(t, want, v, "Lookup value") + } +} + +func assertDisabled[T any](f Feature[T]) func(*testing.T) { + var zero T + return func(t *testing.T) { + t.Helper() + + assert.False(t, f.Enabled(), "enabled") + + v, ok := f.Lookup() + assert.False(t, ok, "Lookup state") + assert.Equal(t, zero, v, "Lookup value") + } +} diff --git a/sdk/trace/provider.go b/sdk/trace/provider.go index 0e2a2e7c60d..37ce2ac876a 100644 --- a/sdk/trace/provider.go +++ b/sdk/trace/provider.go @@ -5,14 +5,20 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" + "errors" "fmt" "sync" "sync/atomic" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" + "go.opentelemetry.io/otel/sdk/trace/internal/x" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" "go.opentelemetry.io/otel/trace/noop" @@ -20,6 +26,7 @@ import ( const ( defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer" + selfObsScopeName = "go.opentelemetry.io/otel/sdk/trace" ) // tracerProviderConfig. @@ -45,7 +52,7 @@ type tracerProviderConfig struct { } // MarshalLog is the marshaling function used by the logging system to represent this Provider. 
-func (cfg tracerProviderConfig) MarshalLog() interface{} { +func (cfg tracerProviderConfig) MarshalLog() any { return struct { SpanProcessors []SpanProcessor SamplerType string @@ -156,8 +163,18 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T t, ok := p.namedTracer[is] if !ok { t = &tracer{ - provider: p, - instrumentationScope: is, + provider: p, + instrumentationScope: is, + selfObservabilityEnabled: x.SelfObservability.Enabled(), + } + if t.selfObservabilityEnabled { + var err error + t.spanLiveMetric, t.spanStartedMetric, err = newInst() + if err != nil { + msg := "failed to create self-observability metrics for tracer: %w" + err := fmt.Errorf(msg, err) + otel.Handle(err) + } } p.namedTracer[is] = t } @@ -184,6 +201,23 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T return t } +func newInst() (otelconv.SDKSpanLive, otelconv.SDKSpanStarted, error) { + m := otel.GetMeterProvider().Meter( + selfObsScopeName, + metric.WithInstrumentationVersion(sdk.Version()), + metric.WithSchemaURL(semconv.SchemaURL), + ) + + var err error + spanLiveMetric, e := otelconv.NewSDKSpanLive(m) + err = errors.Join(err, e) + + spanStartedMetric, e := otelconv.NewSDKSpanStarted(m) + err = errors.Join(err, e) + + return spanLiveMetric, spanStartedMetric, err +} + // RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors. func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) { // This check prevents calls during a shutdown. 
diff --git a/sdk/trace/provider_test.go b/sdk/trace/provider_test.go index 9ec8267a5de..9fa1923a7ce 100644 --- a/sdk/trace/provider_test.go +++ b/sdk/trace/provider_test.go @@ -13,7 +13,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/trace" ) @@ -33,8 +35,8 @@ func (t *basicSpanProcessor) Shutdown(context.Context) error { return t.injectShutdownError } -func (t *basicSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} -func (t *basicSpanProcessor) OnEnd(ReadOnlySpan) {} +func (*basicSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} +func (*basicSpanProcessor) OnEnd(ReadOnlySpan) {} func (t *basicSpanProcessor) ForceFlush(context.Context) error { t.flushed = true return nil @@ -48,16 +50,16 @@ func (t *shutdownSpanProcessor) Shutdown(ctx context.Context) error { return t.shutdown(ctx) } -func (t *shutdownSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} -func (t *shutdownSpanProcessor) OnEnd(ReadOnlySpan) {} -func (t *shutdownSpanProcessor) ForceFlush(context.Context) error { +func (*shutdownSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} +func (*shutdownSpanProcessor) OnEnd(ReadOnlySpan) {} +func (*shutdownSpanProcessor) ForceFlush(context.Context) error { return nil } func TestShutdownCallsTracerMethod(t *testing.T) { stp := NewTracerProvider() sp := &shutdownSpanProcessor{ - shutdown: func(ctx context.Context) error { + shutdown: func(context.Context) error { _ = stp.Tracer("abc") // must not deadlock return nil }, @@ -232,7 +234,7 @@ func TestTracerProviderSamplerConfigFromEnv(t *testing.T) { argOptional bool description string errorType error - invalidArgErrorType interface{} + invalidArgErrorType any } randFloat := rand.Float64() @@ -353,7 +355,7 @@ func TestTracerProviderSamplerConfigFromEnv(t *testing.T) { } } -func testStoredError(t *testing.T, target 
interface{}) { +func testStoredError(t *testing.T, target any) { t.Helper() if assert.Len(t, handler.errs, 1) && assert.Error(t, handler.errs[0]) { @@ -398,3 +400,69 @@ func TestTracerProviderReturnsSameTracer(t *testing.T) { assert.Same(t, t1, t4) assert.Same(t, t2, t5) } + +func TestTracerProviderSelfObservability(t *testing.T) { + handler.Reset() + p := NewTracerProvider() + + // Enable self-observability + t.Setenv("OTEL_GO_X_SELF_OBSERVABILITY", "true") + + tr := p.Tracer("test-tracer") + require.IsType(t, &tracer{}, tr) + + tStruct := tr.(*tracer) + assert.True(t, tStruct.selfObservabilityEnabled, "Self-observability should be enabled") + + // Verify instruments are created + assert.NotNil(t, tStruct.spanLiveMetric, "spanLiveMetric should be created") + assert.NotNil(t, tStruct.spanStartedMetric, "spanStartedMetric should be created") + + // Verify errors are passed to the otel handler + handlerErrs := handler.errs + assert.Empty(t, handlerErrs, "No errors should occur during instrument creation") +} + +func TestTracerProviderSelfObservabilityErrorsHandled(t *testing.T) { + handler.Reset() + + orig := otel.GetMeterProvider() + t.Cleanup(func() { otel.SetMeterProvider(orig) }) + otel.SetMeterProvider(&errMeterProvider{err: assert.AnError}) + + p := NewTracerProvider() + + // Enable self-observability + t.Setenv("OTEL_GO_X_SELF_OBSERVABILITY", "true") + + // Create a tracer to trigger instrument creation. 
+ tr := p.Tracer("test-tracer") + _ = tr + + require.Len(t, handler.errs, 1) + assert.ErrorIs(t, handler.errs[0], assert.AnError) +} + +type errMeterProvider struct { + metric.MeterProvider + + err error +} + +func (mp *errMeterProvider) Meter(string, ...metric.MeterOption) metric.Meter { + return &errMeter{err: mp.err} +} + +type errMeter struct { + metric.Meter + + err error +} + +func (m *errMeter) Int64Counter(string, ...metric.Int64CounterOption) (metric.Int64Counter, error) { + return nil, m.err +} + +func (m *errMeter) Int64UpDownCounter(string, ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { + return nil, m.err +} diff --git a/sdk/trace/sampling.go b/sdk/trace/sampling.go index aa7b262d0d9..689663d48b2 100644 --- a/sdk/trace/sampling.go +++ b/sdk/trace/sampling.go @@ -110,14 +110,14 @@ func TraceIDRatioBased(fraction float64) Sampler { type alwaysOnSampler struct{} -func (as alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult { +func (alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult { return SamplingResult{ Decision: RecordAndSample, Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), } } -func (as alwaysOnSampler) Description() string { +func (alwaysOnSampler) Description() string { return "AlwaysOnSampler" } @@ -131,14 +131,14 @@ func AlwaysSample() Sampler { type alwaysOffSampler struct{} -func (as alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult { +func (alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult { return SamplingResult{ Decision: Drop, Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), } } -func (as alwaysOffSampler) Description() string { +func (alwaysOffSampler) Description() string { return "AlwaysOffSampler" } diff --git a/sdk/trace/sampling_test.go b/sdk/trace/sampling_test.go index c871ecf4f70..b715d166318 100644 --- a/sdk/trace/sampling_test.go +++ b/sdk/trace/sampling_test.go @@ -180,14 +180,14 @@ func 
TestTraceIdRatioSamplesInclusively(t *testing.T) { ) idg := defaultIDGenerator() - for i := 0; i < numSamplers; i++ { + for range numSamplers { ratioLo, ratioHi := rand.Float64(), rand.Float64() if ratioHi < ratioLo { ratioLo, ratioHi = ratioHi, ratioLo } samplerHi := TraceIDRatioBased(ratioHi) samplerLo := TraceIDRatioBased(ratioLo) - for j := 0; j < numTraces; j++ { + for range numTraces { traceID, _ := idg.NewIDs(context.Background()) params := SamplingParameters{TraceID: traceID} diff --git a/sdk/trace/simple_span_processor.go b/sdk/trace/simple_span_processor.go index 664e13e03f0..411d9ccdd78 100644 --- a/sdk/trace/simple_span_processor.go +++ b/sdk/trace/simple_span_processor.go @@ -39,7 +39,7 @@ func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor { } // OnStart does nothing. -func (ssp *simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} +func (*simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} // OnEnd immediately exports a ReadOnlySpan. func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) { @@ -104,13 +104,13 @@ func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error { } // ForceFlush does nothing as there is no data to flush. -func (ssp *simpleSpanProcessor) ForceFlush(context.Context) error { +func (*simpleSpanProcessor) ForceFlush(context.Context) error { return nil } // MarshalLog is the marshaling function used by the logging system to represent // this Span Processor. 
-func (ssp *simpleSpanProcessor) MarshalLog() interface{} { +func (ssp *simpleSpanProcessor) MarshalLog() any { return struct { Type string Exporter SpanExporter diff --git a/sdk/trace/simple_span_processor_test.go b/sdk/trace/simple_span_processor_test.go index 137a37b3f0c..4aa4ea39c88 100644 --- a/sdk/trace/simple_span_processor_test.go +++ b/sdk/trace/simple_span_processor_test.go @@ -18,7 +18,7 @@ type simpleTestExporter struct { shutdown bool } -func (t *simpleTestExporter) ExportSpans(ctx context.Context, spans []ReadOnlySpan) error { +func (t *simpleTestExporter) ExportSpans(_ context.Context, spans []ReadOnlySpan) error { t.spans = append(t.spans, spans...) return nil } diff --git a/sdk/trace/snapshot.go b/sdk/trace/snapshot.go index d511d0f271f..63aa337800c 100644 --- a/sdk/trace/snapshot.go +++ b/sdk/trace/snapshot.go @@ -35,7 +35,7 @@ type snapshot struct { var _ ReadOnlySpan = snapshot{} -func (s snapshot) private() {} +func (snapshot) private() {} // Name returns the name of the span. func (s snapshot) Name() string { diff --git a/sdk/trace/span.go b/sdk/trace/span.go index 1785a4bbb0a..b376051fbb8 100644 --- a/sdk/trace/span.go +++ b/sdk/trace/span.go @@ -20,7 +20,7 @@ import ( "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) @@ -61,6 +61,7 @@ type ReadOnlySpan interface { InstrumentationScope() instrumentation.Scope // InstrumentationLibrary returns information about the instrumentation // library that created the span. + // // Deprecated: please use InstrumentationScope instead. InstrumentationLibrary() instrumentation.Library //nolint:staticcheck // This method needs to be define for backwards compatibility // Resource returns information about the entity that produced the span. 
@@ -165,7 +166,7 @@ func (s *recordingSpan) SpanContext() trace.SpanContext { return s.spanContext } -// IsRecording returns if this span is being recorded. If this span has ended +// IsRecording reports whether this span is being recorded. If this span has ended // this will return false. func (s *recordingSpan) IsRecording() bool { if s == nil { @@ -177,7 +178,7 @@ func (s *recordingSpan) IsRecording() bool { return s.isRecording() } -// isRecording returns if this span is being recorded. If this span has ended +// isRecording reports whether this span is being recorded. If this span has ended // this will return false. // // This method assumes s.mu.Lock is held by the caller. @@ -495,6 +496,16 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) { } s.mu.Unlock() + if s.tracer.selfObservabilityEnabled { + defer func() { + // Add the span to the context to ensure the metric is recorded + // with the correct span context. + ctx := trace.ContextWithSpan(context.Background(), s) + set := spanLiveSet(s.spanContext.IsSampled()) + s.tracer.spanLiveMetric.AddSet(ctx, -1, set) + }() + } + sps := s.tracer.provider.getSpanProcessors() if len(sps) == 0 { return @@ -545,7 +556,7 @@ func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) { s.addEvent(semconv.ExceptionEventName, opts...) } -func typeStr(i interface{}) string { +func typeStr(i any) string { t := reflect.TypeOf(i) if t.PkgPath() == "" && t.Name() == "" { // Likely a builtin type. 
diff --git a/sdk/trace/span_limits_test.go b/sdk/trace/span_limits_test.go index 871a4fe4b85..3e5e7b2ae17 100644 --- a/sdk/trace/span_limits_test.go +++ b/sdk/trace/span_limits_test.go @@ -129,10 +129,10 @@ func TestSettingSpanLimits(t *testing.T) { type recorder []ReadOnlySpan -func (r *recorder) OnStart(context.Context, ReadWriteSpan) {} -func (r *recorder) OnEnd(s ReadOnlySpan) { *r = append(*r, s) } -func (r *recorder) ForceFlush(context.Context) error { return nil } -func (r *recorder) Shutdown(context.Context) error { return nil } +func (*recorder) OnStart(context.Context, ReadWriteSpan) {} +func (r *recorder) OnEnd(s ReadOnlySpan) { *r = append(*r, s) } +func (*recorder) ForceFlush(context.Context) error { return nil } +func (*recorder) Shutdown(context.Context) error { return nil } func testSpanLimits(t *testing.T, limits SpanLimits) ReadOnlySpan { rec := new(recorder) diff --git a/sdk/trace/span_processor_annotator_example_test.go b/sdk/trace/span_processor_annotator_example_test.go index e963afc5c7c..7f74b6f0350 100644 --- a/sdk/trace/span_processor_annotator_example_test.go +++ b/sdk/trace/span_processor_annotator_example_test.go @@ -34,9 +34,9 @@ type Annotator struct { } func (a Annotator) OnStart(_ context.Context, s ReadWriteSpan) { s.SetAttributes(a.AttrsFunc()...) 
} -func (a Annotator) Shutdown(context.Context) error { return nil } -func (a Annotator) ForceFlush(context.Context) error { return nil } -func (a Annotator) OnEnd(s ReadOnlySpan) { +func (Annotator) Shutdown(context.Context) error { return nil } +func (Annotator) ForceFlush(context.Context) error { return nil } +func (Annotator) OnEnd(s ReadOnlySpan) { attr := s.Attributes()[0] fmt.Printf("%s: %s\n", attr.Key, attr.Value.AsString()) } diff --git a/sdk/trace/span_processor_test.go b/sdk/trace/span_processor_test.go index 4bff6972759..81aec7541c1 100644 --- a/sdk/trace/span_processor_test.go +++ b/sdk/trace/span_processor_test.go @@ -52,7 +52,7 @@ func (t *testSpanProcessor) OnEnd(s ReadOnlySpan) { t.spansEnded = append(t.spansEnded, s) } -func (t *testSpanProcessor) Shutdown(_ context.Context) error { +func (t *testSpanProcessor) Shutdown(context.Context) error { if t == nil { return nil } diff --git a/sdk/trace/span_test.go b/sdk/trace/span_test.go index 5a0355c0bad..ac9f9a44b03 100644 --- a/sdk/trace/span_test.go +++ b/sdk/trace/span_test.go @@ -373,7 +373,7 @@ func TestLogDropAttrs(t *testing.T) { func BenchmarkRecordingSpanSetAttributes(b *testing.B) { var attrs []attribute.KeyValue - for i := 0; i < 100; i++ { + for i := range 100 { attr := attribute.String(fmt.Sprintf("hello.attrib%d", i), fmt.Sprintf("goodbye.attrib%d", i)) attrs = append(attrs, attr) } @@ -400,18 +400,42 @@ func BenchmarkRecordingSpanSetAttributes(b *testing.B) { } func BenchmarkSpanEnd(b *testing.B) { - tracer := NewTracerProvider().Tracer("") + cases := []struct { + name string + env map[string]string + }{ + { + name: "Default", + }, + { + name: "SelfObservabilityEnabled", + env: map[string]string{ + "OTEL_GO_X_SELF_OBSERVABILITY": "True", + }, + }, + } + ctx := trace.ContextWithSpanContext(context.Background(), trace.SpanContext{}) - spans := make([]trace.Span, b.N) - for i := 0; i < b.N; i++ { - _, span := tracer.Start(ctx, "") - spans[i] = span - } + for _, c := range cases { + 
b.Run(c.name, func(b *testing.B) { + for k, v := range c.env { + b.Setenv(k, v) + } + + tracer := NewTracerProvider().Tracer("") + + spans := make([]trace.Span, b.N) + for i := 0; i < b.N; i++ { + _, span := tracer.Start(ctx, "") + spans[i] = span + } - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - spans[i].End() + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + spans[i].End() + } + }) } } diff --git a/sdk/trace/trace_test.go b/sdk/trace/trace_test.go index 77e9f5c8630..7da0d13d643 100644 --- a/sdk/trace/trace_test.go +++ b/sdk/trace/trace_test.go @@ -22,9 +22,14 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" "go.opentelemetry.io/otel/trace" ) @@ -156,7 +161,7 @@ func (ts *testSampler) ShouldSample(p SamplingParameters) SamplingResult { } } -func (ts testSampler) Description() string { +func (testSampler) Description() string { return "testSampler" } @@ -278,13 +283,12 @@ func TestSampling(t *testing.T) { "SampledParentSpanWithParentTraceIdRatioBased_.50": {sampler: ParentBased(TraceIDRatioBased(0.50)), expect: 1, parent: true, sampledParent: true}, "UnsampledParentSpanWithParentTraceIdRatioBased_.50": {sampler: ParentBased(TraceIDRatioBased(0.50)), expect: 0, parent: true, sampledParent: false}, } { - tc := tc t.Run(name, func(t *testing.T) { t.Parallel() p := NewTracerProvider(WithSampler(tc.sampler)) tr := p.Tracer("test") var sampled int - for i := 0; i < total; i++ { + for range total { ctx := context.Background() if tc.parent { tid, 
sid := idg.NewIDs(ctx) @@ -915,7 +919,7 @@ func TestSetSpanStatusWithoutMessageWhenStatusIsNotError(t *testing.T) { } } -func cmpDiff(x, y interface{}) string { +func cmpDiff(x, y any) string { return cmp.Diff(x, y, cmp.AllowUnexported(snapshot{}), cmp.AllowUnexported(attribute.Value{}), @@ -1144,7 +1148,7 @@ func TestChildSpanCount(t *testing.T) { } } -func TestNilSpanEnd(t *testing.T) { +func TestNilSpanEnd(*testing.T) { var span *recordingSpan span.End() } @@ -1474,7 +1478,6 @@ func TestWithResource(t *testing.T) { }, } for _, tc := range cases { - tc := tc t.Run(tc.name, func(t *testing.T) { te := NewTestExporter() defaultOptions := []TracerProviderOption{WithSyncer(te), WithSampler(AlwaysSample())} @@ -1796,7 +1799,7 @@ func (s *stateSampler) ShouldSample(p SamplingParameters) SamplingResult { return SamplingResult{Decision: decision, Tracestate: ts} } -func (s stateSampler) Description() string { +func (stateSampler) Description() string { return "stateSampler" } @@ -1821,7 +1824,7 @@ func TestSamplerTraceState(t *testing.T) { clearer := func(prefix string) Sampler { return &stateSampler{ prefix: prefix, - f: func(t trace.TraceState) trace.TraceState { return trace.TraceState{} }, + f: func(trace.TraceState) trace.TraceState { return trace.TraceState{} }, } } @@ -1890,7 +1893,6 @@ func TestSamplerTraceState(t *testing.T) { } for _, ts := range tests { - ts := ts t.Run(ts.name, func(t *testing.T) { te := NewTestExporter() tp := NewTracerProvider(WithSampler(ts.sampler), WithSyncer(te), WithResource(resource.Empty())) @@ -1928,8 +1930,8 @@ func TestSamplerTraceState(t *testing.T) { } type testIDGenerator struct { - traceID int - spanID int + traceID uint64 + spanID uint64 } func (gen *testIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) { @@ -1941,7 +1943,7 @@ func (gen *testIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.Sp return traceID, spanID } -func (gen *testIDGenerator) NewSpanID(ctx context.Context, traceID 
trace.TraceID) trace.SpanID { +func (gen *testIDGenerator) NewSpanID(context.Context, trace.TraceID) trace.SpanID { spanIDHex := fmt.Sprintf("%016x", gen.spanID) spanID, _ := trace.SpanIDFromHex(spanIDHex) gen.spanID++ @@ -1952,8 +1954,8 @@ var _ IDGenerator = (*testIDGenerator)(nil) func TestWithIDGenerator(t *testing.T) { const ( - startTraceID = 1 - startSpanID = 10 + startTraceID = 0x1001_1001_1001_1001 + startSpanID = 0x2001_2001_2001_2001 numSpan = 5 ) @@ -1963,22 +1965,89 @@ func TestWithIDGenerator(t *testing.T) { WithSyncer(te), WithIDGenerator(gen), ) - for i := 0; i < numSpan; i++ { + for i := range numSpan { func() { _, span := tp.Tracer(t.Name()).Start(context.Background(), strconv.Itoa(i)) defer span.End() gotSpanID, err := strconv.ParseUint(span.SpanContext().SpanID().String(), 16, 64) require.NoError(t, err) - assert.Equal(t, uint64(startSpanID+i), gotSpanID) + assert.Equal(t, uint64(startSpanID)+uint64(i), gotSpanID) gotTraceID, err := strconv.ParseUint(span.SpanContext().TraceID().String(), 16, 64) require.NoError(t, err) - assert.Equal(t, uint64(startTraceID+i), gotTraceID) + assert.Equal(t, uint64(startTraceID)+uint64(i), gotTraceID) }() } } +func TestIDsRoundTrip(t *testing.T) { + gen := defaultIDGenerator() + + for range 1000 { + traceID, spanID := gen.NewIDs(context.Background()) + gotTraceID, err := trace.TraceIDFromHex(traceID.String()) + assert.NoError(t, err) + assert.Equal(t, traceID, gotTraceID) + gotSpanID, err := trace.SpanIDFromHex(spanID.String()) + assert.NoError(t, err) + assert.Equal(t, spanID, gotSpanID) + } +} + +func TestIDConversionErrors(t *testing.T) { + for _, tt := range []struct { + name string + spanIDStr string + traceIDStr string + spanIDError string + traceIDError string + }{ + { + name: "slightly too long", + spanIDStr: sid.String() + "0", + spanIDError: "hex encoded span-id must have length equals to 16", + traceIDStr: tid.String() + "0", + traceIDError: "hex encoded trace-id must have length equals to 32", + }, + 
{ + name: "blank input", + spanIDStr: "", + spanIDError: "hex encoded span-id must have length equals to 16", + traceIDStr: "", + traceIDError: "hex encoded trace-id must have length equals to 32", + }, + { + name: "not hex", + spanIDStr: "unacceptablechar", + spanIDError: "trace-id and span-id can only contain [0-9a-f] characters, all lowercase", + traceIDStr: "completely unacceptablecharacter", + traceIDError: "trace-id and span-id can only contain [0-9a-f] characters, all lowercase", + }, + { + name: "upper-case hex", + spanIDStr: "DEADBEEFBAD0CAFE", + spanIDError: "trace-id and span-id can only contain [0-9a-f] characters, all lowercase", + traceIDStr: "DEADBEEFBAD0CAFEDEADBEEFBAD0CAFE", + traceIDError: "trace-id and span-id can only contain [0-9a-f] characters, all lowercase", + }, + { + name: "all zero", + spanIDStr: "0000000000000000", + spanIDError: "span-id can't be all zero", + traceIDStr: "00000000000000000000000000000000", + traceIDError: "trace-id can't be all zero", + }, + } { + t.Run(tt.name, func(t *testing.T) { + _, err := trace.SpanIDFromHex(tt.spanIDStr) + assert.ErrorContains(t, err, tt.spanIDError) + _, err = trace.TraceIDFromHex(tt.traceIDStr) + assert.ErrorContains(t, err, tt.traceIDError) + }) + } +} + func TestEmptyRecordingSpanAttributes(t *testing.T) { assert.Nil(t, (&recordingSpan{}).Attributes()) } @@ -2177,8 +2246,632 @@ func TestAddLinkToNonRecordingSpan(t *testing.T) { } } +func TestSelfObservability(t *testing.T) { + testCases := []struct { + name string + test func(t *testing.T, scopeMetrics func() metricdata.ScopeMetrics) + }{ + { + name: "SampledSpan", + test: func(t *testing.T, scopeMetrics func() metricdata.ScopeMetrics) { + tp := NewTracerProvider() + _, span := tp.Tracer("").Start(context.Background(), "StartSpan") + + want := metricdata.ScopeMetrics{ + Scope: instrumentation.Scope{ + Name: "go.opentelemetry.io/otel/sdk/trace", + Version: sdk.Version(), + SchemaURL: semconv.SchemaURL, + }, + Metrics: []metricdata.Metrics{ + { 
+ Name: otelconv.SDKSpanLive{}.Name(), + Description: otelconv.SDKSpanLive{}.Description(), + Unit: otelconv.SDKSpanLive{}.Unit(), + Data: metricdata.Sum[int64]{ + Temporality: metricdata.CumulativeTemporality, + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet( + otelconv.SDKSpanLive{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordAndSample, + ), + ), + Value: 1, + }, + }, + }, + }, + { + Name: otelconv.SDKSpanStarted{}.Name(), + Description: otelconv.SDKSpanStarted{}.Description(), + Unit: otelconv.SDKSpanStarted{}.Unit(), + Data: metricdata.Sum[int64]{ + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin( + otelconv.SpanParentOriginNone, + ), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordAndSample, + ), + ), + Value: 1, + }, + }, + }, + }, + }, + } + + got := scopeMetrics() + metricdatatest.AssertEqual( + t, + want, + got, + metricdatatest.IgnoreTimestamp(), + metricdatatest.IgnoreExemplars(), + ) + + span.End() + + want = metricdata.ScopeMetrics{ + Scope: instrumentation.Scope{ + Name: "go.opentelemetry.io/otel/sdk/trace", + Version: sdk.Version(), + SchemaURL: semconv.SchemaURL, + }, + Metrics: []metricdata.Metrics{ + { + Name: otelconv.SDKSpanLive{}.Name(), + Description: otelconv.SDKSpanLive{}.Description(), + Unit: otelconv.SDKSpanLive{}.Unit(), + Data: metricdata.Sum[int64]{ + Temporality: metricdata.CumulativeTemporality, + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet( + otelconv.SDKSpanLive{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordAndSample, + ), + ), + Value: 0, // No live spans at this point. 
+ }, + }, + }, + }, + { + Name: otelconv.SDKSpanStarted{}.Name(), + Description: otelconv.SDKSpanStarted{}.Description(), + Unit: otelconv.SDKSpanStarted{}.Unit(), + Data: metricdata.Sum[int64]{ + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin( + otelconv.SpanParentOriginNone, + ), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordAndSample, + ), + ), + Value: 1, + }, + }, + }, + }, + }, + } + got = scopeMetrics() + metricdatatest.AssertEqual( + t, + want, + got, + metricdatatest.IgnoreTimestamp(), + metricdatatest.IgnoreExemplars(), + ) + }, + }, + { + name: "NonRecordingSpan", + test: func(t *testing.T, scopeMetrics func() metricdata.ScopeMetrics) { + // Create a tracer provider with NeverSample sampler to get non-recording spans. + tp := NewTracerProvider(WithSampler(NeverSample())) + tp.Tracer("").Start(context.Background(), "NonRecordingSpan") + + want := metricdata.ScopeMetrics{ + Scope: instrumentation.Scope{ + Name: "go.opentelemetry.io/otel/sdk/trace", + Version: sdk.Version(), + SchemaURL: semconv.SchemaURL, + }, + Metrics: []metricdata.Metrics{ + { + Name: otelconv.SDKSpanStarted{}.Name(), + Description: otelconv.SDKSpanStarted{}.Description(), + Unit: otelconv.SDKSpanStarted{}.Unit(), + Data: metricdata.Sum[int64]{ + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin( + otelconv.SpanParentOriginNone, + ), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultDrop, + ), + ), + Value: 1, + }, + }, + }, + }, + }, + } + + got := scopeMetrics() + metricdatatest.AssertEqual( + t, + want, + got, + metricdatatest.IgnoreTimestamp(), + metricdatatest.IgnoreExemplars(), + ) + }, + }, + { + name: 
"OnlyRecordingSpan", + test: func(t *testing.T, scopeMetrics func() metricdata.ScopeMetrics) { + // Create a tracer provider with a record-only sampler to get spans that are recording but not sampled. + tp := NewTracerProvider(WithSampler(RecordingOnly())) + tp.Tracer("").Start(context.Background(), "OnlyRecordingSpan") + + want := metricdata.ScopeMetrics{ + Scope: instrumentation.Scope{ + Name: "go.opentelemetry.io/otel/sdk/trace", + Version: sdk.Version(), + SchemaURL: semconv.SchemaURL, + }, + Metrics: []metricdata.Metrics{ + { + Name: otelconv.SDKSpanLive{}.Name(), + Description: otelconv.SDKSpanLive{}.Description(), + Unit: otelconv.SDKSpanLive{}.Unit(), + Data: metricdata.Sum[int64]{ + Temporality: metricdata.CumulativeTemporality, + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet( + otelconv.SDKSpanLive{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordOnly, + ), + ), + Value: 1, + }, + }, + }, + }, + { + Name: otelconv.SDKSpanStarted{}.Name(), + Description: otelconv.SDKSpanStarted{}.Description(), + Unit: otelconv.SDKSpanStarted{}.Unit(), + Data: metricdata.Sum[int64]{ + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin( + otelconv.SpanParentOriginNone, + ), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordOnly, + ), + ), + Value: 1, + }, + }, + }, + }, + }, + } + + got := scopeMetrics() + metricdatatest.AssertEqual( + t, + want, + got, + metricdatatest.IgnoreTimestamp(), + metricdatatest.IgnoreExemplars(), + ) + }, + }, + { + name: "RemoteParentSpan", + test: func(t *testing.T, scopeMetrics func() metricdata.ScopeMetrics) { + // Create a remote parent context. + tid, _ := trace.TraceIDFromHex("01020304050607080102040810203040") + sid, _ := trace.SpanIDFromHex("0102040810203040") + remoteCtx := trace.ContextWithRemoteSpanContext(context.Background(),
+ 
trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: tid, + SpanID: sid, + TraceFlags: 0x1, + Remote: true, + })) + + tp := NewTracerProvider() + tp.Tracer("").Start(remoteCtx, "ChildSpan") + + want := metricdata.ScopeMetrics{ + Scope: instrumentation.Scope{ + Name: "go.opentelemetry.io/otel/sdk/trace", + Version: sdk.Version(), + SchemaURL: semconv.SchemaURL, + }, + Metrics: []metricdata.Metrics{ + { + Name: otelconv.SDKSpanLive{}.Name(), + Description: otelconv.SDKSpanLive{}.Description(), + Unit: otelconv.SDKSpanLive{}.Unit(), + Data: metricdata.Sum[int64]{ + Temporality: metricdata.CumulativeTemporality, + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet( + otelconv.SDKSpanLive{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordAndSample, + ), + ), + Value: 1, + }, + }, + }, + }, + { + Name: otelconv.SDKSpanStarted{}.Name(), + Description: otelconv.SDKSpanStarted{}.Description(), + Unit: otelconv.SDKSpanStarted{}.Unit(), + Data: metricdata.Sum[int64]{ + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin( + otelconv.SpanParentOriginRemote, + ), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordAndSample, + ), + ), + Value: 1, + }, + }, + }, + }, + }, + } + got := scopeMetrics() + metricdatatest.AssertEqual( + t, + want, + got, + metricdatatest.IgnoreTimestamp(), + metricdatatest.IgnoreExemplars(), + ) + }, + }, + { + name: "LocalParentSpan", + test: func(t *testing.T, scopeMetrics func() metricdata.ScopeMetrics) { + tp := NewTracerProvider() + ctx, parentSpan := tp.Tracer("").Start(context.Background(), "ParentSpan") + _, childSpan := tp.Tracer("").Start(ctx, "ChildSpan") + + want := metricdata.ScopeMetrics{ + Scope: instrumentation.Scope{ + Name: "go.opentelemetry.io/otel/sdk/trace", + Version: sdk.Version(), + SchemaURL: 
semconv.SchemaURL, + }, + Metrics: []metricdata.Metrics{ + { + Name: otelconv.SDKSpanLive{}.Name(), + Description: otelconv.SDKSpanLive{}.Description(), + Unit: otelconv.SDKSpanLive{}.Unit(), + Data: metricdata.Sum[int64]{ + Temporality: metricdata.CumulativeTemporality, + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet( + otelconv.SDKSpanLive{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordAndSample, + ), + ), + Value: 2, // Both parent and child spans are active. + }, + }, + }, + }, + { + Name: otelconv.SDKSpanStarted{}.Name(), + Description: otelconv.SDKSpanStarted{}.Description(), + Unit: otelconv.SDKSpanStarted{}.Unit(), + Data: metricdata.Sum[int64]{ + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin( + otelconv.SpanParentOriginNone, + ), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordAndSample, + ), + ), + Value: 1, // Parent span with no parent of its own. + }, + { + Attributes: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin( + otelconv.SpanParentOriginLocal, + ), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordAndSample, + ), + ), + Value: 1, // Child span with local parent. 
+ }, + }, + }, + }, + }, + } + + got := scopeMetrics() + metricdatatest.AssertEqual( + t, + want, + got, + metricdatatest.IgnoreTimestamp(), + metricdatatest.IgnoreExemplars(), + ) + + childSpan.End() + parentSpan.End() + + want = metricdata.ScopeMetrics{ + Scope: instrumentation.Scope{ + Name: "go.opentelemetry.io/otel/sdk/trace", + Version: sdk.Version(), + SchemaURL: semconv.SchemaURL, + }, + Metrics: []metricdata.Metrics{ + { + Name: otelconv.SDKSpanLive{}.Name(), + Description: otelconv.SDKSpanLive{}.Description(), + Unit: otelconv.SDKSpanLive{}.Unit(), + Data: metricdata.Sum[int64]{ + Temporality: metricdata.CumulativeTemporality, + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet( + otelconv.SDKSpanLive{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordAndSample, + ), + ), + Value: 0, // No live spans after ending both. + }, + }, + }, + }, + { + Name: otelconv.SDKSpanStarted{}.Name(), + Description: otelconv.SDKSpanStarted{}.Description(), + Unit: otelconv.SDKSpanStarted{}.Unit(), + Data: metricdata.Sum[int64]{ + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin( + otelconv.SpanParentOriginNone, + ), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordAndSample, + ), + ), + Value: 1, + }, + { + Attributes: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin( + otelconv.SpanParentOriginLocal, + ), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordAndSample, + ), + ), + Value: 1, + }, + }, + }, + }, + }, + } + + got = scopeMetrics() + metricdatatest.AssertEqual( + t, + want, + got, + metricdatatest.IgnoreTimestamp(), + metricdatatest.IgnoreExemplars(), + ) + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + t.Setenv("OTEL_GO_X_SELF_OBSERVABILITY", "True") 
+ prev := otel.GetMeterProvider() + t.Cleanup(func() { otel.SetMeterProvider(prev) }) + + r := metric.NewManualReader() + mp := metric.NewMeterProvider(metric.WithReader(r)) + otel.SetMeterProvider(mp) + + scopeMetrics := func() metricdata.ScopeMetrics { + var got metricdata.ResourceMetrics + err := r.Collect(context.Background(), &got) + require.NoError(t, err) + require.Len(t, got.ScopeMetrics, 1) + return got.ScopeMetrics[0] + } + tc.test(t, scopeMetrics) + }) + } +} + +// ctxKeyT is a custom context value type used for testing context propagation. +type ctxKeyT string + +// ctxKey is a context key used to store and retrieve values in the context. +var ctxKey = ctxKeyT("testKey") + +func TestSelfObservabilityContextPropagation(t *testing.T) { + t.Setenv("OTEL_GO_X_SELF_OBSERVABILITY", "True") + prev := otel.GetMeterProvider() + t.Cleanup(func() { otel.SetMeterProvider(prev) }) + + // Approximate number of expected measurements. This is not a strict + // requirement, but it should be enough to ensure no backpressure. + const count = 3 * 2 // 3 measurements per span, 2 spans (parent and child). + ctxCh, fltr := filterFn(count) + + const want = "testValue" + n := make(chan int) + go func() { + // Validate the span context is propagated to all measurements by + // testing the context passed to the registered exemplar filter. This + // filter receives the measurement context in the standard metric SDK + // that we have registered. + + // Count of how many contexts were received. + var count int + + for ctx := range ctxCh { + count++ + + s := trace.SpanFromContext(ctx) + + // All spans should have a valid span context. This should be + // passed to the measurements in all cases. + isValid := s.SpanContext().IsValid() + assert.True(t, isValid, "Context should have a valid span") + + if s.IsRecording() { + // Check if the context value is propagated correctly for Span + // starts. 
The Span end operation does not receive any user + // context so do not check this if the span is not recording + // (i.e. end operation). + + got := ctx.Value(ctxKey) + assert.Equal(t, want, got, "Context value not propagated") + } + } + n <- count + }() + + // At least one reader is required to not get a no-op MeterProvider and + // short-circuit any instrumentation measurements. + r := metric.NewManualReader() + mp := metric.NewMeterProvider( + metric.WithExemplarFilter(fltr), + metric.WithReader(r), + ) + otel.SetMeterProvider(mp) + + tp := NewTracerProvider() + + wrap := func(parentCtx context.Context, name string, fn func(context.Context)) { + const tracer = "TestSelfObservabilityContextPropagation" + ctx, s := tp.Tracer(tracer).Start(parentCtx, name) + defer s.End() + fn(ctx) + } + + ctx := context.WithValue(context.Background(), ctxKey, want) + wrap(ctx, "parent", func(ctx context.Context) { + wrap(ctx, "child", func(context.Context) {}) + }) + + require.NoError(t, tp.Shutdown(context.Background())) + + // The TracerProvider shutdown returned, no more measurements will be sent + // to the exemplar filter. + close(ctxCh) + + assert.Positive(t, <-n, "Expected at least 1 context propagations") +} + +// filterFn returns a channel that receives contexts passed to the returned +// exemplar filter function. +func filterFn(n int) (chan context.Context, func(ctx context.Context) bool) { + out := make(chan context.Context, n) + return out, func(ctx context.Context) bool { + out <- ctx + return true + } +} + +// RecordingOnly creates a Sampler that samples no traces, but enables recording. +// The created sampler maintains any tracestate from the parent span context. +func RecordingOnly() Sampler { + return recordOnlySampler{} +} + +type recordOnlySampler struct{} + +// ShouldSample implements Sampler interface. It always returns Record but not Sample. 
+func (recordOnlySampler) ShouldSample(p SamplingParameters) SamplingResult { + psc := trace.SpanContextFromContext(p.ParentContext) + return SamplingResult{ + Decision: RecordOnly, + Tracestate: psc.TraceState(), + } +} + +// Description returns description of the sampler. +func (recordOnlySampler) Description() string { + return "RecordingOnly" +} + +func TestRecordOnlySampler(t *testing.T) { + te := NewTestExporter() + tp := NewTracerProvider(WithSyncer(te), WithSampler(RecordingOnly())) + + _, span := tp.Tracer("RecordOnly").Start(context.Background(), "test-span") + + assert.True(t, span.IsRecording(), "span should be recording") + assert.False(t, span.SpanContext().IsSampled(), "span should not be sampled") + + span.End() + + assert.Zero(t, te.Len(), "no spans should be exported") +} + func BenchmarkTraceStart(b *testing.B) { - tracer := NewTracerProvider().Tracer("") ctx := trace.ContextWithSpanContext(context.Background(), trace.SpanContext{}) l1 := trace.Link{SpanContext: trace.SpanContext{}, Attributes: []attribute.KeyValue{}} @@ -2188,6 +2881,7 @@ func BenchmarkTraceStart(b *testing.B) { for _, tt := range []struct { name string + env map[string]string options []trace.SpanStartOption }{ { @@ -2208,8 +2902,20 @@ func BenchmarkTraceStart(b *testing.B) { ), }, }, + { + name: "SelfObservabilityEnabled", + env: map[string]string{ + "OTEL_GO_X_SELF_OBSERVABILITY": "True", + }, + }, } { b.Run(tt.name, func(b *testing.B) { + for k, v := range tt.env { + b.Setenv(k, v) + } + + tracer := NewTracerProvider().Tracer("") + spans := make([]trace.Span, b.N) b.ReportAllocs() b.ResetTimer() diff --git a/sdk/trace/tracer.go b/sdk/trace/tracer.go index 0b65ae9ab70..e965c4cce86 100644 --- a/sdk/trace/tracer.go +++ b/sdk/trace/tracer.go @@ -7,7 +7,9 @@ import ( "context" "time" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv" "go.opentelemetry.io/otel/trace" 
"go.opentelemetry.io/otel/trace/embedded" ) @@ -17,6 +19,10 @@ type tracer struct { provider *TracerProvider instrumentationScope instrumentation.Scope + + selfObservabilityEnabled bool + spanLiveMetric otelconv.SDKSpanLive + spanStartedMetric otelconv.SDKSpanStarted } var _ trace.Tracer = &tracer{} @@ -46,17 +52,25 @@ func (tr *tracer) Start( } s := tr.newSpan(ctx, name, &config) + newCtx := trace.ContextWithSpan(ctx, s) + if tr.selfObservabilityEnabled { + psc := trace.SpanContextFromContext(ctx) + set := spanStartedSet(psc, s) + tr.spanStartedMetric.AddSet(newCtx, 1, set) + } + if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() { sps := tr.provider.getSpanProcessors() for _, sp := range sps { + // Use original context. sp.sp.OnStart(ctx, rw) } } if rtt, ok := s.(runtimeTracer); ok { - ctx = rtt.runtimeTrace(ctx) + newCtx = rtt.runtimeTrace(newCtx) } - return trace.ContextWithSpan(ctx, s), s + return newCtx, s } type runtimeTracer interface { @@ -112,11 +126,12 @@ func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanCo if !isRecording(samplingResult) { return tr.newNonRecordingSpan(sc) } - return tr.newRecordingSpan(psc, sc, name, samplingResult, config) + return tr.newRecordingSpan(ctx, psc, sc, name, samplingResult, config) } // newRecordingSpan returns a new configured recordingSpan. func (tr *tracer) newRecordingSpan( + ctx context.Context, psc, sc trace.SpanContext, name string, sr SamplingResult, @@ -153,6 +168,14 @@ func (tr *tracer) newRecordingSpan( s.SetAttributes(sr.Attributes...) s.SetAttributes(config.Attributes()...) + if tr.selfObservabilityEnabled { + // Propagate any existing values from the context with the new span to + // the measurement context. 
+ ctx = trace.ContextWithSpan(ctx, s) + set := spanLiveSet(s.spanContext.IsSampled()) + tr.spanLiveMetric.AddSet(ctx, 1, set) + } + return s } @@ -160,3 +183,112 @@ func (tr *tracer) newRecordingSpan( func (tr *tracer) newNonRecordingSpan(sc trace.SpanContext) nonRecordingSpan { return nonRecordingSpan{tracer: tr, sc: sc} } + +type parentState int + +const ( + parentStateNoParent parentState = iota + parentStateLocalParent + parentStateRemoteParent +) + +type samplingState int + +const ( + samplingStateDrop samplingState = iota + samplingStateRecordOnly + samplingStateRecordAndSample +) + +type spanStartedSetKey struct { + parent parentState + sampling samplingState +} + +var spanStartedSetCache = map[spanStartedSetKey]attribute.Set{ + {parentStateNoParent, samplingStateDrop}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), + ), + {parentStateLocalParent, samplingStateDrop}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), + ), + {parentStateRemoteParent, samplingStateDrop}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop), + ), + + {parentStateNoParent, samplingStateRecordOnly}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), + ), + {parentStateLocalParent, samplingStateRecordOnly}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), + ), + {parentStateRemoteParent, 
samplingStateRecordOnly}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly), + ), + + {parentStateNoParent, samplingStateRecordAndSample}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), + ), + {parentStateLocalParent, samplingStateRecordAndSample}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), + ), + {parentStateRemoteParent, samplingStateRecordAndSample}: attribute.NewSet( + otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote), + otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample), + ), +} + +func spanStartedSet(psc trace.SpanContext, span trace.Span) attribute.Set { + key := spanStartedSetKey{ + parent: parentStateNoParent, + sampling: samplingStateDrop, + } + + if psc.IsValid() { + if psc.IsRemote() { + key.parent = parentStateRemoteParent + } else { + key.parent = parentStateLocalParent + } + } + + if span.IsRecording() { + if span.SpanContext().IsSampled() { + key.sampling = samplingStateRecordAndSample + } else { + key.sampling = samplingStateRecordOnly + } + } + + return spanStartedSetCache[key] +} + +type spanLiveSetKey struct { + sampled bool +} + +var spanLiveSetCache = map[spanLiveSetKey]attribute.Set{ + {true}: attribute.NewSet( + otelconv.SDKSpanLive{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordAndSample, + ), + ), + {false}: attribute.NewSet( + otelconv.SDKSpanLive{}.AttrSpanSamplingResult( + otelconv.SpanSamplingResultRecordOnly, + ), + ), +} + +func spanLiveSet(sampled bool) attribute.Set { + key := spanLiveSetKey{sampled: sampled} + 
return spanLiveSetCache[key] +} diff --git a/sdk/trace/tracetest/exporter.go b/sdk/trace/tracetest/exporter.go index 07117495a8e..e12fa67e63b 100644 --- a/sdk/trace/tracetest/exporter.go +++ b/sdk/trace/tracetest/exporter.go @@ -25,10 +25,10 @@ func NewNoopExporter() *NoopExporter { type NoopExporter struct{} // ExportSpans handles export of spans by dropping them. -func (nsb *NoopExporter) ExportSpans(context.Context, []trace.ReadOnlySpan) error { return nil } +func (*NoopExporter) ExportSpans(context.Context, []trace.ReadOnlySpan) error { return nil } // Shutdown stops the exporter by doing nothing. -func (nsb *NoopExporter) Shutdown(context.Context) error { return nil } +func (*NoopExporter) Shutdown(context.Context) error { return nil } var _ trace.SpanExporter = (*InMemoryExporter)(nil) diff --git a/sdk/trace/tracetest/exporter_test.go b/sdk/trace/tracetest/exporter_test.go index ff875badb98..c60bce10ec7 100644 --- a/sdk/trace/tracetest/exporter_test.go +++ b/sdk/trace/tracetest/exporter_test.go @@ -28,7 +28,7 @@ func TestNewInMemoryExporter(t *testing.T) { assert.Empty(t, imsb.GetSpans()) input := make(SpanStubs, 10) - for i := 0; i < 10; i++ { + for i := range 10 { input[i] = SpanStub{Name: fmt.Sprintf("span %d", i)} } require.NoError(t, imsb.ExportSpans(context.Background(), input.Snapshots())) diff --git a/sdk/trace/tracetest/recorder.go b/sdk/trace/tracetest/recorder.go index 732669a17ad..ca63038f34e 100644 --- a/sdk/trace/tracetest/recorder.go +++ b/sdk/trace/tracetest/recorder.go @@ -47,14 +47,14 @@ func (sr *SpanRecorder) OnEnd(s sdktrace.ReadOnlySpan) { // Shutdown does nothing. // // This method is safe to be called concurrently. -func (sr *SpanRecorder) Shutdown(context.Context) error { +func (*SpanRecorder) Shutdown(context.Context) error { return nil } // ForceFlush does nothing. // // This method is safe to be called concurrently. 
-func (sr *SpanRecorder) ForceFlush(context.Context) error { +func (*SpanRecorder) ForceFlush(context.Context) error { return nil } diff --git a/sdk/trace/tracetest/span.go b/sdk/trace/tracetest/span.go index cd2cc30ca2d..12b384b0884 100644 --- a/sdk/trace/tracetest/span.go +++ b/sdk/trace/tracetest/span.go @@ -37,7 +37,7 @@ func (s SpanStubs) Snapshots() []tracesdk.ReadOnlySpan { } ro := make([]tracesdk.ReadOnlySpan, len(s)) - for i := 0; i < len(s); i++ { + for i := range s { ro[i] = s[i].Snapshot() } return ro diff --git a/sdk/trace/util_test.go b/sdk/trace/util_test.go index 57d8bc0f4da..0b54d985d1c 100644 --- a/sdk/trace/util_test.go +++ b/sdk/trace/util_test.go @@ -11,13 +11,12 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func basicTracerProvider(t *testing.T) *TracerProvider { @@ -72,7 +71,7 @@ func (h *harness) testTracerProvider(subjectFactory func() trace.TracerProvider) done := make(chan struct{}) go func(tp trace.TracerProvider) { var wg sync.WaitGroup - for i := 0; i < 20; i++ { + for i := range 20 { wg.Add(1) go func(name, version string) { _ = tp.Tracer(name, trace.WithInstrumentationVersion(version)) @@ -231,7 +230,7 @@ func (h *harness) testTracer(subjectFactory func() trace.Tracer) { done := make(chan struct{}) go func(tp trace.Tracer) { var wg sync.WaitGroup - for i := 0; i < 20; i++ { + for i := range 20 { wg.Add(1) go func(name string) { defer wg.Done() diff --git a/sdk/trace/version.go b/sdk/trace/version.go deleted file mode 100644 index b84dd2c5eef..00000000000 --- a/sdk/trace/version.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -// version is the current 
release version of the metric SDK in use. -func version() string { - return "1.16.0-rc.1" -} diff --git a/sdk/trace/version_test.go b/sdk/trace/version_test.go deleted file mode 100644 index e2a353795ee..00000000000 --- a/sdk/trace/version_test.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package trace - -import ( - "regexp" - "testing" - - "github.com/stretchr/testify/assert" -) - -var versionRegex = regexp.MustCompile(`^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)` + - `(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)` + - `(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?` + - `(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`) - -func TestVersionSemver(t *testing.T) { - v := version() - assert.Regexp(t, versionRegex, v) -} diff --git a/sdk/version.go b/sdk/version.go index c0217af6b9a..7f97cc31e51 100644 --- a/sdk/version.go +++ b/sdk/version.go @@ -6,5 +6,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { - return "1.37.0" + return "1.38.0" } diff --git a/semconv/internal/http.go b/semconv/internal/http.go index e9eb577345b..58b5eddef66 100644 --- a/semconv/internal/http.go +++ b/semconv/internal/http.go @@ -104,7 +104,7 @@ func (sc *SemanticConventions) NetAttributesFromHTTPRequest( // It handles both IPv4 and IPv6 addresses. If the host portion is not recognized // as a valid IPv4 or IPv6 address, the `ip` result will be empty and the // host portion will instead be returned in `name`. 
-func hostIPNamePort(hostWithPort string) (ip string, name string, port int) { +func hostIPNamePort(hostWithPort string) (ip, name string, port int) { var ( hostPart, portPart string parsedPort uint64 diff --git a/semconv/internal/http_test.go b/semconv/internal/http_test.go index bf06af608a7..211f8bc764d 100644 --- a/semconv/internal/http_test.go +++ b/semconv/internal/http_test.go @@ -10,13 +10,12 @@ import ( "strings" "testing" - "go.opentelemetry.io/otel/trace" - "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" ) type tlsOption int @@ -898,7 +897,7 @@ func TestHTTPAttributesFromHTTPStatusCode(t *testing.T) { } func TestSpanStatusFromHTTPStatusCode(t *testing.T) { - for code := 0; code < 1000; code++ { + for code := range 1000 { expected := getExpectedCodeForHTTPCode(code, trace.SpanKindClient) got, msg := SpanStatusFromHTTPStatusCode(code) assert.Equalf(t, expected, got, "%s vs %s", expected, got) @@ -913,7 +912,7 @@ func TestSpanStatusFromHTTPStatusCode(t *testing.T) { } func TestSpanStatusFromHTTPStatusCodeAndSpanKind(t *testing.T) { - for code := 0; code < 1000; code++ { + for code := range 1000 { expected := getExpectedCodeForHTTPCode(code, trace.SpanKindClient) got, msg := SpanStatusFromHTTPStatusCodeAndSpanKind(code, trace.SpanKindClient) assert.Equalf(t, expected, got, "%s vs %s", expected, got) @@ -954,7 +953,7 @@ func getExpectedCodeForHTTPCode(code int, spanKind trace.SpanKind) codes.Code { return codes.Error } -func assertElementsMatch(t *testing.T, expected, got []attribute.KeyValue, format string, args ...interface{}) { +func assertElementsMatch(t *testing.T, expected, got []attribute.KeyValue, format string, args ...any) { if !assert.ElementsMatchf(t, expected, got, format, args...) 
{ t.Log("expected:", kvStr(expected)) t.Log("got:", kvStr(got)) @@ -1008,7 +1007,7 @@ func kvStr(kvs []attribute.KeyValue) string { if idx > 0 { _, _ = sb.WriteString(", ") } - _, _ = sb.WriteString((string)(attr.Key)) + _, _ = sb.WriteString(string(attr.Key)) _, _ = sb.WriteString(": ") _, _ = sb.WriteString(attr.Value.Emit()) } diff --git a/semconv/internal/v2/http.go b/semconv/internal/v2/http.go index 3709ef09933..5d5f12a56e5 100644 --- a/semconv/internal/v2/http.go +++ b/semconv/internal/v2/http.go @@ -91,8 +91,7 @@ func (c *HTTPConv) ClientRequest(req *http.Request) []attribute.KeyValue { } attrs := make([]attribute.KeyValue, 0, n) - attrs = append(attrs, c.method(req.Method)) - attrs = append(attrs, c.proto(req.Proto)) + attrs = append(attrs, c.method(req.Method), c.proto(req.Proto)) var u string if req.URL != nil { @@ -103,9 +102,11 @@ func (c *HTTPConv) ClientRequest(req *http.Request) []attribute.KeyValue { // Restore any username/password info that was removed. req.URL.User = userinfo } - attrs = append(attrs, c.HTTPURLKey.String(u)) - - attrs = append(attrs, c.NetConv.PeerName(peer)) + attrs = append( + attrs, + c.HTTPURLKey.String(u), + c.NetConv.PeerName(peer), + ) if port > 0 { attrs = append(attrs, c.NetConv.PeerPort(port)) } @@ -191,10 +192,13 @@ func (c *HTTPConv) ServerRequest(server string, req *http.Request) []attribute.K } attrs := make([]attribute.KeyValue, 0, n) - attrs = append(attrs, c.method(req.Method)) - attrs = append(attrs, c.scheme(req.TLS != nil)) - attrs = append(attrs, c.proto(req.Proto)) - attrs = append(attrs, c.NetConv.HostName(host)) + attrs = append( + attrs, + c.method(req.Method), + c.scheme(req.TLS != nil), + c.proto(req.Proto), + c.NetConv.HostName(host), + ) if hostPort > 0 { attrs = append(attrs, c.NetConv.HostPort(hostPort)) @@ -294,7 +298,7 @@ func (c *HTTPConv) ResponseHeader(h http.Header) []attribute.KeyValue { return c.header("http.response.header", h) } -func (c *HTTPConv) header(prefix string, h http.Header) 
[]attribute.KeyValue { +func (*HTTPConv) header(prefix string, h http.Header) []attribute.KeyValue { key := func(k string) attribute.Key { k = strings.ToLower(k) k = strings.ReplaceAll(k, "-", "_") @@ -311,7 +315,7 @@ func (c *HTTPConv) header(prefix string, h http.Header) []attribute.KeyValue { // ClientStatus returns a span status code and message for an HTTP status code // value received by a client. -func (c *HTTPConv) ClientStatus(code int) (codes.Code, string) { +func (*HTTPConv) ClientStatus(code int) (codes.Code, string) { stat, valid := validateHTTPStatusCode(code) if !valid { return stat, fmt.Sprintf("Invalid HTTP status code %d", code) @@ -322,7 +326,7 @@ func (c *HTTPConv) ClientStatus(code int) (codes.Code, string) { // ServerStatus returns a span status code and message for an HTTP status code // value returned by a server. Status codes in the 400-499 range are not // returned as errors. -func (c *HTTPConv) ServerStatus(code int) (codes.Code, string) { +func (*HTTPConv) ServerStatus(code int) (codes.Code, string) { stat, valid := validateHTTPStatusCode(code) if !valid { return stat, fmt.Sprintf("Invalid HTTP status code %d", code) diff --git a/semconv/internal/v3/http.go b/semconv/internal/v3/http.go index bc503642ffa..baaad7265ba 100644 --- a/semconv/internal/v3/http.go +++ b/semconv/internal/v3/http.go @@ -91,8 +91,7 @@ func (c *HTTPConv) ClientRequest(req *http.Request) []attribute.KeyValue { } attrs := make([]attribute.KeyValue, 0, n) - attrs = append(attrs, c.method(req.Method)) - attrs = append(attrs, c.proto(req.Proto)) + attrs = append(attrs, c.method(req.Method), c.proto(req.Proto)) var u string if req.URL != nil { @@ -103,9 +102,11 @@ func (c *HTTPConv) ClientRequest(req *http.Request) []attribute.KeyValue { // Restore any username/password info that was removed. 
req.URL.User = userinfo } - attrs = append(attrs, c.HTTPURLKey.String(u)) - - attrs = append(attrs, c.NetConv.PeerName(peer)) + attrs = append( + attrs, + c.HTTPURLKey.String(u), + c.NetConv.PeerName(peer), + ) if port > 0 { attrs = append(attrs, c.NetConv.PeerPort(port)) } @@ -191,10 +192,13 @@ func (c *HTTPConv) ServerRequest(server string, req *http.Request) []attribute.K } attrs := make([]attribute.KeyValue, 0, n) - attrs = append(attrs, c.method(req.Method)) - attrs = append(attrs, c.scheme(req.TLS != nil)) - attrs = append(attrs, c.proto(req.Proto)) - attrs = append(attrs, c.NetConv.HostName(host)) + attrs = append( + attrs, + c.method(req.Method), + c.scheme(req.TLS != nil), + c.proto(req.Proto), + c.NetConv.HostName(host), + ) if hostPort > 0 { attrs = append(attrs, c.NetConv.HostPort(hostPort)) @@ -294,7 +298,7 @@ func (c *HTTPConv) ResponseHeader(h http.Header) []attribute.KeyValue { return c.header("http.response.header", h) } -func (c *HTTPConv) header(prefix string, h http.Header) []attribute.KeyValue { +func (*HTTPConv) header(prefix string, h http.Header) []attribute.KeyValue { key := func(k string) attribute.Key { k = strings.ToLower(k) k = strings.ReplaceAll(k, "-", "_") @@ -311,7 +315,7 @@ func (c *HTTPConv) header(prefix string, h http.Header) []attribute.KeyValue { // ClientStatus returns a span status code and message for an HTTP status code // value received by a client. -func (c *HTTPConv) ClientStatus(code int) (codes.Code, string) { +func (*HTTPConv) ClientStatus(code int) (codes.Code, string) { stat, valid := validateHTTPStatusCode(code) if !valid { return stat, fmt.Sprintf("Invalid HTTP status code %d", code) @@ -322,7 +326,7 @@ func (c *HTTPConv) ClientStatus(code int) (codes.Code, string) { // ServerStatus returns a span status code and message for an HTTP status code // value returned by a server. Status codes in the 400-499 range are not // returned as errors. 
-func (c *HTTPConv) ServerStatus(code int) (codes.Code, string) { +func (*HTTPConv) ServerStatus(code int) (codes.Code, string) { stat, valid := validateHTTPStatusCode(code) if !valid { return stat, fmt.Sprintf("Invalid HTTP status code %d", code) diff --git a/semconv/internal/v4/http.go b/semconv/internal/v4/http.go index 347d699350e..4595ae34ceb 100644 --- a/semconv/internal/v4/http.go +++ b/semconv/internal/v4/http.go @@ -92,8 +92,7 @@ func (c *HTTPConv) ClientRequest(req *http.Request) []attribute.KeyValue { } attrs := make([]attribute.KeyValue, 0, n) - attrs = append(attrs, c.method(req.Method)) - attrs = append(attrs, c.proto(req.Proto)) + attrs = append(attrs, c.method(req.Method), c.proto(req.Proto)) var u string if req.URL != nil { @@ -104,9 +103,11 @@ func (c *HTTPConv) ClientRequest(req *http.Request) []attribute.KeyValue { // Restore any username/password info that was removed. req.URL.User = userinfo } - attrs = append(attrs, c.HTTPURLKey.String(u)) - - attrs = append(attrs, c.NetConv.PeerName(peer)) + attrs = append( + attrs, + c.HTTPURLKey.String(u), + c.NetConv.PeerName(peer), + ) if port > 0 { attrs = append(attrs, c.NetConv.PeerPort(port)) } @@ -192,10 +193,13 @@ func (c *HTTPConv) ServerRequest(server string, req *http.Request) []attribute.K } attrs := make([]attribute.KeyValue, 0, n) - attrs = append(attrs, c.method(req.Method)) - attrs = append(attrs, c.scheme(req.TLS != nil)) - attrs = append(attrs, c.proto(req.Proto)) - attrs = append(attrs, c.NetConv.HostName(host)) + attrs = append( + attrs, + c.method(req.Method), + c.scheme(req.TLS != nil), + c.proto(req.Proto), + c.NetConv.HostName(host), + ) if hostPort > 0 { attrs = append(attrs, c.NetConv.HostPort(hostPort)) @@ -295,7 +299,7 @@ func (c *HTTPConv) ResponseHeader(h http.Header) []attribute.KeyValue { return c.header("http.response.header", h) } -func (c *HTTPConv) header(prefix string, h http.Header) []attribute.KeyValue { +func (*HTTPConv) header(prefix string, h http.Header) 
[]attribute.KeyValue { key := func(k string) attribute.Key { k = strings.ToLower(k) k = strings.ReplaceAll(k, "-", "_") @@ -312,7 +316,7 @@ func (c *HTTPConv) header(prefix string, h http.Header) []attribute.KeyValue { // ClientStatus returns a span status code and message for an HTTP status code // value received by a client. -func (c *HTTPConv) ClientStatus(code int) (codes.Code, string) { +func (*HTTPConv) ClientStatus(code int) (codes.Code, string) { stat, valid := validateHTTPStatusCode(code) if !valid { return stat, fmt.Sprintf("Invalid HTTP status code %d", code) @@ -323,7 +327,7 @@ func (c *HTTPConv) ClientStatus(code int) (codes.Code, string) { // ServerStatus returns a span status code and message for an HTTP status code // value returned by a server. Status codes in the 400-499 range are not // returned as errors. -func (c *HTTPConv) ServerStatus(code int) (codes.Code, string) { +func (*HTTPConv) ServerStatus(code int) (codes.Code, string) { stat, valid := validateHTTPStatusCode(code) if !valid { return stat, fmt.Sprintf("Invalid HTTP status code %d", code) diff --git a/semconv/attribute_group.go.j2 b/semconv/templates/registry/go/attribute_group.go.j2 similarity index 100% rename from semconv/attribute_group.go.j2 rename to semconv/templates/registry/go/attribute_group.go.j2 diff --git a/semconv/helpers.j2 b/semconv/templates/registry/go/helpers.j2 similarity index 89% rename from semconv/helpers.j2 rename to semconv/templates/registry/go/helpers.j2 index 6e0d1d2f111..acef4abfe17 100644 --- a/semconv/helpers.j2 +++ b/semconv/templates/registry/go/helpers.j2 @@ -14,7 +14,7 @@ {%- endmacro -%} {%- macro to_go_name(fqn="", pkg="") -%} -{%- if pkg != "" -%} +{%- if pkg != "" and fqn != pkg -%} {%- set n = pkg | length -%} {%- if pkg == fqn[:n] -%} {%- set fqn = fqn[n:] -%} @@ -41,12 +41,16 @@ Examples: {{ attr.examples | trim("[]") }} {%- endmacro -%} {%- macro lower_first(line) -%} +{%- if line is string and line | length > 1 -%} {%- if line[0] is upper 
and line[1] is upper -%} {#- Assume an acronym -#} {{ line }} {%- else -%} {{ line[0]|lower }}{{ line[1:] }} {%- endif -%} +{%- elif line is not none -%} +{{ line }} +{%- endif -%} {%- endmacro -%} {%- macro first_word(line, delim=" ") -%} @@ -119,6 +123,12 @@ const ( func {{to_go_name(attribute.name, pkg)}}(val {{attribute.type | instantiated_type | map_text("attribute_type_value")}}) attribute.KeyValue { return {{to_go_name(attribute.name, pkg)}}Key.{{attribute.type | instantiated_type | map_text("attribute_type_method")}}(val) } +{%- elif attribute.type is template_type %} + +{{ [to_go_name(attribute.name, pkg) ~ " returns an attribute KeyValue conforming to the \"" ~ attribute.name ~ "\" semantic conventions. " ~ it_reps(attribute.brief) ] | comment(format="go") }} +func {{to_go_name(attribute.name, pkg)}}(key string, val {{attribute.type | instantiated_type | map_text("attribute_type_value")}}) attribute.KeyValue { + return attribute.{{attribute.type | instantiated_type | map_text("attribute_type_method")}}("{{attribute.name}}."+key, val) +} {%- endif %} {%- endfor %} {%- endmacro -%} @@ -130,12 +140,8 @@ func {{to_go_name(attribute.name, pkg)}}(val {{attribute.type | instantiated_typ {{ ["Enum values for " ~ attribute.name] | comment(format="go") }} var ( -{%- for value in attribute.type.members %} -{%- if value.deprecated %} -{{ ["Deprecated: " ~ value.deprecated | trim(".") ~ "." ] | comment(format="go_1tab") }} -{%- else %} +{%- for value in attribute.type.members if not value.deprecated %} {{ [value.brief or value.id, "Stability: " ~ value.stability] | comment(format="go_1tab") }} -{%- endif %} {{to_go_name(attribute.name ~ "." 
~ value.id, pkg=pkg)}} = {{ to_go_name(attribute.name, pkg=pkg) }}Key.{{attribute.type | instantiated_type | map_text("attribute_type_method")}}({{ value.value | print_member_value }}) {%- endfor %} ) diff --git a/semconv/instrument.j2 b/semconv/templates/registry/go/instrument.j2 similarity index 78% rename from semconv/instrument.j2 rename to semconv/templates/registry/go/instrument.j2 index 6242f538fce..25700b64008 100644 --- a/semconv/instrument.j2 +++ b/semconv/templates/registry/go/instrument.j2 @@ -115,6 +115,11 @@ func (m {{ name }}) Add( incr {{ value_type(metric) | lower }}, {{ params(metric.attributes, pkg=pkg, prefix="\t") }} ) { + if len(attrs) == 0 { + m.{{ inst }}.Add(ctx, incr) + return + } + o := addOptPool.Get().(*[]metric.AddOption) defer func() { *o = (*o)[:0] @@ -132,8 +137,8 @@ func (m {{ name }}) Add( {%- macro add_method(metric, inst, pkg="") -%} -// Add adds incr to the existing count. -{%- if metric.attributes | length > 0 %} +// Add adds incr to the existing count for attrs. +{%- if metric.attributes is not none and metric.attributes | length > 0 %} {{ params_docs(metric.attributes, pkg=pkg) }} {%- if metric.note is defined %} // @@ -163,6 +168,30 @@ func (m {{ h.to_go_name(metric.metric_name, pkg) }}) Add(ctx context.Context, in {%- endif -%} {%- endmacro -%} +{%- macro add_set_method(metric, inst, pkg="") -%} + +// AddSet adds incr to the existing count for set. +{%- if metric.note is defined %} +// +{{ metric.note | comment }} +{%- endif %} +func (m {{ h.to_go_name(metric.metric_name, pkg) }}) AddSet(ctx context.Context, incr {{ value_type(metric) | lower }}, set attribute.Set) { + if set.Len() == 0 { + m.{{ inst }}.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.{{ inst }}.Add(ctx, incr, *o...) 
+} +{%- endmacro -%} + {%- macro record_method_with_optional(metric, inst, pkg="") -%} {%- set name = h.to_go_name(metric.metric_name, pkg) -%} {%- set req_attr = metric.attributes | required | attribute_sort -%} @@ -171,6 +200,11 @@ func (m {{ name }}) Record( val {{ value_type(metric) | lower }}, {{ params(metric.attributes, pkg=pkg, prefix="\t") }} ) { + if len(attrs) == 0 { + m.{{ inst }}.Record(ctx, val) + return + } + o := recOptPool.Get().(*[]metric.RecordOption) defer func() { *o = (*o)[:0] @@ -188,8 +222,8 @@ func (m {{ name }}) Record( {%- macro record_method(metric, inst, pkg="") -%} -// Record records val to the current distribution. -{%- if metric.attributes | length > 0 %} +// Record records val to the current distribution for attrs. +{%- if metric.attributes is not none and metric.attributes | length > 0 %} {{ params_docs(metric.attributes, pkg=pkg) }} {%- if metric.note is defined %} // @@ -205,6 +239,7 @@ func (m {{ name }}) Record( func (m {{ name }}) Record(ctx context.Context, val {{ value_type(metric) | lower }}, attrs ...attribute.KeyValue) { if len(attrs) == 0 { m.{{ inst }}.Record(ctx, val) + return } o := recOptPool.Get().(*[]metric.RecordOption) @@ -218,3 +253,31 @@ func (m {{ name }}) Record(ctx context.Context, val {{ value_type(metric) | lowe } {%- endif -%} {%- endmacro -%} + +{%- macro record_set_method(metric, inst, pkg="") -%} +{%- set name = h.to_go_name(metric.metric_name, pkg) -%} + +// RecordSet records val to the current distribution for set. +{%- if metric.note is defined %} +// +{{ metric.note | comment }} +{%- endif %} +func (m {{ name }}) RecordSet(ctx context.Context, val {{ value_type(metric) | lower }}, set attribute.Set) { + if set.Len() == 0 { + m.{{ inst }}.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.{{ inst }}.Record(ctx, val, *o...) 
+} +{%- endmacro -%} + +{%- macro desc(metric) -%} +{{metric.brief | replace('\n', ' ') | trim}} +{%- endmacro -%} diff --git a/semconv/metric.go.j2 b/semconv/templates/registry/go/metric.go.j2 similarity index 83% rename from semconv/metric.go.j2 rename to semconv/templates/registry/go/metric.go.j2 index 59e3c01620d..f516d120bc1 100644 --- a/semconv/metric.go.j2 +++ b/semconv/templates/registry/go/metric.go.j2 @@ -2,6 +2,9 @@ {% import 'instrument.j2' as i -%} // Code generated from semantic convention specification. DO NOT EDIT. +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + // Package httpconv provides types and functionality for OpenTelemetry semantic // conventions in the "{{ ctx.root_namespace }}" namespace. package {{ ctx.root_namespace | camel_case | lower }}conv @@ -26,19 +29,27 @@ var ( type {{ name }}Attr {{ h.member_type(attr.type.members[0]) }} var ( -{%- for m in attr.type.members if not m.deprecated %} - {%- set m_name = name ~ h.to_go_name(m.id, ctx.root_namespace) %} -{{ h.prefix_brief(m.brief, m_name ~ " is ") | comment(format="go_1tab") }} - {{ m_name }} {{ name }}Attr = {% if attr.type.members[0].value is string -%} - "{{ m.value }}" +{%- for m in attr.type.members %} + {%- set m_name = name ~ h.to_go_name(m.id, ctx.root_namespace) -%} + + {% if attr.type.members[0].value is string -%} + {%- set m_value = '"' + m.value + '"' -%} {%- else -%} - {{ m.value }} + {%- set m_value = m.value -%} {%- endif -%} + + {%- if m.brief is defined %} + {%- set m_brief = m.brief -%} + {%- else %} + {%- set m_brief = "standardized value " + m_value + ' of ' + name + 'Attr.' 
-%} + {%- endif %} +{{ h.prefix_brief(m_brief, m_name ~ " is ") | comment(format="go_1tab") }} + {{ m_name }} {{ name }}Attr = {{ m_value }} {%- endfor %} ) {%- endfor %} -{%- for metric in ctx.metrics if not metric.deprecated %} +{%- for metric in ctx.metrics %} {%- set metric_name = h.to_go_name(metric.metric_name, ctx.root_namespace) %} {%- set metric_inst = metric.metric_name | map_text("instrument", i.instrument_default(metric)) %} @@ -60,7 +71,7 @@ func New{{ metric_name }}( i, err := m.{{ metric_inst }}( "{{metric.metric_name}}", append([]metric.{{ metric_inst }}Option{ - metric.WithDescription("{{metric.brief | trim}}"), + metric.WithDescription("{{ i.desc(metric) }}"), metric.WithUnit("{{metric.unit}}"), }, opt...)..., ) @@ -88,16 +99,20 @@ func ({{ metric_name }}) Unit() string { // Description returns the semantic convention description of the instrument func ({{ metric_name }}) Description() string { - return "{{ metric.brief | trim }}" + return "{{ i.desc(metric) }}" } {%- endif %} {%- if "Observable" is in metric_inst %} {%- elif metric.instrument == "counter" or metric.instrument == "updowncounter" %} {{ i.add_method(metric, metric_inst, ctx.root_namespace) }} + +{{ i.add_set_method(metric, metric_inst, ctx.root_namespace) }} {%- elif metric.instrument == "histogram" or metric.instrument == "gauge" %} {{ i.record_method(metric, metric_inst, ctx.root_namespace) }} + +{{ i.record_set_method(metric, metric_inst, ctx.root_namespace) }} {%- endif %} {%- for attr in metric.attributes | not_required | attribute_sort %} {%- set name = h.to_go_name(attr.name, ctx.root_namespace) %} diff --git a/semconv/weaver.yaml b/semconv/templates/registry/go/weaver.yaml similarity index 90% rename from semconv/weaver.yaml rename to semconv/templates/registry/go/weaver.yaml index bde14bbccd3..814793616be 100644 --- a/semconv/weaver.yaml +++ b/semconv/templates/registry/go/weaver.yaml @@ -23,10 +23,11 @@ templates: file_name: attribute_group.go - pattern: metric.go.j2 
filter: > - semconv_grouped_metrics({ + semconv_metrics({ "exclude_deprecated": true, "exclude_root_namespace": $excluded_namespaces, - }) + }) + | semconv_group_metrics_by_root_namespace application_mode: each file_name: "{{ctx.root_namespace | camel_case | lower }}conv/metric.go" comment_formats: @@ -68,7 +69,7 @@ text_maps: boolean: bool boolean[]: "...bool" instrument: - cpu.time: Float64ObservableCounter + system.cpu.time: Float64ObservableCounter go.config.gogc: Int64ObservableUpDownCounter go.goroutine.count: Int64ObservableUpDownCounter go.memory.allocated: Int64ObservableCounter @@ -77,6 +78,10 @@ text_maps: go.memory.limit: Int64ObservableUpDownCounter go.memory.used: Int64ObservableUpDownCounter go.processor.limit: Int64ObservableUpDownCounter + otel.sdk.processor.log.queue.capacity: Int64ObservableUpDownCounter + otel.sdk.processor.log.queue.size: Int64ObservableUpDownCounter + otel.sdk.processor.span.queue.capacity: Int64ObservableUpDownCounter + otel.sdk.processor.span.queue.size: Int64ObservableUpDownCounter process.cpu.time: Float64ObservableCounter system.memory.usage: Int64ObservableUpDownCounter system.memory.utilization: Float64ObservableGauge diff --git a/semconv/v1.34.0/attribute_group.go b/semconv/v1.34.0/attribute_group.go index 5b56662573a..98c0fddaa3d 100644 --- a/semconv/v1.34.0/attribute_group.go +++ b/semconv/v1.34.0/attribute_group.go @@ -3467,6 +3467,13 @@ func ContainerImageTags(val ...string) attribute.KeyValue { return ContainerImageTagsKey.StringSlice(val) } +// ContainerLabel returns an attribute KeyValue conforming to the +// "container.label" semantic conventions. It represents the container labels, +// `` being the label name, the value being the label value. +func ContainerLabel(key string, val string) attribute.KeyValue { + return attribute.String("container.label."+key, val) +} + // ContainerName returns an attribute KeyValue conforming to the "container.name" // semantic conventions. 
It represents the container name used by container // runtime. @@ -3794,6 +3801,22 @@ func DBOperationName(val string) attribute.KeyValue { return DBOperationNameKey.String(val) } +// DBOperationParameter returns an attribute KeyValue conforming to the +// "db.operation.parameter" semantic conventions. It represents a database +// operation parameter, with `` being the parameter name, and the attribute +// value being a string representation of the parameter value. +func DBOperationParameter(key string, val string) attribute.KeyValue { + return attribute.String("db.operation.parameter."+key, val) +} + +// DBQueryParameter returns an attribute KeyValue conforming to the +// "db.query.parameter" semantic conventions. It represents a database query +// parameter, with `` being the parameter name, and the attribute value +// being a string representation of the parameter value. +func DBQueryParameter(key string, val string) attribute.KeyValue { + return attribute.String("db.query.parameter."+key, val) +} + // DBQuerySummary returns an attribute KeyValue conforming to the // "db.query.summary" semantic conventions. It represents the low cardinality // summary of a database query. @@ -7312,6 +7335,14 @@ func HTTPRequestBodySize(val int) attribute.KeyValue { return HTTPRequestBodySizeKey.Int(val) } +// HTTPRequestHeader returns an attribute KeyValue conforming to the +// "http.request.header" semantic conventions. It represents the HTTP request +// headers, `` being the normalized HTTP Header name (lowercase), the value +// being the header values. +func HTTPRequestHeader(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("http.request.header."+key, val) +} + // HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the // "http.request.method_original" semantic conventions. It represents the // original HTTP method sent by the client in the request line. 
@@ -7347,6 +7378,14 @@ func HTTPResponseBodySize(val int) attribute.KeyValue { return HTTPResponseBodySizeKey.Int(val) } +// HTTPResponseHeader returns an attribute KeyValue conforming to the +// "http.response.header" semantic conventions. It represents the HTTP response +// headers, `` being the normalized HTTP Header name (lowercase), the value +// being the header values. +func HTTPResponseHeader(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("http.response.header."+key, val) +} + // HTTPResponseSize returns an attribute KeyValue conforming to the // "http.response.size" semantic conventions. It represents the total size of the // response in bytes. This should be the total number of bytes sent over the @@ -8001,6 +8040,22 @@ func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue { return K8SContainerStatusLastTerminatedReasonKey.String(val) } +// K8SCronJobAnnotation returns an attribute KeyValue conforming to the +// "k8s.cronjob.annotation" semantic conventions. It represents the cronjob +// annotation placed on the CronJob, the `` being the annotation name, the +// value being the annotation value. +func K8SCronJobAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.cronjob.annotation."+key, val) +} + +// K8SCronJobLabel returns an attribute KeyValue conforming to the +// "k8s.cronjob.label" semantic conventions. It represents the label placed on +// the CronJob, the `` being the label name, the value being the label +// value. +func K8SCronJobLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.cronjob.label."+key, val) +} + // K8SCronJobName returns an attribute KeyValue conforming to the // "k8s.cronjob.name" semantic conventions. It represents the name of the // CronJob. 
@@ -8014,6 +8069,20 @@ func K8SCronJobUID(val string) attribute.KeyValue { return K8SCronJobUIDKey.String(val) } +// K8SDaemonSetAnnotation returns an attribute KeyValue conforming to the +// "k8s.daemonset.annotation" semantic conventions. It represents the annotation +// key-value pairs placed on the DaemonSet. +func K8SDaemonSetAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.daemonset.annotation."+key, val) +} + +// K8SDaemonSetLabel returns an attribute KeyValue conforming to the +// "k8s.daemonset.label" semantic conventions. It represents the label key-value +// pairs placed on the DaemonSet. +func K8SDaemonSetLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.daemonset.label."+key, val) +} + // K8SDaemonSetName returns an attribute KeyValue conforming to the // "k8s.daemonset.name" semantic conventions. It represents the name of the // DaemonSet. @@ -8028,6 +8097,20 @@ func K8SDaemonSetUID(val string) attribute.KeyValue { return K8SDaemonSetUIDKey.String(val) } +// K8SDeploymentAnnotation returns an attribute KeyValue conforming to the +// "k8s.deployment.annotation" semantic conventions. It represents the annotation +// key-value pairs placed on the Deployment. +func K8SDeploymentAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.deployment.annotation."+key, val) +} + +// K8SDeploymentLabel returns an attribute KeyValue conforming to the +// "k8s.deployment.label" semantic conventions. It represents the label key-value +// pairs placed on the Deployment. +func K8SDeploymentLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.deployment.label."+key, val) +} + // K8SDeploymentName returns an attribute KeyValue conforming to the // "k8s.deployment.name" semantic conventions. It represents the name of the // Deployment. 
@@ -8054,6 +8137,20 @@ func K8SHPAUID(val string) attribute.KeyValue { return K8SHPAUIDKey.String(val) } +// K8SJobAnnotation returns an attribute KeyValue conforming to the +// "k8s.job.annotation" semantic conventions. It represents the annotation +// key-value pairs placed on the Job. +func K8SJobAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.job.annotation."+key, val) +} + +// K8SJobLabel returns an attribute KeyValue conforming to the "k8s.job.label" +// semantic conventions. It represents the label key-value pairs placed on the +// Job. +func K8SJobLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.job.label."+key, val) +} + // K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" // semantic conventions. It represents the name of the Job. func K8SJobName(val string) attribute.KeyValue { @@ -8066,6 +8163,20 @@ func K8SJobUID(val string) attribute.KeyValue { return K8SJobUIDKey.String(val) } +// K8SNamespaceAnnotation returns an attribute KeyValue conforming to the +// "k8s.namespace.annotation" semantic conventions. It represents the annotation +// key-value pairs placed on the Namespace. +func K8SNamespaceAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.namespace.annotation."+key, val) +} + +// K8SNamespaceLabel returns an attribute KeyValue conforming to the +// "k8s.namespace.label" semantic conventions. It represents the label key-value +// pairs placed on the Namespace. +func K8SNamespaceLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.namespace.label."+key, val) +} + // K8SNamespaceName returns an attribute KeyValue conforming to the // "k8s.namespace.name" semantic conventions. It represents the name of the // namespace that the pod is running in. 
@@ -8073,6 +8184,22 @@ func K8SNamespaceName(val string) attribute.KeyValue { return K8SNamespaceNameKey.String(val) } +// K8SNodeAnnotation returns an attribute KeyValue conforming to the +// "k8s.node.annotation" semantic conventions. It represents the annotation +// placed on the Node, the `` being the annotation name, the value being the +// annotation value, even if the value is empty. +func K8SNodeAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.node.annotation."+key, val) +} + +// K8SNodeLabel returns an attribute KeyValue conforming to the "k8s.node.label" +// semantic conventions. It represents the label placed on the Node, the `` +// being the label name, the value being the label value, even if the value is +// empty. +func K8SNodeLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.node.label."+key, val) +} + // K8SNodeName returns an attribute KeyValue conforming to the "k8s.node.name" // semantic conventions. It represents the name of the Node. func K8SNodeName(val string) attribute.KeyValue { @@ -8085,6 +8212,21 @@ func K8SNodeUID(val string) attribute.KeyValue { return K8SNodeUIDKey.String(val) } +// K8SPodAnnotation returns an attribute KeyValue conforming to the +// "k8s.pod.annotation" semantic conventions. It represents the annotation placed +// on the Pod, the `` being the annotation name, the value being the +// annotation value. +func K8SPodAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.pod.annotation."+key, val) +} + +// K8SPodLabel returns an attribute KeyValue conforming to the "k8s.pod.label" +// semantic conventions. It represents the label placed on the Pod, the `` +// being the label name, the value being the label value. 
+func K8SPodLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.pod.label."+key, val) +} + // K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" // semantic conventions. It represents the name of the Pod. func K8SPodName(val string) attribute.KeyValue { @@ -8097,6 +8239,20 @@ func K8SPodUID(val string) attribute.KeyValue { return K8SPodUIDKey.String(val) } +// K8SReplicaSetAnnotation returns an attribute KeyValue conforming to the +// "k8s.replicaset.annotation" semantic conventions. It represents the annotation +// key-value pairs placed on the ReplicaSet. +func K8SReplicaSetAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.replicaset.annotation."+key, val) +} + +// K8SReplicaSetLabel returns an attribute KeyValue conforming to the +// "k8s.replicaset.label" semantic conventions. It represents the label key-value +// pairs placed on the ReplicaSet. +func K8SReplicaSetLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.replicaset.label."+key, val) +} + // K8SReplicaSetName returns an attribute KeyValue conforming to the // "k8s.replicaset.name" semantic conventions. It represents the name of the // ReplicaSet. @@ -8139,6 +8295,20 @@ func K8SResourceQuotaUID(val string) attribute.KeyValue { return K8SResourceQuotaUIDKey.String(val) } +// K8SStatefulSetAnnotation returns an attribute KeyValue conforming to the +// "k8s.statefulset.annotation" semantic conventions. It represents the +// annotation key-value pairs placed on the StatefulSet. +func K8SStatefulSetAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.statefulset.annotation."+key, val) +} + +// K8SStatefulSetLabel returns an attribute KeyValue conforming to the +// "k8s.statefulset.label" semantic conventions. It represents the label +// key-value pairs placed on the StatefulSet. 
+func K8SStatefulSetLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.statefulset.label."+key, val) +} + // K8SStatefulSetName returns an attribute KeyValue conforming to the // "k8s.statefulset.name" semantic conventions. It represents the name of the // StatefulSet. @@ -10497,6 +10667,14 @@ func ProcessCreationTime(val string) attribute.KeyValue { return ProcessCreationTimeKey.String(val) } +// ProcessEnvironmentVariable returns an attribute KeyValue conforming to the +// "process.environment_variable" semantic conventions. It represents the process +// environment variables, being the environment variable name, the value +// being the environment variable value. +func ProcessEnvironmentVariable(key string, val string) attribute.KeyValue { + return attribute.String("process.environment_variable."+key, val) +} + // ProcessExecutableBuildIDGNU returns an attribute KeyValue conforming to the // "process.executable.build_id.gnu" semantic conventions. It represents the GNU // build ID as found in the `.note.gnu.build-id` ELF section (hex string). @@ -10965,6 +11143,38 @@ const ( RPCSystemKey = attribute.Key("rpc.system") ) +// RPCConnectRPCRequestMetadata returns an attribute KeyValue conforming to the +// "rpc.connect_rpc.request.metadata" semantic conventions. It represents the +// connect request metadata, `` being the normalized Connect Metadata key +// (lowercase), the value being the metadata values. +func RPCConnectRPCRequestMetadata(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("rpc.connect_rpc.request.metadata."+key, val) +} + +// RPCConnectRPCResponseMetadata returns an attribute KeyValue conforming to the +// "rpc.connect_rpc.response.metadata" semantic conventions. It represents the +// connect response metadata, `` being the normalized Connect Metadata key +// (lowercase), the value being the metadata values. 
+func RPCConnectRPCResponseMetadata(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("rpc.connect_rpc.response.metadata."+key, val) +} + +// RPCGRPCRequestMetadata returns an attribute KeyValue conforming to the +// "rpc.grpc.request.metadata" semantic conventions. It represents the gRPC +// request metadata, `` being the normalized gRPC Metadata key (lowercase), +// the value being the metadata values. +func RPCGRPCRequestMetadata(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("rpc.grpc.request.metadata."+key, val) +} + +// RPCGRPCResponseMetadata returns an attribute KeyValue conforming to the +// "rpc.grpc.response.metadata" semantic conventions. It represents the gRPC +// response metadata, `` being the normalized gRPC Metadata key (lowercase), +// the value being the metadata values. +func RPCGRPCResponseMetadata(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("rpc.grpc.response.metadata."+key, val) +} + // RPCJSONRPCErrorCode returns an attribute KeyValue conforming to the // "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code` // property of response if it is an error response. diff --git a/semconv/v1.34.0/error_type.go b/semconv/v1.34.0/error_type.go new file mode 100644 index 00000000000..19bf022465f --- /dev/null +++ b/semconv/v1.34.0/error_type.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0" + +import ( + "fmt" + "reflect" + + "go.opentelemetry.io/otel/attribute" +) + +// ErrorType returns an [attribute.KeyValue] identifying the error type of err. +func ErrorType(err error) attribute.KeyValue { + if err == nil { + return ErrorTypeOther + } + t := reflect.TypeOf(err) + var value string + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. 
+ value = t.String() + } else { + value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) + } + + if value == "" { + return ErrorTypeOther + } + return ErrorTypeKey.String(value) +} diff --git a/semconv/v1.34.0/error_type_test.go b/semconv/v1.34.0/error_type_test.go new file mode 100644 index 00000000000..744c29f77c5 --- /dev/null +++ b/semconv/v1.34.0/error_type_test.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0" + +import ( + "errors" + "fmt" + "reflect" + "testing" + + "go.opentelemetry.io/otel/attribute" +) + +type CustomError struct{} + +func (CustomError) Error() string { + return "custom error" +} + +func TestErrorType(t *testing.T) { + customErr := CustomError{} + builtinErr := errors.New("something went wrong") + var nilErr error + + wantCustomType := reflect.TypeOf(customErr) + wantCustomStr := fmt.Sprintf("%s.%s", wantCustomType.PkgPath(), wantCustomType.Name()) + + tests := []struct { + name string + err error + want attribute.KeyValue + }{ + { + name: "BuiltinError", + err: builtinErr, + want: attribute.String("error.type", "*errors.errorString"), + }, + { + name: "CustomError", + err: customErr, + want: attribute.String("error.type", wantCustomStr), + }, + { + name: "NilError", + err: nilErr, + want: ErrorTypeOther, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := ErrorType(tt.err) + if got != tt.want { + t.Errorf("ErrorType(%v) = %v, want %v", tt.err, got, tt.want) + } + }) + } +} diff --git a/semconv/v1.36.0/MIGRATION.md b/semconv/v1.36.0/MIGRATION.md new file mode 100644 index 00000000000..5c691da43d6 --- /dev/null +++ b/semconv/v1.36.0/MIGRATION.md @@ -0,0 +1,29 @@ + +# Migration from v1.34.0 to v1.36.0 + +The `go.opentelemetry.io/otel/semconv/v1.36.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.34.0` with the following exceptions. 
+ +## Removed + +The following declarations have been removed. +Refer to the [OpenTelemetry Semantic Conventions documentation] for deprecation instructions. + +If the type is not listed in the documentation as deprecated, it has been removed in this version due to lack of applicability or use. +If you use any of these non-deprecated declarations in your Go application, please [open an issue] describing your use-case. + +- `AzNamespace` +- `AzNamespaceKey` +- `AzServiceRequestID` +- `AzServiceRequestIDKey` +- `GenAISystemAzAIInference` +- `GenAISystemAzAIOpenAI` +- `GenAISystemGemini` +- `GenAISystemVertexAI` +- `GenAITokenTypeCompletion` +- `MessagingOperationTypeDeliver` +- `MessagingOperationTypePublish` +- `SystemMemoryStateShared` +- `VCSProviderNameGittea` + +[OpenTelemetry Semantic Conventions documentation]: https://github.com/open-telemetry/semantic-conventions +[open an issue]: https://github.com/open-telemetry/opentelemetry-go/issues/new?template=Blank+issue diff --git a/semconv/v1.36.0/README.md b/semconv/v1.36.0/README.md new file mode 100644 index 00000000000..6a38f5afaf8 --- /dev/null +++ b/semconv/v1.36.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.36.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.36.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.36.0) diff --git a/semconv/v1.36.0/attribute_group.go b/semconv/v1.36.0/attribute_group.go new file mode 100644 index 00000000000..6eb34bc28e0 --- /dev/null +++ b/semconv/v1.36.0/attribute_group.go @@ -0,0 +1,14464 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.36.0" + +import "go.opentelemetry.io/otel/attribute" + +// Namespace: android +const ( + // AndroidAppStateKey is the attribute Key conforming to the "android.app.state" + // semantic conventions. 
It represents the state + // of the application. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "created" + // Note: The Android lifecycle states are defined in + // [Activity lifecycle callbacks], and from which the `OS identifiers` are + // derived. + // + // [Activity lifecycle callbacks]: https://developer.android.com/guide/components/activities/activity-lifecycle#lc + AndroidAppStateKey = attribute.Key("android.app.state") + + // AndroidOSAPILevelKey is the attribute Key conforming to the + // "android.os.api_level" semantic conventions. It uniquely + // identifies the framework API revision offered by a version (`os.version`) of + // the android operating system. More information can be found [here]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "33", "32" + // + // [here]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels + AndroidOSAPILevelKey = attribute.Key("android.os.api_level") +) + +// AndroidOSAPILevel returns an attribute KeyValue conforming to the +// "android.os.api_level" semantic conventions. It uniquely +// identifies the framework API revision offered by a version (`os.version`) of +// the android operating system. More information can be found [here]. +// +// [here]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels +func AndroidOSAPILevel(val string) attribute.KeyValue { + return AndroidOSAPILevelKey.String(val) +} + +// Enum values for android.app.state +var ( + // Any time before Activity.onResume() or, if the app has no Activity, + // Context.startService() has been called in the app for the first time.
+ // + // Stability: development + AndroidAppStateCreated = AndroidAppStateKey.String("created") + // Any time after Activity.onPause() or, if the app has no Activity, + // Context.stopService() has been called when the app was in the foreground + // state. + // + // Stability: development + AndroidAppStateBackground = AndroidAppStateKey.String("background") + // Any time after Activity.onResume() or, if the app has no Activity, + // Context.startService() has been called when the app was in either the created + // or background states. + // + // Stability: development + AndroidAppStateForeground = AndroidAppStateKey.String("foreground") +) + +// Namespace: app +const ( + // AppInstallationIDKey is the attribute Key conforming to the + // "app.installation.id" semantic conventions. It represents a unique identifier + // representing the installation of an application on a specific device. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2ab2916d-a51f-4ac8-80ee-45ac31a28092" + // Note: Its value SHOULD persist across launches of the same application + // installation, including through application upgrades. + // It SHOULD change if the application is uninstalled or if all applications of + // the vendor are uninstalled. + // Additionally, users might be able to reset this value (e.g. by clearing + // application data). + // If an app is installed multiple times on the same device (e.g. in different + // accounts on Android), each `app.installation.id` SHOULD have a different + // value. + // If multiple OpenTelemetry SDKs are used within the same application, they + // SHOULD use the same value for `app.installation.id`. + // Hardware IDs (e.g. serial number, IMEI, MAC address) MUST NOT be used as the + // `app.installation.id`. + // + // For iOS, this value SHOULD be equal to the [vendor identifier]. 
+ // + // For Android, examples of `app.installation.id` implementations include: + // + // - [Firebase Installation ID]. + // - A globally unique UUID which is persisted across sessions in your + // application. + // - [App set ID]. + // - [`Settings.getString(Settings.Secure.ANDROID_ID)`]. + // + // More information about Android identifier best practices can be found [here] + // . + // + // [vendor identifier]: https://developer.apple.com/documentation/uikit/uidevice/identifierforvendor + // [Firebase Installation ID]: https://firebase.google.com/docs/projects/manage-installations + // [App set ID]: https://developer.android.com/identity/app-set-id + // [`Settings.getString(Settings.Secure.ANDROID_ID)`]: https://developer.android.com/reference/android/provider/Settings.Secure#ANDROID_ID + // [here]: https://developer.android.com/training/articles/user-data-ids + AppInstallationIDKey = attribute.Key("app.installation.id") + + // AppScreenCoordinateXKey is the attribute Key conforming to the + // "app.screen.coordinate.x" semantic conventions. It represents the x + // (horizontal) coordinate of a screen coordinate, in screen pixels. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0, 131 + AppScreenCoordinateXKey = attribute.Key("app.screen.coordinate.x") + + // AppScreenCoordinateYKey is the attribute Key conforming to the + // "app.screen.coordinate.y" semantic conventions. It represents the y + // (vertical) component of a screen coordinate, in screen pixels. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 12, 99 + AppScreenCoordinateYKey = attribute.Key("app.screen.coordinate.y") + + // AppWidgetIDKey is the attribute Key conforming to the "app.widget.id" + // semantic conventions. It represents an identifier that uniquely + // differentiates this widget from other widgets in the same application. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "f9bc787d-ff05-48ad-90e1-fca1d46130b3", "submit_order_1829" + // Note: A widget is an application component, typically an on-screen visual GUI + // element. + AppWidgetIDKey = attribute.Key("app.widget.id") + + // AppWidgetNameKey is the attribute Key conforming to the "app.widget.name" + // semantic conventions. It represents the name of an application widget. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "submit", "attack", "Clear Cart" + // Note: A widget is an application component, typically an on-screen visual GUI + // element. + AppWidgetNameKey = attribute.Key("app.widget.name") +) + +// AppInstallationID returns an attribute KeyValue conforming to the +// "app.installation.id" semantic conventions. It represents a unique identifier +// representing the installation of an application on a specific device. +func AppInstallationID(val string) attribute.KeyValue { + return AppInstallationIDKey.String(val) +} + +// AppScreenCoordinateX returns an attribute KeyValue conforming to the +// "app.screen.coordinate.x" semantic conventions. It represents the x +// (horizontal) coordinate of a screen coordinate, in screen pixels. +func AppScreenCoordinateX(val int) attribute.KeyValue { + return AppScreenCoordinateXKey.Int(val) +} + +// AppScreenCoordinateY returns an attribute KeyValue conforming to the +// "app.screen.coordinate.y" semantic conventions. It represents the y (vertical) +// component of a screen coordinate, in screen pixels. +func AppScreenCoordinateY(val int) attribute.KeyValue { + return AppScreenCoordinateYKey.Int(val) +} + +// AppWidgetID returns an attribute KeyValue conforming to the "app.widget.id" +// semantic conventions. It represents an identifier that uniquely differentiates +// this widget from other widgets in the same application. 
+func AppWidgetID(val string) attribute.KeyValue { + return AppWidgetIDKey.String(val) +} + +// AppWidgetName returns an attribute KeyValue conforming to the +// "app.widget.name" semantic conventions. It represents the name of an +// application widget. +func AppWidgetName(val string) attribute.KeyValue { + return AppWidgetNameKey.String(val) +} + +// Namespace: artifact +const ( + // ArtifactAttestationFilenameKey is the attribute Key conforming to the + // "artifact.attestation.filename" semantic conventions. It represents the + // provenance filename of the built attestation which directly relates to the + // build artifact filename. This filename SHOULD accompany the artifact at + // publish time. See the [SLSA Relationship] specification for more information. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "golang-binary-amd64-v0.1.0.attestation", + // "docker-image-amd64-v0.1.0.intoto.json1", "release-1.tar.gz.attestation", + // "file-name-package.tar.gz.intoto.json1" + // + // [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations + ArtifactAttestationFilenameKey = attribute.Key("artifact.attestation.filename") + + // ArtifactAttestationHashKey is the attribute Key conforming to the + // "artifact.attestation.hash" semantic conventions. It represents the full + // [hash value (see glossary)], of the built attestation. Some envelopes in the + // [software attestation space] also refer to this as the **digest**. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1b31dfcd5b7f9267bf2ff47651df1cfb9147b9e4df1f335accf65b4cda498408" + // + // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + // [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec + ArtifactAttestationHashKey = attribute.Key("artifact.attestation.hash") + + // ArtifactAttestationIDKey is the attribute Key conforming to the + // "artifact.attestation.id" semantic conventions. It represents the id of the + // build [software attestation]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123" + // + // [software attestation]: https://slsa.dev/attestation-model + ArtifactAttestationIDKey = attribute.Key("artifact.attestation.id") + + // ArtifactFilenameKey is the attribute Key conforming to the + // "artifact.filename" semantic conventions. It represents the human readable + // file name of the artifact, typically generated during build and release + // processes. Often includes the package name and version in the file name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "golang-binary-amd64-v0.1.0", "docker-image-amd64-v0.1.0", + // "release-1.tar.gz", "file-name-package.tar.gz" + // Note: This file name can also act as the [Package Name] + // in cases where the package ecosystem maps accordingly. + // Additionally, the artifact [can be published] + // for others, but that is not a guarantee. + // + // [Package Name]: https://slsa.dev/spec/v1.0/terminology#package-model + // [can be published]: https://slsa.dev/spec/v1.0/terminology#software-supply-chain + ArtifactFilenameKey = attribute.Key("artifact.filename") + + // ArtifactHashKey is the attribute Key conforming to the "artifact.hash" + // semantic conventions. 
It represents the full [hash value (see glossary)], + // often found in checksum.txt on a release of the artifact and used to verify + // package integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9ff4c52759e2c4ac70b7d517bc7fcdc1cda631ca0045271ddd1b192544f8a3e9" + // Note: The specific algorithm used to create the cryptographic hash value is + // not defined. In situations where an artifact has multiple + // cryptographic hashes, it is up to the implementer to choose which + // hash value to set here; this should be the most secure hash algorithm + // that is suitable for the situation and consistent with the + // corresponding attestation. The implementer can then provide the other + // hash values through an additional set of attribute extensions as they + // deem necessary. + // + // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + ArtifactHashKey = attribute.Key("artifact.hash") + + // ArtifactPurlKey is the attribute Key conforming to the "artifact.purl" + // semantic conventions. It represents the [Package URL] of the + // [package artifact] provides a standard way to identify and locate the + // packaged artifact. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "pkg:github/package-url/purl-spec@1209109710924", + // "pkg:npm/foo@12.12.3" + // + // [Package URL]: https://github.com/package-url/purl-spec + // [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model + ArtifactPurlKey = attribute.Key("artifact.purl") + + // ArtifactVersionKey is the attribute Key conforming to the "artifact.version" + // semantic conventions. It represents the version of the artifact. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "v0.1.0", "1.2.1", "122691-build" + ArtifactVersionKey = attribute.Key("artifact.version") +) + +// ArtifactAttestationFilename returns an attribute KeyValue conforming to the +// "artifact.attestation.filename" semantic conventions. It represents the +// provenance filename of the built attestation which directly relates to the +// build artifact filename. This filename SHOULD accompany the artifact at +// publish time. See the [SLSA Relationship] specification for more information. +// +// [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations +func ArtifactAttestationFilename(val string) attribute.KeyValue { + return ArtifactAttestationFilenameKey.String(val) +} + +// ArtifactAttestationHash returns an attribute KeyValue conforming to the +// "artifact.attestation.hash" semantic conventions. It represents the full +// [hash value (see glossary)], of the built attestation. Some envelopes in the +// [software attestation space] also refer to this as the **digest**. +// +// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf +// [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec +func ArtifactAttestationHash(val string) attribute.KeyValue { + return ArtifactAttestationHashKey.String(val) +} + +// ArtifactAttestationID returns an attribute KeyValue conforming to the +// "artifact.attestation.id" semantic conventions. It represents the id of the +// build [software attestation]. +// +// [software attestation]: https://slsa.dev/attestation-model +func ArtifactAttestationID(val string) attribute.KeyValue { + return ArtifactAttestationIDKey.String(val) +} + +// ArtifactFilename returns an attribute KeyValue conforming to the +// "artifact.filename" semantic conventions. 
It represents the human readable +// file name of the artifact, typically generated during build and release +// processes. Often includes the package name and version in the file name. +func ArtifactFilename(val string) attribute.KeyValue { + return ArtifactFilenameKey.String(val) +} + +// ArtifactHash returns an attribute KeyValue conforming to the "artifact.hash" +// semantic conventions. It represents the full [hash value (see glossary)], +// often found in checksum.txt on a release of the artifact and used to verify +// package integrity. +// +// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf +func ArtifactHash(val string) attribute.KeyValue { + return ArtifactHashKey.String(val) +} + +// ArtifactPurl returns an attribute KeyValue conforming to the "artifact.purl" +// semantic conventions. It represents the [Package URL] of the +// [package artifact] provides a standard way to identify and locate the packaged +// artifact. +// +// [Package URL]: https://github.com/package-url/purl-spec +// [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model +func ArtifactPurl(https://github.com/val%20string) attribute.KeyValue { + return ArtifactPurlKey.String(val) +} + +// ArtifactVersion returns an attribute KeyValue conforming to the +// "artifact.version" semantic conventions. It represents the version of the +// artifact. +func ArtifactVersion(val string) attribute.KeyValue { + return ArtifactVersionKey.String(val) +} + +// Namespace: aws +const ( + // AWSBedrockGuardrailIDKey is the attribute Key conforming to the + // "aws.bedrock.guardrail.id" semantic conventions. It represents the unique + // identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and + // prevent unwanted behavior from model responses or user messages. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "sgi5gkybzqak" + // + // [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html + AWSBedrockGuardrailIDKey = attribute.Key("aws.bedrock.guardrail.id") + + // AWSBedrockKnowledgeBaseIDKey is the attribute Key conforming to the + // "aws.bedrock.knowledge_base.id" semantic conventions. It represents the + // unique identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a + // bank of information that can be queried by models to generate more relevant + // responses and augment prompts. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "XFWUPB9PAW" + // + // [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html + AWSBedrockKnowledgeBaseIDKey = attribute.Key("aws.bedrock.knowledge_base.id") + + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to the + // "aws.dynamodb.attribute_definitions" semantic conventions. It represents the + // JSON-serialized value of each item in the `AttributeDefinitions` request + // field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "AttributeName": "string", "AttributeType": "string" }" + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "lives", "id" + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. 
It represents the value + // of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, + // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": + // "string", "WriteCapacityUnits": number }" + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of the + // `Count` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the + // value of the `ExclusiveStartTableName` request parameter. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Users", "CatsTable" + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_index_updates" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }" + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") + + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to the + // "aws.dynamodb.global_secondary_indexes" semantic conventions. It represents + // the JSON-serialized value of each item of the `GlobalSecondaryIndexes` + // request field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }" + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value of + // the `IndexName` request parameter. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "name_to_group" + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to the + // "aws.dynamodb.item_collection_metrics" semantic conventions. It represents + // the JSON-serialized value of the `ItemCollectionMetrics` response field. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }" + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of the + // `Limit` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to the + // "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents + // the JSON-serialized value of each item of the `LocalSecondaryIndexes` request + // field. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "IndexArn": "string", "IndexName": "string", "IndexSizeBytes": + // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" } }" + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value of + // the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Title", "Title, Price, Color", "Title, Description, RelatedItems, + // ProductReviews" + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.provisioned_read_capacity" semantic conventions. It represents + // the value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.provisioned_write_capacity" semantic conventions. It represents + // the value of the `ProvisionedThroughput.WriteCapacityUnits` request + // parameter. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. 
It represents the value of + // the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the value of + // the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") + + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of the + // `Segment` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of the + // `Select` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ALL_ATTRIBUTES", "COUNT" + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the number of + // items in the `TableNames` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") + + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys in + // the `RequestItems` object field. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Users", "Cats" + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the value + // of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS cluster]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster" + // + // [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container instance]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9" + // + // [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch type] + // for an ECS task. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [launch type]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the "aws.ecs.task.arn" + // semantic conventions. It represents the ARN of a running [ECS task]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b", + // "arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd" + // + // [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the family name of + // the [ECS task definition] used to create the ECS task. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-family" + // + // [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" + // semantic conventions. It represents the ID of a running ECS task. The ID MUST + // be extracted from `task.arn`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "10838bed-421f-43ef-870a-f43feacbbb5b", + // "23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd" + AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. 
It represents the revision for + // the task definition used to create the ECS task. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "8", "26" + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") + + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS + // cluster. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster" + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") + + // AWSExtendedRequestIDKey is the attribute Key conforming to the + // "aws.extended_request_id" semantic conventions. It represents the AWS + // extended request ID as returned in the response header `x-amz-id-2`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "wzHcyEWfmOGDIE5QOhTAqFDoDWP3y8IUvpNINCwL9N4TEHbUw0/gZJ+VZTmCNCWR7fezEN3eCiQ=" + AWSExtendedRequestIDKey = attribute.Key("aws.extended_request_id") + + // AWSKinesisStreamNameKey is the attribute Key conforming to the + // "aws.kinesis.stream_name" semantic conventions. It represents the name of the + // AWS Kinesis [stream] the request refers to. Corresponds to the + // `--stream-name` parameter of the Kinesis [describe-stream] operation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "some-stream-name" + // + // [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html + // [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html + AWSKinesisStreamNameKey = attribute.Key("aws.kinesis.stream_name") + + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. 
It represents the full invoked
+	// ARN as provided on the `Context` passed to the function (
+	// `Lambda-Runtime-Invoked-Function-Arn` header on the
+	// `/runtime/invocation/next` applicable).
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "arn:aws:lambda:us-east-1:123456:function:myfunction:myalias"
+	// Note: This may be different from `cloud.resource_id` if an alias is involved.
+	AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+
+	// AWSLambdaResourceMappingIDKey is the attribute Key conforming to the
+	// "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID
+	// of the [AWS Lambda EventSource Mapping]. An event source is mapped to a lambda
+	// function. Its contents are read by Lambda and used to trigger a function.
+	// This isn't available in the lambda execution context or the lambda runtime
+	// environment. This is going to be populated by the AWS SDK for each language
+	// when that UUID is present. Some of these operations are
+	// Create/Delete/Get/List/Update EventSourceMapping.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "587ad24b-03b9-4413-8202-bbd56b36e5b7"
+	//
+	// [AWS Lambda EventSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html
+	AWSLambdaResourceMappingIDKey = attribute.Key("aws.lambda.resource_mapping.id")
+
+	// AWSLogGroupARNsKey is the attribute Key conforming to the
+	// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+	// Name(s) (ARN) of the AWS log group(s).
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*"
+	// Note: See the [log group ARN format documentation].
+ // + // [log group ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of the + // AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/aws/lambda/my-function", "opentelemetry-service" + // Note: Multiple log groups must be supported for cases like multi-container + // applications, where a single application has sidecar containers, and each + // write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the + // AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b" + // Note: See the [log stream ARN format documentation]. One log group can + // contain several log streams, so these ARNs necessarily identify both a log + // group and a log stream. + // + // [log stream ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) of the + // AWS log stream(s) an application is writing to. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "logs/main/10838bed-421f-43ef-870a-f43feacbbb5b" + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. It represents the AWS request ID as returned in the + // response headers `x-amzn-requestid`, `x-amzn-request-id` or + // `x-amz-request-id`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "79b9da39-b7ae-508a-a6bc-864b2829c622", "C9ER4AJX75574TDJ" + AWSRequestIDKey = attribute.Key("aws.request_id") + + // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" + // semantic conventions. It represents the S3 bucket name the request refers to. + // Corresponds to the `--bucket` parameter of the [S3 API] operations. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "some-bucket-name" + // Note: The `bucket` attribute is applicable to all S3 operations that + // reference a bucket, i.e. that require the bucket name as a mandatory + // parameter. + // This applies to almost all S3 operations except `list-buckets`. + // + // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html + AWSS3BucketKey = attribute.Key("aws.s3.bucket") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source object + // (in the form `bucket`/`key`) for the copy operation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "someFile.yml" + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 API]. 
+ // This applies in particular to the following operations: + // + // - [copy-object] + // - [upload-part-copy] + // + // + // [copy-object operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "Objects=[{Key=string,VersionId=string},{Key=string,VersionId=string}],Quiet=boolean" + // Note: The `delete` attribute is only applicable to the [delete-object] + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 API]. + // + // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html + // [delete-objects operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 API] operations. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "someFile.yml" + // Note: The `key` attribute is applicable to all object-related S3 operations, + // i.e. that require the object key as a mandatory parameter. 
+ // This applies in particular to the following operations: + // + // - [copy-object] + // - [delete-object] + // - [get-object] + // - [head-object] + // - [put-object] + // - [restore-object] + // - [select-object-content] + // - [abort-multipart-upload] + // - [complete-multipart-upload] + // - [create-multipart-upload] + // - [list-parts] + // - [upload-part] + // - [upload-part-copy] + // + // + // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html + // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html + // [get-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html + // [head-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html + // [put-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html + // [restore-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html + // [select-object-content]: https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html + // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html + // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html + // [create-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html + // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html + // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. 
It represents the part number of + // the part being uploaded in a multipart-upload operation. This is a positive + // integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the [upload-part] + // and [upload-part-copy] operations. + // The `part_number` attribute corresponds to the `--part-number` parameter of + // the + // [upload-part operation within the S3 API]. + // + // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + // [upload-part operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") + + // AWSS3UploadIDKey is the attribute Key conforming to the "aws.s3.upload_id" + // semantic conventions. It represents the upload ID that identifies the + // multipart upload. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ" + // Note: The `upload_id` attribute applies to S3 multipart-upload operations and + // corresponds to the `--upload-id` parameter + // of the [S3 API] multipart operations. 
+	// This applies in particular to the following operations:
+	//
+	//   - [abort-multipart-upload]
+	//   - [complete-multipart-upload]
+	//   - [list-parts]
+	//   - [upload-part]
+	//   - [upload-part-copy]
+	//
+	//
+	// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+	// [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html
+	// [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html
+	// [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html
+	// [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html
+	// [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html
+	AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
+
+	// AWSSecretsmanagerSecretARNKey is the attribute Key conforming to the
+	// "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN
+	// of the Secret stored in the Secrets Manager.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// "arn:aws:secretsmanager:us-east-1:123456789012:secret:SecretName-6RandomCharacters"
+	AWSSecretsmanagerSecretARNKey = attribute.Key("aws.secretsmanager.secret.arn")
+
+	// AWSSNSTopicARNKey is the attribute Key conforming to the "aws.sns.topic.arn"
+	// semantic conventions. It represents the ARN of the AWS SNS Topic. An Amazon
+	// SNS [topic] is a logical access point that acts as a communication channel.
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:sns:us-east-1:123456789012:mystack-mytopic-NZJ5JSMVGFIE" + // + // [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html + AWSSNSTopicARNKey = attribute.Key("aws.sns.topic.arn") + + // AWSSQSQueueURLKey is the attribute Key conforming to the "aws.sqs.queue.url" + // semantic conventions. It represents the URL of the AWS SQS Queue. It's a + // unique identifier for a queue in Amazon Simple Queue Service (SQS) and is + // used to access the queue and perform actions on it. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://sqs.us-east-1.amazonaws.com/123456789012/MyQueue" + AWSSQSQueueURLKey = attribute.Key("aws.sqs.queue.url") + + // AWSStepFunctionsActivityARNKey is the attribute Key conforming to the + // "aws.step_functions.activity.arn" semantic conventions. It represents the ARN + // of the AWS Step Functions Activity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:states:us-east-1:123456789012:activity:get-greeting" + AWSStepFunctionsActivityARNKey = attribute.Key("aws.step_functions.activity.arn") + + // AWSStepFunctionsStateMachineARNKey is the attribute Key conforming to the + // "aws.step_functions.state_machine.arn" semantic conventions. It represents + // the ARN of the AWS Step Functions State Machine. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:states:us-east-1:123456789012:stateMachine:myStateMachine:1" + AWSStepFunctionsStateMachineARNKey = attribute.Key("aws.step_functions.state_machine.arn") +) + +// AWSBedrockGuardrailID returns an attribute KeyValue conforming to the +// "aws.bedrock.guardrail.id" semantic conventions. It represents the unique +// identifier of the AWS Bedrock Guardrail. 
A [guardrail] helps safeguard and +// prevent unwanted behavior from model responses or user messages. +// +// [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html +func AWSBedrockGuardrailID(val string) attribute.KeyValue { + return AWSBedrockGuardrailIDKey.String(val) +} + +// AWSBedrockKnowledgeBaseID returns an attribute KeyValue conforming to the +// "aws.bedrock.knowledge_base.id" semantic conventions. It represents the unique +// identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a bank of +// information that can be queried by models to generate more relevant responses +// and augment prompts. +// +// [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html +func AWSBedrockKnowledgeBaseID(val string) attribute.KeyValue { + return AWSBedrockKnowledgeBaseIDKey.String(val) +} + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming to +// the "aws.dynamodb.attribute_definitions" semantic conventions. It represents +// the JSON-serialized value of each item in the `AttributeDefinitions` request +// field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to the +// "aws.dynamodb.attributes_to_get" semantic conventions. It represents the value +// of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. 
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to the +// "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming to the +// "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the +// value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the +// `GlobalSecondaryIndexUpdates` request field. +func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue conforming to +// the "aws.dynamodb.global_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field. 
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of the +// `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming to +// the "aws.dynamodb.item_collection_metrics" semantic conventions. It represents +// the JSON-serialized value of the `ItemCollectionMetrics` response field. +func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming to +// the "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents +// the JSON-serialized value of each item of the `LocalSecondaryIndexes` request +// field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of the +// `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.provisioned_read_capacity" semantic conventions. 
It +// represents the value of the `ProvisionedThroughput.ReadCapacityUnits` request +// parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue conforming +// to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. It +// represents the value of the `ProvisionedThroughput.WriteCapacityUnits` request +// parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value of +// the `ScannedCount` response parameter. +func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. 
It represents the number of +// items in the `TableNames` response parameter. +func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in the +// `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value of +// the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an +// [ECS cluster]. +// +// [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container instance]. +// +// [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running +// [ECS task]. 
+// +// [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the family name of +// the [ECS task definition] used to create the ECS task. +// +// [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskID returns an attribute KeyValue conforming to the "aws.ecs.task.id" +// semantic conventions. It represents the ID of a running ECS task. The ID MUST +// be extracted from `task.arn`. +func AWSECSTaskID(val string) attribute.KeyValue { + return AWSECSTaskIDKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// the task definition used to create the ECS task. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// AWSExtendedRequestID returns an attribute KeyValue conforming to the +// "aws.extended_request_id" semantic conventions. It represents the AWS extended +// request ID as returned in the response header `x-amz-id-2`. +func AWSExtendedRequestID(val string) attribute.KeyValue { + return AWSExtendedRequestIDKey.String(val) +} + +// AWSKinesisStreamName returns an attribute KeyValue conforming to the +// "aws.kinesis.stream_name" semantic conventions. 
It represents the name of the
+// AWS Kinesis [stream] the request refers to. Corresponds to the `--stream-name`
+// parameter of the Kinesis [describe-stream] operation.
+//
+// [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html
+// [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html
+func AWSKinesisStreamName(val string) attribute.KeyValue {
+	return AWSKinesisStreamNameKey.String(val)
+}
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked
+// ARN as provided on the `Context` passed to the function (
+// `Lambda-Runtime-Invoked-Function-Arn` header on the `/runtime/invocation/next`
+// applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+	return AWSLambdaInvokedARNKey.String(val)
+}
+
+// AWSLambdaResourceMappingID returns an attribute KeyValue conforming to the
+// "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID
+// of the [AWS Lambda EventSource Mapping]. An event source is mapped to a lambda
+// function. Its contents are read by Lambda and used to trigger a function.
+// This isn't available in the lambda execution context or the lambda runtime
+// environment. This is going to be populated by the AWS SDK for each language
+// when that UUID is present. Some of these operations are
+// Create/Delete/Get/List/Update EventSourceMapping.
+//
+// [AWS Lambda EventSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html
+func AWSLambdaResourceMappingID(val string) attribute.KeyValue {
+	return AWSLambdaResourceMappingIDKey.String(val)
+}
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of the +// AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// AWSRequestID returns an attribute KeyValue conforming to the "aws.request_id" +// semantic conventions. It represents the AWS request ID as returned in the +// response headers `x-amzn-requestid`, `x-amzn-request-id` or `x-amz-request-id` +// . +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// AWSS3Bucket returns an attribute KeyValue conforming to the "aws.s3.bucket" +// semantic conventions. It represents the S3 bucket name the request refers to. +// Corresponds to the `--bucket` parameter of the [S3 API] operations. +// +// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object (in +// the form `bucket`/`key`) for the copy operation. 
+func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the "aws.s3.delete" +// semantic conventions. It represents the delete request container that +// specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" semantic +// conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 API] operations. +// +// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// AWSSecretsmanagerSecretARN returns an attribute KeyValue conforming to the +// "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN of +// the Secret stored in the Secrets Manager. +func AWSSecretsmanagerSecretARN(val string) attribute.KeyValue { + return AWSSecretsmanagerSecretARNKey.String(val) +} + +// AWSSNSTopicARN returns an attribute KeyValue conforming to the +// "aws.sns.topic.arn" semantic conventions. It represents the ARN of the AWS SNS +// Topic. An Amazon SNS [topic] is a logical access point that acts as a +// communication channel.
+// +// [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html +func AWSSNSTopicARN(val string) attribute.KeyValue { + return AWSSNSTopicARNKey.String(val) +} + +// AWSSQSQueueURL returns an attribute KeyValue conforming to the +// "aws.sqs.queue.url" semantic conventions. It represents the URL of the AWS SQS +// Queue. It's a unique identifier for a queue in Amazon Simple Queue Service +// (SQS) and is used to access the queue and perform actions on it. +func AWSSQSQueueURL(val string) attribute.KeyValue { + return AWSSQSQueueURLKey.String(val) +} + +// AWSStepFunctionsActivityARN returns an attribute KeyValue conforming to the +// "aws.step_functions.activity.arn" semantic conventions. It represents the ARN +// of the AWS Step Functions Activity. +func AWSStepFunctionsActivityARN(val string) attribute.KeyValue { + return AWSStepFunctionsActivityARNKey.String(val) +} + +// AWSStepFunctionsStateMachineARN returns an attribute KeyValue conforming to +// the "aws.step_functions.state_machine.arn" semantic conventions. It represents +// the ARN of the AWS Step Functions State Machine. +func AWSStepFunctionsStateMachineARN(val string) attribute.KeyValue { + return AWSStepFunctionsStateMachineARNKey.String(val) +} + +// Enum values for aws.ecs.launchtype +var ( + // ec2 + // Stability: development + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + // Stability: development + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// Namespace: azure +const ( + // AzureClientIDKey is the attribute Key conforming to the "azure.client.id" + // semantic conventions. It represents the unique identifier of the client + // instance. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "3ba4827d-4422-483f-b59f-85b74211c11d", "storage-client-1" + AzureClientIDKey = attribute.Key("azure.client.id") + + // AzureCosmosDBConnectionModeKey is the attribute Key conforming to the + // "azure.cosmosdb.connection.mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AzureCosmosDBConnectionModeKey = attribute.Key("azure.cosmosdb.connection.mode") + + // AzureCosmosDBConsistencyLevelKey is the attribute Key conforming to the + // "azure.cosmosdb.consistency.level" semantic conventions. It represents the + // account or request [consistency level]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Eventual", "ConsistentPrefix", "BoundedStaleness", "Strong", + // "Session" + // + // [consistency level]: https://learn.microsoft.com/azure/cosmos-db/consistency-levels + AzureCosmosDBConsistencyLevelKey = attribute.Key("azure.cosmosdb.consistency.level") + + // AzureCosmosDBOperationContactedRegionsKey is the attribute Key conforming to + // the "azure.cosmosdb.operation.contacted_regions" semantic conventions. It + // represents the list of regions contacted during operation in the order that + // they were contacted. If there is more than one region listed, it indicates + // that the operation was performed on multiple regions i.e. cross-regional + // call. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "North Central US", "Australia East", "Australia Southeast" + // Note: Region name matches the format of `displayName` in [Azure Location API] + // + // [Azure Location API]: https://learn.microsoft.com/rest/api/subscription/subscriptions/list-locations?view=rest-subscription-2021-10-01&tabs=HTTP#location + AzureCosmosDBOperationContactedRegionsKey = attribute.Key("azure.cosmosdb.operation.contacted_regions") + + // AzureCosmosDBOperationRequestChargeKey is the attribute Key conforming to the + // "azure.cosmosdb.operation.request_charge" semantic conventions. It represents + // the number of request units consumed by the operation. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 46.18, 1.0 + AzureCosmosDBOperationRequestChargeKey = attribute.Key("azure.cosmosdb.operation.request_charge") + + // AzureCosmosDBRequestBodySizeKey is the attribute Key conforming to the + // "azure.cosmosdb.request.body.size" semantic conventions. It represents the + // request payload size in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AzureCosmosDBRequestBodySizeKey = attribute.Key("azure.cosmosdb.request.body.size") + + // AzureCosmosDBResponseSubStatusCodeKey is the attribute Key conforming to the + // "azure.cosmosdb.response.sub_status_code" semantic conventions. It represents + // the cosmos DB sub status code. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1000, 1002 + AzureCosmosDBResponseSubStatusCodeKey = attribute.Key("azure.cosmosdb.response.sub_status_code") + + // AzureResourceProviderNamespaceKey is the attribute Key conforming to the + // "azure.resource_provider.namespace" semantic conventions. It represents the + // [Azure Resource Provider Namespace] as recognized by the client. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Microsoft.Storage", "Microsoft.KeyVault", "Microsoft.ServiceBus" + // + // [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers + AzureResourceProviderNamespaceKey = attribute.Key("azure.resource_provider.namespace") + + // AzureServiceRequestIDKey is the attribute Key conforming to the + // "azure.service.request.id" semantic conventions. It represents the unique + // identifier of the service request. It's generated by the Azure service and + // returned with the response. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "00000000-0000-0000-0000-000000000000" + AzureServiceRequestIDKey = attribute.Key("azure.service.request.id") +) + +// AzureClientID returns an attribute KeyValue conforming to the +// "azure.client.id" semantic conventions. It represents the unique identifier of +// the client instance. +func AzureClientID(val string) attribute.KeyValue { + return AzureClientIDKey.String(val) +} + +// AzureCosmosDBOperationContactedRegions returns an attribute KeyValue +// conforming to the "azure.cosmosdb.operation.contacted_regions" semantic +// conventions. It represents the list of regions contacted during operation in +// the order that they were contacted. If there is more than one region listed, +// it indicates that the operation was performed on multiple regions i.e. +// cross-regional call. +func AzureCosmosDBOperationContactedRegions(val ...string) attribute.KeyValue { + return AzureCosmosDBOperationContactedRegionsKey.StringSlice(val) +} + +// AzureCosmosDBOperationRequestCharge returns an attribute KeyValue conforming +// to the "azure.cosmosdb.operation.request_charge" semantic conventions. It +// represents the number of request units consumed by the operation. 
+func AzureCosmosDBOperationRequestCharge(val float64) attribute.KeyValue { + return AzureCosmosDBOperationRequestChargeKey.Float64(val) +} + +// AzureCosmosDBRequestBodySize returns an attribute KeyValue conforming to the +// "azure.cosmosdb.request.body.size" semantic conventions. It represents the +// request payload size in bytes. +func AzureCosmosDBRequestBodySize(val int) attribute.KeyValue { + return AzureCosmosDBRequestBodySizeKey.Int(val) +} + +// AzureCosmosDBResponseSubStatusCode returns an attribute KeyValue conforming to +// the "azure.cosmosdb.response.sub_status_code" semantic conventions. It +// represents the cosmos DB sub status code. +func AzureCosmosDBResponseSubStatusCode(val int) attribute.KeyValue { + return AzureCosmosDBResponseSubStatusCodeKey.Int(val) +} + +// AzureResourceProviderNamespace returns an attribute KeyValue conforming to the +// "azure.resource_provider.namespace" semantic conventions. It represents the +// [Azure Resource Provider Namespace] as recognized by the client. +// +// [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers +func AzureResourceProviderNamespace(val string) attribute.KeyValue { + return AzureResourceProviderNamespaceKey.String(val) +} + +// AzureServiceRequestID returns an attribute KeyValue conforming to the +// "azure.service.request.id" semantic conventions. It represents the unique +// identifier of the service request. It's generated by the Azure service and +// returned with the response. +func AzureServiceRequestID(val string) attribute.KeyValue { + return AzureServiceRequestIDKey.String(val) +} + +// Enum values for azure.cosmosdb.connection.mode +var ( + // Gateway (HTTP) connection. + // Stability: development + AzureCosmosDBConnectionModeGateway = AzureCosmosDBConnectionModeKey.String("gateway") + // Direct connection. 
+ // Stability: development + AzureCosmosDBConnectionModeDirect = AzureCosmosDBConnectionModeKey.String("direct") +) + +// Enum values for azure.cosmosdb.consistency.level +var ( + // strong + // Stability: development + AzureCosmosDBConsistencyLevelStrong = AzureCosmosDBConsistencyLevelKey.String("Strong") + // bounded_staleness + // Stability: development + AzureCosmosDBConsistencyLevelBoundedStaleness = AzureCosmosDBConsistencyLevelKey.String("BoundedStaleness") + // session + // Stability: development + AzureCosmosDBConsistencyLevelSession = AzureCosmosDBConsistencyLevelKey.String("Session") + // eventual + // Stability: development + AzureCosmosDBConsistencyLevelEventual = AzureCosmosDBConsistencyLevelKey.String("Eventual") + // consistent_prefix + // Stability: development + AzureCosmosDBConsistencyLevelConsistentPrefix = AzureCosmosDBConsistencyLevelKey.String("ConsistentPrefix") +) + +// Namespace: browser +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: " Not A;Brand 99", "Chromium 99", "Chrome 99" + // Note: This value is intended to be taken from the [UA client hints API] ( + // `navigator.userAgentData.brands`). + // + // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserLanguageKey is the attribute Key conforming to the "browser.language" + // semantic conventions. It represents the preferred language of the user using + // the browser. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "en", "en-US", "fr", "fr-FR" + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. 
+ BrowserLanguageKey = attribute.Key("browser.language") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the browser is + // running on a mobile device. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: This value is intended to be taken from the [UA client hints API] ( + // `navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be + // left unset. + // + // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserPlatformKey is the attribute Key conforming to the "browser.platform" + // semantic conventions. It represents the platform on which the browser is + // running. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Windows", "macOS", "Android" + // Note: This value is intended to be taken from the [UA client hints API] ( + // `navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD + // be left unset in order for the values to be consistent. + // The list of possible values is defined in the + // [W3C User-Agent Client Hints specification]. Note that some (but not all) of + // these values can overlap with values in the + // [`os.type` and `os.name` attributes]. However, for consistency, the values in + // the `browser.platform` attribute should capture the exact value that the user + // agent provides. 
+ // + // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface + // [W3C User-Agent Client Hints specification]: https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform + // [`os.type` and `os.name` attributes]: ./os.md + BrowserPlatformKey = attribute.Key("browser.platform") +) + +// BrowserBrands returns an attribute KeyValue conforming to the "browser.brands" +// semantic conventions. It represents the array of brand name and version +// separated by a space. +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred language +// of the user using the browser. +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the "browser.mobile" +// semantic conventions. It represents a boolean that is true if the browser is +// running on a mobile device. +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running. +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// Namespace: cassandra +const ( + // CassandraConsistencyLevelKey is the attribute Key conforming to the + // "cassandra.consistency.level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from [CQL]. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [CQL]: https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html + CassandraConsistencyLevelKey = attribute.Key("cassandra.consistency.level") + + // CassandraCoordinatorDCKey is the attribute Key conforming to the + // "cassandra.coordinator.dc" semantic conventions. It represents the data + // center of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: us-west-2 + CassandraCoordinatorDCKey = attribute.Key("cassandra.coordinator.dc") + + // CassandraCoordinatorIDKey is the attribute Key conforming to the + // "cassandra.coordinator.id" semantic conventions. It represents the ID of the + // coordinating node for a query. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: be13faa2-8574-4d71-926d-27f16cf8a7af + CassandraCoordinatorIDKey = attribute.Key("cassandra.coordinator.id") + + // CassandraPageSizeKey is the attribute Key conforming to the + // "cassandra.page.size" semantic conventions. It represents the fetch size used + // for paging, i.e. how many rows will be returned at once. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 5000 + CassandraPageSizeKey = attribute.Key("cassandra.page.size") + + // CassandraQueryIdempotentKey is the attribute Key conforming to the + // "cassandra.query.idempotent" semantic conventions. It represents the whether + // or not the query is idempotent. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + CassandraQueryIdempotentKey = attribute.Key("cassandra.query.idempotent") + + // CassandraSpeculativeExecutionCountKey is the attribute Key conforming to the + // "cassandra.speculative_execution.count" semantic conventions. 
It represents + // the number of times a query was speculatively executed. Not set or `0` if the + // query was not executed speculatively. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0, 2 + CassandraSpeculativeExecutionCountKey = attribute.Key("cassandra.speculative_execution.count") +) + +// CassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "cassandra.coordinator.dc" semantic conventions. It represents the data center +// of the coordinating node for a query. +func CassandraCoordinatorDC(val string) attribute.KeyValue { + return CassandraCoordinatorDCKey.String(val) +} + +// CassandraCoordinatorID returns an attribute KeyValue conforming to the +// "cassandra.coordinator.id" semantic conventions. It represents the ID of the +// coordinating node for a query. +func CassandraCoordinatorID(val string) attribute.KeyValue { + return CassandraCoordinatorIDKey.String(val) +} + +// CassandraPageSize returns an attribute KeyValue conforming to the +// "cassandra.page.size" semantic conventions. It represents the fetch size used +// for paging, i.e. how many rows will be returned at once. +func CassandraPageSize(val int) attribute.KeyValue { + return CassandraPageSizeKey.Int(val) +} + +// CassandraQueryIdempotent returns an attribute KeyValue conforming to the +// "cassandra.query.idempotent" semantic conventions. It represents the whether +// or not the query is idempotent. +func CassandraQueryIdempotent(val bool) attribute.KeyValue { + return CassandraQueryIdempotentKey.Bool(val) +} + +// CassandraSpeculativeExecutionCount returns an attribute KeyValue conforming to +// the "cassandra.speculative_execution.count" semantic conventions. It +// represents the number of times a query was speculatively executed. Not set or +// `0` if the query was not executed speculatively. 
+func CassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return CassandraSpeculativeExecutionCountKey.Int(val) +} + +// Enum values for cassandra.consistency.level +var ( + // all + // Stability: development + CassandraConsistencyLevelAll = CassandraConsistencyLevelKey.String("all") + // each_quorum + // Stability: development + CassandraConsistencyLevelEachQuorum = CassandraConsistencyLevelKey.String("each_quorum") + // quorum + // Stability: development + CassandraConsistencyLevelQuorum = CassandraConsistencyLevelKey.String("quorum") + // local_quorum + // Stability: development + CassandraConsistencyLevelLocalQuorum = CassandraConsistencyLevelKey.String("local_quorum") + // one + // Stability: development + CassandraConsistencyLevelOne = CassandraConsistencyLevelKey.String("one") + // two + // Stability: development + CassandraConsistencyLevelTwo = CassandraConsistencyLevelKey.String("two") + // three + // Stability: development + CassandraConsistencyLevelThree = CassandraConsistencyLevelKey.String("three") + // local_one + // Stability: development + CassandraConsistencyLevelLocalOne = CassandraConsistencyLevelKey.String("local_one") + // any + // Stability: development + CassandraConsistencyLevelAny = CassandraConsistencyLevelKey.String("any") + // serial + // Stability: development + CassandraConsistencyLevelSerial = CassandraConsistencyLevelKey.String("serial") + // local_serial + // Stability: development + CassandraConsistencyLevelLocalSerial = CassandraConsistencyLevelKey.String("local_serial") +) + +// Namespace: cicd +const ( + // CICDPipelineActionNameKey is the attribute Key conforming to the + // "cicd.pipeline.action.name" semantic conventions. It represents the kind of + // action a pipeline run is performing. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "BUILD", "RUN", "SYNC" + CICDPipelineActionNameKey = attribute.Key("cicd.pipeline.action.name") + + // CICDPipelineNameKey is the attribute Key conforming to the + // "cicd.pipeline.name" semantic conventions. It represents the human readable + // name of the pipeline within a CI/CD system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Build and Test", "Lint", "Deploy Go Project", + // "deploy_to_environment" + CICDPipelineNameKey = attribute.Key("cicd.pipeline.name") + + // CICDPipelineResultKey is the attribute Key conforming to the + // "cicd.pipeline.result" semantic conventions. It represents the result of a + // pipeline run. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "success", "failure", "timeout", "skipped" + CICDPipelineResultKey = attribute.Key("cicd.pipeline.result") + + // CICDPipelineRunIDKey is the attribute Key conforming to the + // "cicd.pipeline.run.id" semantic conventions. It represents the unique + // identifier of a pipeline run within a CI/CD system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "120912" + CICDPipelineRunIDKey = attribute.Key("cicd.pipeline.run.id") + + // CICDPipelineRunStateKey is the attribute Key conforming to the + // "cicd.pipeline.run.state" semantic conventions. It represents the pipeline + // run goes through these states during its lifecycle. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "pending", "executing", "finalizing" + CICDPipelineRunStateKey = attribute.Key("cicd.pipeline.run.state") + + // CICDPipelineRunURLFullKey is the attribute Key conforming to the + // "cicd.pipeline.run.url.full" semantic conventions. 
It represents the [URL] of + // the pipeline run, providing the complete address in order to locate and + // identify the pipeline run. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763?pr=1075" + // + // [URL]: https://wikipedia.org/wiki/URL + CICDPipelineRunURLFullKey = attribute.Key("cicd.pipeline.run.url.full") + + // CICDPipelineTaskNameKey is the attribute Key conforming to the + // "cicd.pipeline.task.name" semantic conventions. It represents the human + // readable name of a task within a pipeline. Task here most closely aligns with + // a [computing process] in a pipeline. Other terms for tasks include commands, + // steps, and procedures. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Run GoLang Linter", "Go Build", "go-test", "deploy_binary" + // + // [computing process]: https://wikipedia.org/wiki/Pipeline_(computing) + CICDPipelineTaskNameKey = attribute.Key("cicd.pipeline.task.name") + + // CICDPipelineTaskRunIDKey is the attribute Key conforming to the + // "cicd.pipeline.task.run.id" semantic conventions. It represents the unique + // identifier of a task run within a pipeline. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "12097" + CICDPipelineTaskRunIDKey = attribute.Key("cicd.pipeline.task.run.id") + + // CICDPipelineTaskRunResultKey is the attribute Key conforming to the + // "cicd.pipeline.task.run.result" semantic conventions. It represents the + // result of a task run. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "success", "failure", "timeout", "skipped" + CICDPipelineTaskRunResultKey = attribute.Key("cicd.pipeline.task.run.result") + + // CICDPipelineTaskRunURLFullKey is the attribute Key conforming to the + // "cicd.pipeline.task.run.url.full" semantic conventions. It represents the + // [URL] of the pipeline task run, providing the complete address in order to + // locate and identify the pipeline task run. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763/job/26920038674?pr=1075" + // + // [URL]: https://wikipedia.org/wiki/URL + CICDPipelineTaskRunURLFullKey = attribute.Key("cicd.pipeline.task.run.url.full") + + // CICDPipelineTaskTypeKey is the attribute Key conforming to the + // "cicd.pipeline.task.type" semantic conventions. It represents the type of the + // task within a pipeline. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "build", "test", "deploy" + CICDPipelineTaskTypeKey = attribute.Key("cicd.pipeline.task.type") + + // CICDSystemComponentKey is the attribute Key conforming to the + // "cicd.system.component" semantic conventions. It represents the name of a + // component of the CICD system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "controller", "scheduler", "agent" + CICDSystemComponentKey = attribute.Key("cicd.system.component") + + // CICDWorkerIDKey is the attribute Key conforming to the "cicd.worker.id" + // semantic conventions. It represents the unique identifier of a worker within + // a CICD system. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "abc123", "10.0.1.2", "controller" + CICDWorkerIDKey = attribute.Key("cicd.worker.id") + + // CICDWorkerNameKey is the attribute Key conforming to the "cicd.worker.name" + // semantic conventions. It represents the name of a worker within a CICD + // system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "agent-abc", "controller", "Ubuntu LTS" + CICDWorkerNameKey = attribute.Key("cicd.worker.name") + + // CICDWorkerStateKey is the attribute Key conforming to the "cicd.worker.state" + // semantic conventions. It represents the state of a CICD worker / agent. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "idle", "busy", "down" + CICDWorkerStateKey = attribute.Key("cicd.worker.state") + + // CICDWorkerURLFullKey is the attribute Key conforming to the + // "cicd.worker.url.full" semantic conventions. It represents the [URL] of the + // worker, providing the complete address in order to locate and identify the + // worker. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://cicd.example.org/worker/abc123" + // + // [URL]: https://wikipedia.org/wiki/URL + CICDWorkerURLFullKey = attribute.Key("cicd.worker.url.full") +) + +// CICDPipelineName returns an attribute KeyValue conforming to the +// "cicd.pipeline.name" semantic conventions. It represents the human readable +// name of the pipeline within a CI/CD system. +func CICDPipelineName(val string) attribute.KeyValue { + return CICDPipelineNameKey.String(val) +} + +// CICDPipelineRunID returns an attribute KeyValue conforming to the +// "cicd.pipeline.run.id" semantic conventions. It represents the unique +// identifier of a pipeline run within a CI/CD system. 
+func CICDPipelineRunID(val string) attribute.KeyValue { + return CICDPipelineRunIDKey.String(val) +} + +// CICDPipelineRunURLFull returns an attribute KeyValue conforming to the +// "cicd.pipeline.run.url.full" semantic conventions. It represents the [URL] of +// the pipeline run, providing the complete address in order to locate and +// identify the pipeline run. +// +// [URL]: https://wikipedia.org/wiki/URL +func CICDPipelineRunURLFull(val string) attribute.KeyValue { + return CICDPipelineRunURLFullKey.String(val) +} + +// CICDPipelineTaskName returns an attribute KeyValue conforming to the +// "cicd.pipeline.task.name" semantic conventions. It represents the human +// readable name of a task within a pipeline. Task here most closely aligns with +// a [computing process] in a pipeline. Other terms for tasks include commands, +// steps, and procedures. +// +// [computing process]: https://wikipedia.org/wiki/Pipeline_(computing) +func CICDPipelineTaskName(val string) attribute.KeyValue { + return CICDPipelineTaskNameKey.String(val) +} + +// CICDPipelineTaskRunID returns an attribute KeyValue conforming to the +// "cicd.pipeline.task.run.id" semantic conventions. It represents the unique +// identifier of a task run within a pipeline. +func CICDPipelineTaskRunID(val string) attribute.KeyValue { + return CICDPipelineTaskRunIDKey.String(val) +} + +// CICDPipelineTaskRunURLFull returns an attribute KeyValue conforming to the +// "cicd.pipeline.task.run.url.full" semantic conventions. It represents the +// [URL] of the pipeline task run, providing the complete address in order to +// locate and identify the pipeline task run. +// +// [URL]: https://wikipedia.org/wiki/URL +func CICDPipelineTaskRunURLFull(val string) attribute.KeyValue { + return CICDPipelineTaskRunURLFullKey.String(val) +} + +// CICDSystemComponent returns an attribute KeyValue conforming to the +// "cicd.system.component" semantic conventions. 
It represents the name of a +// component of the CICD system. +func CICDSystemComponent(val string) attribute.KeyValue { + return CICDSystemComponentKey.String(val) +} + +// CICDWorkerID returns an attribute KeyValue conforming to the "cicd.worker.id" +// semantic conventions. It represents the unique identifier of a worker within a +// CICD system. +func CICDWorkerID(val string) attribute.KeyValue { + return CICDWorkerIDKey.String(val) +} + +// CICDWorkerName returns an attribute KeyValue conforming to the +// "cicd.worker.name" semantic conventions. It represents the name of a worker +// within a CICD system. +func CICDWorkerName(val string) attribute.KeyValue { + return CICDWorkerNameKey.String(val) +} + +// CICDWorkerURLFull returns an attribute KeyValue conforming to the +// "cicd.worker.url.full" semantic conventions. It represents the [URL] of the +// worker, providing the complete address in order to locate and identify the +// worker. +// +// [URL]: https://wikipedia.org/wiki/URL +func CICDWorkerURLFull(val string) attribute.KeyValue { + return CICDWorkerURLFullKey.String(val) +} + +// Enum values for cicd.pipeline.action.name +var ( + // The pipeline run is executing a build. + // Stability: development + CICDPipelineActionNameBuild = CICDPipelineActionNameKey.String("BUILD") + // The pipeline run is executing. + // Stability: development + CICDPipelineActionNameRun = CICDPipelineActionNameKey.String("RUN") + // The pipeline run is executing a sync. + // Stability: development + CICDPipelineActionNameSync = CICDPipelineActionNameKey.String("SYNC") +) + +// Enum values for cicd.pipeline.result +var ( + // The pipeline run finished successfully. + // Stability: development + CICDPipelineResultSuccess = CICDPipelineResultKey.String("success") + // The pipeline run did not finish successfully, eg. due to a compile error or a + // failing test. Such failures are usually detected by non-zero exit codes of + // the tools executed in the pipeline run. 
+ // Stability: development + CICDPipelineResultFailure = CICDPipelineResultKey.String("failure") + // The pipeline run failed due to an error in the CICD system, eg. due to the + // worker being killed. + // Stability: development + CICDPipelineResultError = CICDPipelineResultKey.String("error") + // A timeout caused the pipeline run to be interrupted. + // Stability: development + CICDPipelineResultTimeout = CICDPipelineResultKey.String("timeout") + // The pipeline run was cancelled, eg. by a user manually cancelling the + // pipeline run. + // Stability: development + CICDPipelineResultCancellation = CICDPipelineResultKey.String("cancellation") + // The pipeline run was skipped, eg. due to a precondition not being met. + // Stability: development + CICDPipelineResultSkip = CICDPipelineResultKey.String("skip") +) + +// Enum values for cicd.pipeline.run.state +var ( + // The run pending state spans from the event triggering the pipeline run until + // the execution of the run starts (eg. time spent in a queue, provisioning + // agents, creating run resources). + // + // Stability: development + CICDPipelineRunStatePending = CICDPipelineRunStateKey.String("pending") + // The executing state spans the execution of any run tasks (eg. build, test). + // Stability: development + CICDPipelineRunStateExecuting = CICDPipelineRunStateKey.String("executing") + // The finalizing state spans from when the run has finished executing (eg. + // cleanup of run resources). + // Stability: development + CICDPipelineRunStateFinalizing = CICDPipelineRunStateKey.String("finalizing") +) + +// Enum values for cicd.pipeline.task.run.result +var ( + // The task run finished successfully. + // Stability: development + CICDPipelineTaskRunResultSuccess = CICDPipelineTaskRunResultKey.String("success") + // The task run did not finish successfully, eg. due to a compile error or a + // failing test. 
Such failures are usually detected by non-zero exit codes of + // the tools executed in the task run. + // Stability: development + CICDPipelineTaskRunResultFailure = CICDPipelineTaskRunResultKey.String("failure") + // The task run failed due to an error in the CICD system, eg. due to the worker + // being killed. + // Stability: development + CICDPipelineTaskRunResultError = CICDPipelineTaskRunResultKey.String("error") + // A timeout caused the task run to be interrupted. + // Stability: development + CICDPipelineTaskRunResultTimeout = CICDPipelineTaskRunResultKey.String("timeout") + // The task run was cancelled, eg. by a user manually cancelling the task run. + // Stability: development + CICDPipelineTaskRunResultCancellation = CICDPipelineTaskRunResultKey.String("cancellation") + // The task run was skipped, eg. due to a precondition not being met. + // Stability: development + CICDPipelineTaskRunResultSkip = CICDPipelineTaskRunResultKey.String("skip") +) + +// Enum values for cicd.pipeline.task.type +var ( + // build + // Stability: development + CICDPipelineTaskTypeBuild = CICDPipelineTaskTypeKey.String("build") + // test + // Stability: development + CICDPipelineTaskTypeTest = CICDPipelineTaskTypeKey.String("test") + // deploy + // Stability: development + CICDPipelineTaskTypeDeploy = CICDPipelineTaskTypeKey.String("deploy") +) + +// Enum values for cicd.worker.state +var ( + // The worker is not performing work for the CICD system. It is available to the + // CICD system to perform work on (online / idle). + // Stability: development + CICDWorkerStateAvailable = CICDWorkerStateKey.String("available") + // The worker is performing work for the CICD system. + // Stability: development + CICDWorkerStateBusy = CICDWorkerStateKey.String("busy") + // The worker is not available to the CICD system (disconnected / down). 
+ // Stability: development + CICDWorkerStateOffline = CICDWorkerStateKey.String("offline") +) + +// Namespace: client +const ( + // ClientAddressKey is the attribute Key conforming to the "client.address" + // semantic conventions. It represents the client address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix domain + // socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "client.example.com", "10.1.2.80", "/tmp/my.sock" + // Note: When observed from the server side, and when communicating through an + // intermediary, `client.address` SHOULD represent the client address behind any + // intermediaries, for example proxies, if it's available. + ClientAddressKey = attribute.Key("client.address") + + // ClientPortKey is the attribute Key conforming to the "client.port" semantic + // conventions. It represents the client port number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 65123 + // Note: When observed from the server side, and when communicating through an + // intermediary, `client.port` SHOULD represent the client port behind any + // intermediaries, for example proxies, if it's available. + ClientPortKey = attribute.Key("client.port") +) + +// ClientAddress returns an attribute KeyValue conforming to the "client.address" +// semantic conventions. It represents the client address - domain name if +// available without reverse DNS lookup; otherwise, IP address or Unix domain +// socket name. +func ClientAddress(val string) attribute.KeyValue { + return ClientAddressKey.String(val) +} + +// ClientPort returns an attribute KeyValue conforming to the "client.port" +// semantic conventions. It represents the client port number. 
+func ClientPort(val int) attribute.KeyValue { + return ClientPortKey.Int(val) +} + +// Namespace: cloud +const ( + // CloudAccountIDKey is the attribute Key conforming to the "cloud.account.id" + // semantic conventions. It represents the cloud account ID the resource is + // assigned to. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "111111111111", "opentelemetry" + CloudAccountIDKey = attribute.Key("cloud.account.id") + + // CloudAvailabilityZoneKey is the attribute Key conforming to the + // "cloud.availability_zone" semantic conventions. It represents the cloud + // regions often have multiple, isolated locations known as zones to increase + // availability. Availability zone represents the zone where the resource is + // running. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "us-east-1c" + // Note: Availability zones are called "zones" on Alibaba Cloud and Google + // Cloud. + CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + + // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" + // semantic conventions. It represents the cloud platform in use. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`. + CloudPlatformKey = attribute.Key("cloud.platform") + + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" semantic + // conventions. It represents the geographical region within a cloud provider. 
+ // When associated with a resource, this attribute specifies the region where
+ // the resource operates. When calling services or APIs deployed on a cloud,
+ // this attribute identifies the region where the called destination is
+ // deployed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "us-central1", "us-east-1"
+ // Note: Refer to your provider's docs to see the available regions, for example
+ // [Alibaba Cloud regions], [AWS regions], [Azure regions],
+ // [Google Cloud regions], or [Tencent Cloud regions].
+ //
+ // [Alibaba Cloud regions]: https://www.alibabacloud.com/help/doc-detail/40654.htm
+ // [AWS regions]: https://aws.amazon.com/about-aws/global-infrastructure/regions_az/
+ // [Azure regions]: https://azure.microsoft.com/global-infrastructure/geographies/
+ // [Google Cloud regions]: https://cloud.google.com/about/locations
+ // [Tencent Cloud regions]: https://www.tencentcloud.com/document/product/213/6091
+ CloudRegionKey = attribute.Key("cloud.region")
+
+ // CloudResourceIDKey is the attribute Key conforming to the "cloud.resource_id"
+ // semantic conventions. It represents the cloud provider-specific native
+ // identifier of the monitored cloud resource (e.g. an [ARN] on AWS, a
+ // [fully qualified resource ID] on Azure, a [full resource name] on GCP).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function",
+ // "//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID",
+ // "/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/
+ // <RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>"
+ // Note: On some cloud providers, it may not be possible to determine the full
+ // ID at startup,
+ // so it may be necessary to set `cloud.resource_id` as a span attribute
+ // instead.
+ //
+ // The exact value to use for `cloud.resource_id` depends on the cloud provider.
+ // The following well-known definitions MUST be used if you set this attribute
+ // and they apply:
+ //
+ // - **AWS Lambda:** The function [ARN].
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias suffix]
+ // with the resolved function version, as the same runtime instance may be
+ // invocable with
+ // multiple different aliases.
+ // - **GCP:** The [URI of the resource]
+ // - **Azure:** The [Fully Qualified Resource ID] of the invoked function,
+ // *not* the function app, having the form
+ //
+ // `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`
+ // .
+ // This means that a span attribute MUST be used, as an Azure function app
+ // can host multiple functions that would usually share
+ // a TracerProvider.
+ //
+ //
+ // [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+ // [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id
+ // [full resource name]: https://google.aip.dev/122#full-resource-names
+ // [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+ // [alias suffix]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html
+ // [URI of the resource]: https://cloud.google.com/iam/docs/full-resource-names
+ // [Fully Qualified Resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id
+ CloudResourceIDKey = attribute.Key("cloud.resource_id")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. 
It represents the cloud +// regions often have multiple, isolated locations known as zones to increase +// availability. Availability zone represents the zone where the resource is +// running. +func CloudAvailabilityZone(val string) attribute.KeyValue { + return CloudAvailabilityZoneKey.String(val) +} + +// CloudRegion returns an attribute KeyValue conforming to the "cloud.region" +// semantic conventions. It represents the geographical region within a cloud +// provider. When associated with a resource, this attribute specifies the region +// where the resource operates. When calling services or APIs deployed on a +// cloud, this attribute identifies the region where the called destination is +// deployed. +func CloudRegion(val string) attribute.KeyValue { + return CloudRegionKey.String(val) +} + +// CloudResourceID returns an attribute KeyValue conforming to the +// "cloud.resource_id" semantic conventions. It represents the cloud +// provider-specific native identifier of the monitored cloud resource (e.g. an +// [ARN] on AWS, a [fully qualified resource ID] on Azure, a [full resource name] +// on GCP). 
+// +// [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html +// [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id +// [full resource name]: https://google.aip.dev/122#full-resource-names +func CloudResourceID(val string) attribute.KeyValue { + return CloudResourceIDKey.String(val) +} + +// Enum values for cloud.platform +var ( + // Alibaba Cloud Elastic Compute Service + // Stability: development + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + // Stability: development + CloudPlatformAlibabaCloudFC = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + // Stability: development + CloudPlatformAlibabaCloudOpenShift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + // Stability: development + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + // Stability: development + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + // Stability: development + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + // Stability: development + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + // Stability: development + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + // Stability: development + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + // Stability: development + CloudPlatformAWSOpenShift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + // Stability: development + CloudPlatformAzureVM = CloudPlatformKey.String("azure.vm") + // Azure Container Apps + // Stability: development + CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure.container_apps") + // Azure Container 
Instances + // Stability: development + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure.container_instances") + // Azure Kubernetes Service + // Stability: development + CloudPlatformAzureAKS = CloudPlatformKey.String("azure.aks") + // Azure Functions + // Stability: development + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure.functions") + // Azure App Service + // Stability: development + CloudPlatformAzureAppService = CloudPlatformKey.String("azure.app_service") + // Azure Red Hat OpenShift + // Stability: development + CloudPlatformAzureOpenShift = CloudPlatformKey.String("azure.openshift") + // Google Bare Metal Solution (BMS) + // Stability: development + CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") + // Google Cloud Compute Engine (GCE) + // Stability: development + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + // Stability: development + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + // Stability: development + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + // Stability: development + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + // Stability: development + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + // Stability: development + CloudPlatformGCPOpenShift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + // Stability: development + CloudPlatformIBMCloudOpenShift = CloudPlatformKey.String("ibm_cloud_openshift") + // Compute on Oracle Cloud Infrastructure (OCI) + // Stability: development + CloudPlatformOracleCloudCompute = CloudPlatformKey.String("oracle_cloud_compute") + // Kubernetes Engine (OKE) on Oracle Cloud 
Infrastructure (OCI) + // Stability: development + CloudPlatformOracleCloudOKE = CloudPlatformKey.String("oracle_cloud_oke") + // Tencent Cloud Cloud Virtual Machine (CVM) + // Stability: development + CloudPlatformTencentCloudCVM = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + // Stability: development + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + // Stability: development + CloudPlatformTencentCloudSCF = CloudPlatformKey.String("tencent_cloud_scf") +) + +// Enum values for cloud.provider +var ( + // Alibaba Cloud + // Stability: development + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + // Stability: development + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + // Stability: development + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + // Stability: development + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + // Stability: development + CloudProviderHeroku = CloudProviderKey.String("heroku") + // IBM Cloud + // Stability: development + CloudProviderIBMCloud = CloudProviderKey.String("ibm_cloud") + // Oracle Cloud Infrastructure (OCI) + // Stability: development + CloudProviderOracleCloud = CloudProviderKey.String("oracle_cloud") + // Tencent Cloud + // Stability: development + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +// Namespace: cloudevents +const ( + // CloudEventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. It represents the [event_id] + // uniquely identifies the event. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123e4567-e89b-12d3-a456-426614174000", "0001" + // + // [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id + CloudEventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudEventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the [source] + // identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://github.com/cloudevents", "/cloudevents/spec/pull/123", + // "my-service" + // + // [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1 + CloudEventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudEventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents specification] which the event uses. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0 + // + // [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion + CloudEventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudEventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. It represents the [subject] + // of the event in the context of the event producer (identified by source). 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: mynewfile.jpg + // + // [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject + CloudEventsEventSubjectKey = attribute.Key("cloudevents.event_subject") + + // CloudEventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. It represents the [event_type] + // contains a value describing the type of event related to the originating + // occurrence. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "com.github.pull_request.opened", "com.example.object.deleted.v2" + // + // [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type + CloudEventsEventTypeKey = attribute.Key("cloudevents.event_type") +) + +// CloudEventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the [event_id] +// uniquely identifies the event. +// +// [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id +func CloudEventsEventID(val string) attribute.KeyValue { + return CloudEventsEventIDKey.String(val) +} + +// CloudEventsEventSource returns an attribute KeyValue conforming to the +// "cloudevents.event_source" semantic conventions. It represents the [source] +// identifies the context in which an event happened. +// +// [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1 +func CloudEventsEventSource(val string) attribute.KeyValue { + return CloudEventsEventSourceKey.String(val) +} + +// CloudEventsEventSpecVersion returns an attribute KeyValue conforming to the +// "cloudevents.event_spec_version" semantic conventions. It represents the +// [version of the CloudEvents specification] which the event uses. 
+// +// [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion +func CloudEventsEventSpecVersion(val string) attribute.KeyValue { + return CloudEventsEventSpecVersionKey.String(val) +} + +// CloudEventsEventSubject returns an attribute KeyValue conforming to the +// "cloudevents.event_subject" semantic conventions. It represents the [subject] +// of the event in the context of the event producer (identified by source). +// +// [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject +func CloudEventsEventSubject(val string) attribute.KeyValue { + return CloudEventsEventSubjectKey.String(val) +} + +// CloudEventsEventType returns an attribute KeyValue conforming to the +// "cloudevents.event_type" semantic conventions. It represents the [event_type] +// contains a value describing the type of event related to the originating +// occurrence. +// +// [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type +func CloudEventsEventType(val string) attribute.KeyValue { + return CloudEventsEventTypeKey.String(val) +} + +// Namespace: cloudfoundry +const ( + // CloudFoundryAppIDKey is the attribute Key conforming to the + // "cloudfoundry.app.id" semantic conventions. It represents the guid of the + // application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.application_id`. This is the same value as + // reported by `cf app --guid`. + CloudFoundryAppIDKey = attribute.Key("cloudfoundry.app.id") + + // CloudFoundryAppInstanceIDKey is the attribute Key conforming to the + // "cloudfoundry.app.instance.id" semantic conventions. It represents the index + // of the application instance. 0 when just one instance is active. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0", "1" + // Note: CloudFoundry defines the `instance_id` in the [Loggregator v2 envelope] + // . + // It is used for logs and metrics emitted by CloudFoundry. It is + // supposed to contain the application instance index for applications + // deployed on the runtime. + // + // Application instrumentation should use the value from environment + // variable `CF_INSTANCE_INDEX`. + // + // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope + CloudFoundryAppInstanceIDKey = attribute.Key("cloudfoundry.app.instance.id") + + // CloudFoundryAppNameKey is the attribute Key conforming to the + // "cloudfoundry.app.name" semantic conventions. It represents the name of the + // application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-app-name" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.application_name`. This is the same value + // as reported by `cf apps`. + CloudFoundryAppNameKey = attribute.Key("cloudfoundry.app.name") + + // CloudFoundryOrgIDKey is the attribute Key conforming to the + // "cloudfoundry.org.id" semantic conventions. It represents the guid of the + // CloudFoundry org the application is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.org_id`. This is the same value as + // reported by `cf org --guid`. + CloudFoundryOrgIDKey = attribute.Key("cloudfoundry.org.id") + + // CloudFoundryOrgNameKey is the attribute Key conforming to the + // "cloudfoundry.org.name" semantic conventions. It represents the name of the + // CloudFoundry organization the app is running in. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-org-name" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.org_name`. This is the same value as + // reported by `cf orgs`. + CloudFoundryOrgNameKey = attribute.Key("cloudfoundry.org.name") + + // CloudFoundryProcessIDKey is the attribute Key conforming to the + // "cloudfoundry.process.id" semantic conventions. It represents the UID + // identifying the process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.process_id`. It is supposed to be equal to + // `VCAP_APPLICATION.app_id` for applications deployed to the runtime. + // For system components, this could be the actual PID. + CloudFoundryProcessIDKey = attribute.Key("cloudfoundry.process.id") + + // CloudFoundryProcessTypeKey is the attribute Key conforming to the + // "cloudfoundry.process.type" semantic conventions. It represents the type of + // process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "web" + // Note: CloudFoundry applications can consist of multiple jobs. Usually the + // main process will be of type `web`. There can be additional background + // tasks or side-cars with different process types. + CloudFoundryProcessTypeKey = attribute.Key("cloudfoundry.process.type") + + // CloudFoundrySpaceIDKey is the attribute Key conforming to the + // "cloudfoundry.space.id" semantic conventions. It represents the guid of the + // CloudFoundry space the application is running in. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.space_id`. This is the same value as + // reported by `cf space --guid`. + CloudFoundrySpaceIDKey = attribute.Key("cloudfoundry.space.id") + + // CloudFoundrySpaceNameKey is the attribute Key conforming to the + // "cloudfoundry.space.name" semantic conventions. It represents the name of the + // CloudFoundry space the application is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-space-name" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.space_name`. This is the same value as + // reported by `cf spaces`. + CloudFoundrySpaceNameKey = attribute.Key("cloudfoundry.space.name") + + // CloudFoundrySystemIDKey is the attribute Key conforming to the + // "cloudfoundry.system.id" semantic conventions. It represents a guid or + // another name describing the event source. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cf/gorouter" + // Note: CloudFoundry defines the `source_id` in the [Loggregator v2 envelope]. + // It is used for logs and metrics emitted by CloudFoundry. It is + // supposed to contain the component name, e.g. "gorouter", for + // CloudFoundry components. + // + // When system components are instrumented, values from the + // [Bosh spec] + // should be used. The `system.id` should be set to + // `spec.deployment/spec.name`. 
+ // + // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope + // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec + CloudFoundrySystemIDKey = attribute.Key("cloudfoundry.system.id") + + // CloudFoundrySystemInstanceIDKey is the attribute Key conforming to the + // "cloudfoundry.system.instance.id" semantic conventions. It represents a guid + // describing the concrete instance of the event source. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: CloudFoundry defines the `instance_id` in the [Loggregator v2 envelope] + // . + // It is used for logs and metrics emitted by CloudFoundry. It is + // supposed to contain the vm id for CloudFoundry components. + // + // When system components are instrumented, values from the + // [Bosh spec] + // should be used. The `system.instance.id` should be set to `spec.id`. + // + // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope + // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec + CloudFoundrySystemInstanceIDKey = attribute.Key("cloudfoundry.system.instance.id") +) + +// CloudFoundryAppID returns an attribute KeyValue conforming to the +// "cloudfoundry.app.id" semantic conventions. It represents the guid of the +// application. +func CloudFoundryAppID(val string) attribute.KeyValue { + return CloudFoundryAppIDKey.String(val) +} + +// CloudFoundryAppInstanceID returns an attribute KeyValue conforming to the +// "cloudfoundry.app.instance.id" semantic conventions. It represents the index +// of the application instance. 0 when just one instance is active. +func CloudFoundryAppInstanceID(val string) attribute.KeyValue { + return CloudFoundryAppInstanceIDKey.String(val) +} + +// CloudFoundryAppName returns an attribute KeyValue conforming to the +// "cloudfoundry.app.name" semantic conventions. 
It represents the name of the +// application. +func CloudFoundryAppName(val string) attribute.KeyValue { + return CloudFoundryAppNameKey.String(val) +} + +// CloudFoundryOrgID returns an attribute KeyValue conforming to the +// "cloudfoundry.org.id" semantic conventions. It represents the guid of the +// CloudFoundry org the application is running in. +func CloudFoundryOrgID(val string) attribute.KeyValue { + return CloudFoundryOrgIDKey.String(val) +} + +// CloudFoundryOrgName returns an attribute KeyValue conforming to the +// "cloudfoundry.org.name" semantic conventions. It represents the name of the +// CloudFoundry organization the app is running in. +func CloudFoundryOrgName(val string) attribute.KeyValue { + return CloudFoundryOrgNameKey.String(val) +} + +// CloudFoundryProcessID returns an attribute KeyValue conforming to the +// "cloudfoundry.process.id" semantic conventions. It represents the UID +// identifying the process. +func CloudFoundryProcessID(val string) attribute.KeyValue { + return CloudFoundryProcessIDKey.String(val) +} + +// CloudFoundryProcessType returns an attribute KeyValue conforming to the +// "cloudfoundry.process.type" semantic conventions. It represents the type of +// process. +func CloudFoundryProcessType(val string) attribute.KeyValue { + return CloudFoundryProcessTypeKey.String(val) +} + +// CloudFoundrySpaceID returns an attribute KeyValue conforming to the +// "cloudfoundry.space.id" semantic conventions. It represents the guid of the +// CloudFoundry space the application is running in. +func CloudFoundrySpaceID(val string) attribute.KeyValue { + return CloudFoundrySpaceIDKey.String(val) +} + +// CloudFoundrySpaceName returns an attribute KeyValue conforming to the +// "cloudfoundry.space.name" semantic conventions. It represents the name of the +// CloudFoundry space the application is running in. 
+func CloudFoundrySpaceName(val string) attribute.KeyValue { + return CloudFoundrySpaceNameKey.String(val) +} + +// CloudFoundrySystemID returns an attribute KeyValue conforming to the +// "cloudfoundry.system.id" semantic conventions. It represents a guid or another +// name describing the event source. +func CloudFoundrySystemID(val string) attribute.KeyValue { + return CloudFoundrySystemIDKey.String(val) +} + +// CloudFoundrySystemInstanceID returns an attribute KeyValue conforming to the +// "cloudfoundry.system.instance.id" semantic conventions. It represents a guid +// describing the concrete instance of the event source. +func CloudFoundrySystemInstanceID(val string) attribute.KeyValue { + return CloudFoundrySystemInstanceIDKey.String(val) +} + +// Namespace: code +const ( + // CodeColumnNumberKey is the attribute Key conforming to the + // "code.column.number" semantic conventions. It represents the column number in + // `code.file.path` best representing the operation. It SHOULD point within the + // code unit named in `code.function.name`. This attribute MUST NOT be used on + // the Profile signal since the data is already captured in 'message Line'. This + // constraint is imposed to prevent redundancy and maintain data integrity. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + CodeColumnNumberKey = attribute.Key("code.column.number") + + // CodeFilePathKey is the attribute Key conforming to the "code.file.path" + // semantic conventions. It represents the source code file name that identifies + // the code unit as uniquely as possible (preferably an absolute file path). + // This attribute MUST NOT be used on the Profile signal since the data is + // already captured in 'message Function'. This constraint is imposed to prevent + // redundancy and maintain data integrity. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: /usr/local/MyApplication/content_root/app/index.php + CodeFilePathKey = attribute.Key("code.file.path") + + // CodeFunctionNameKey is the attribute Key conforming to the + // "code.function.name" semantic conventions. It represents the method or + // function fully-qualified name without arguments. The value should fit the + // natural representation of the language runtime, which is also likely the same + // used within `code.stacktrace` attribute value. This attribute MUST NOT be + // used on the Profile signal since the data is already captured in 'message + // Function'. This constraint is imposed to prevent redundancy and maintain data + // integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "com.example.MyHttpService.serveRequest", + // "GuzzleHttp\Client::transfer", "fopen" + // Note: Values and format depends on each language runtime, thus it is + // impossible to provide an exhaustive list of examples. + // The values are usually the same (or prefixes of) the ones found in native + // stack trace representation stored in + // `code.stacktrace` without information on arguments. + // + // Examples: + // + // - Java method: `com.example.MyHttpService.serveRequest` + // - Java anonymous class method: `com.mycompany.Main$1.myMethod` + // - Java lambda method: + // `com.mycompany.Main$$Lambda/0x0000748ae4149c00.myMethod` + // - PHP function: `GuzzleHttp\Client::transfer` + // - Go function: `github.com/my/repo/pkg.foo.func5` + // - Elixir: `OpenTelemetry.Ctx.new` + // - Erlang: `opentelemetry_ctx:new` + // - Rust: `playground::my_module::my_cool_func` + // - C function: `fopen` + CodeFunctionNameKey = attribute.Key("code.function.name") + + // CodeLineNumberKey is the attribute Key conforming to the "code.line.number" + // semantic conventions. 
It represents the line number in `code.file.path` best + // representing the operation. It SHOULD point within the code unit named in + // `code.function.name`. This attribute MUST NOT be used on the Profile signal + // since the data is already captured in 'message Line'. This constraint is + // imposed to prevent redundancy and maintain data integrity. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + CodeLineNumberKey = attribute.Key("code.line.number") + + // CodeStacktraceKey is the attribute Key conforming to the "code.stacktrace" + // semantic conventions. It represents a stacktrace as a string in the natural + // representation for the language runtime. The representation is identical to + // [`exception.stacktrace`]. This attribute MUST NOT be used on the Profile + // signal since the data is already captured in 'message Location'. This + // constraint is imposed to prevent redundancy and maintain data integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at + // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at + // com.example.GenerateTrace.main(GenerateTrace.java:5) + // + // [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation + CodeStacktraceKey = attribute.Key("code.stacktrace") +) + +// CodeColumnNumber returns an attribute KeyValue conforming to the +// "code.column.number" semantic conventions. It represents the column number in +// `code.file.path` best representing the operation. It SHOULD point within the +// code unit named in `code.function.name`. This attribute MUST NOT be used on +// the Profile signal since the data is already captured in 'message Line'. This +// constraint is imposed to prevent redundancy and maintain data integrity. 
+func CodeColumnNumber(val int) attribute.KeyValue { + return CodeColumnNumberKey.Int(val) +} + +// CodeFilePath returns an attribute KeyValue conforming to the "code.file.path" +// semantic conventions. It represents the source code file name that identifies +// the code unit as uniquely as possible (preferably an absolute file path). This +// attribute MUST NOT be used on the Profile signal since the data is already +// captured in 'message Function'. This constraint is imposed to prevent +// redundancy and maintain data integrity. +func CodeFilePath(val string) attribute.KeyValue { + return CodeFilePathKey.String(val) +} + +// CodeFunctionName returns an attribute KeyValue conforming to the +// "code.function.name" semantic conventions. It represents the method or +// function fully-qualified name without arguments. The value should fit the +// natural representation of the language runtime, which is also likely the same +// used within `code.stacktrace` attribute value. This attribute MUST NOT be used +// on the Profile signal since the data is already captured in 'message +// Function'. This constraint is imposed to prevent redundancy and maintain data +// integrity. +func CodeFunctionName(val string) attribute.KeyValue { + return CodeFunctionNameKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the +// "code.line.number" semantic conventions. It represents the line number in +// `code.file.path` best representing the operation. It SHOULD point within the +// code unit named in `code.function.name`. This attribute MUST NOT be used on +// the Profile signal since the data is already captured in 'message Line'. This +// constraint is imposed to prevent redundancy and maintain data integrity. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeStacktrace returns an attribute KeyValue conforming to the +// "code.stacktrace" semantic conventions. 
It represents a stacktrace as a string +// in the natural representation for the language runtime. The representation is +// identical to [`exception.stacktrace`]. This attribute MUST NOT be used on the +// Profile signal since the data is already captured in 'message Location'. This +// constraint is imposed to prevent redundancy and maintain data integrity. +// +// [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation +func CodeStacktrace(val string) attribute.KeyValue { + return CodeStacktraceKey.String(val) +} + +// Namespace: container +const ( + // ContainerCommandKey is the attribute Key conforming to the + // "container.command" semantic conventions. It represents the command used to + // run the container (i.e. the command name). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otelcontribcol" + // Note: If using embedded credentials or sensitive data, it is recommended to + // remove them to prevent potential leakage. + ContainerCommandKey = attribute.Key("container.command") + + // ContainerCommandArgsKey is the attribute Key conforming to the + // "container.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) run by the + // container. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otelcontribcol", "--config", "config.yaml" + ContainerCommandArgsKey = attribute.Key("container.command_args") + + // ContainerCommandLineKey is the attribute Key conforming to the + // "container.command_line" semantic conventions. It represents the full command + // run by the container as a single string representing the full command. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otelcontribcol --config config.yaml" + ContainerCommandLineKey = attribute.Key("container.command_line") + + // ContainerCSIPluginNameKey is the attribute Key conforming to the + // "container.csi.plugin.name" semantic conventions. It represents the name of + // the CSI ([Container Storage Interface]) plugin used by the volume. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "pd.csi.storage.gke.io" + // Note: This can sometimes be referred to as a "driver" in CSI implementations. + // This should represent the `name` field of the GetPluginInfo RPC. + // + // [Container Storage Interface]: https://github.com/container-storage-interface/spec + ContainerCSIPluginNameKey = attribute.Key("container.csi.plugin.name") + + // ContainerCSIVolumeIDKey is the attribute Key conforming to the + // "container.csi.volume.id" semantic conventions. It represents the unique + // volume ID returned by the CSI ([Container Storage Interface]) plugin. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "projects/my-gcp-project/zones/my-gcp-zone/disks/my-gcp-disk" + // Note: This can sometimes be referred to as a "volume handle" in CSI + // implementations. This should represent the `Volume.volume_id` field in CSI + // spec. + // + // [Container Storage Interface]: https://github.com/container-storage-interface/spec + ContainerCSIVolumeIDKey = attribute.Key("container.csi.volume.id") + + // ContainerIDKey is the attribute Key conforming to the "container.id" semantic + // conventions. It represents the container ID. Usually a UUID, as for example + // used to [identify Docker containers]. The UUID might be abbreviated. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "a3bf90e006b2" + // + // [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification + ContainerIDKey = attribute.Key("container.id") + + // ContainerImageIDKey is the attribute Key conforming to the + // "container.image.id" semantic conventions. It represents the runtime specific + // image identifier. Usually a hash algorithm followed by a UUID. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f" + // Note: Docker defines a sha256 of the image id; `container.image.id` + // corresponds to the `Image` field from the Docker container inspect [API] + // endpoint. + // K8s defines a link to the container registry repository with digest + // `"imageID": "registry.azurecr.io /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"` + // . + // The ID is assigned by the container runtime and can vary in different + // environments. Consider using `oci.manifest.digest` if it is important to + // identify the same image in different environments/runtimes. + // + // [API]: https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect + ContainerImageIDKey = attribute.Key("container.image.id") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. It represents the name of the + // image the container was built on. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "gcr.io/opentelemetry/operator" + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageRepoDigestsKey is the attribute Key conforming to the + // "container.image.repo_digests" semantic conventions. 
It represents the repo + // digests of the container image as provided by the container runtime. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb", + // "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" + // Note: [Docker] and [CRI] report those under the `RepoDigests` field. + // + // [Docker]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect + // [CRI]: https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238 + ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests") + + // ContainerImageTagsKey is the attribute Key conforming to the + // "container.image.tags" semantic conventions. It represents the container + // image tags. An example can be found in [Docker Image Inspect]. Should be only + // the `<tag>` section of the full name for example from + // `registry.example.com/my-org/my-image:<tag>`. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "v1.27.1", "3.5.7-0" + // + // [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect + ContainerImageTagsKey = attribute.Key("container.image.tags") + + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-autoconf" + ContainerNameKey = attribute.Key("container.name") + + // ContainerRuntimeKey is the attribute Key conforming to the + // "container.runtime" semantic conventions. It represents the container runtime + // managing this container. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "docker", "containerd", "rkt" + ContainerRuntimeKey = attribute.Key("container.runtime") +) + +// ContainerCommand returns an attribute KeyValue conforming to the +// "container.command" semantic conventions. It represents the command used to +// run the container (i.e. the command name). +func ContainerCommand(val string) attribute.KeyValue { + return ContainerCommandKey.String(val) +} + +// ContainerCommandArgs returns an attribute KeyValue conforming to the +// "container.command_args" semantic conventions. It represents the all the +// command arguments (including the command/executable itself) run by the +// container. +func ContainerCommandArgs(val ...string) attribute.KeyValue { + return ContainerCommandArgsKey.StringSlice(val) +} + +// ContainerCommandLine returns an attribute KeyValue conforming to the +// "container.command_line" semantic conventions. It represents the full command +// run by the container as a single string representing the full command. +func ContainerCommandLine(val string) attribute.KeyValue { + return ContainerCommandLineKey.String(val) +} + +// ContainerCSIPluginName returns an attribute KeyValue conforming to the +// "container.csi.plugin.name" semantic conventions. It represents the name of +// the CSI ([Container Storage Interface]) plugin used by the volume. +// +// [Container Storage Interface]: https://github.com/container-storage-interface/spec +func ContainerCSIPluginName(val string) attribute.KeyValue { + return ContainerCSIPluginNameKey.String(val) +} + +// ContainerCSIVolumeID returns an attribute KeyValue conforming to the +// "container.csi.volume.id" semantic conventions. It represents the unique +// volume ID returned by the CSI ([Container Storage Interface]) plugin. 
+// +// [Container Storage Interface]: https://github.com/container-storage-interface/spec +func ContainerCSIVolumeID(val string) attribute.KeyValue { + return ContainerCSIVolumeIDKey.String(val) +} + +// ContainerID returns an attribute KeyValue conforming to the "container.id" +// semantic conventions. It represents the container ID. Usually a UUID, as for +// example used to [identify Docker containers]. The UUID might be abbreviated. +// +// [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification +func ContainerID(val string) attribute.KeyValue { + return ContainerIDKey.String(val) +} + +// ContainerImageID returns an attribute KeyValue conforming to the +// "container.image.id" semantic conventions. It represents the runtime specific +// image identifier. Usually a hash algorithm followed by a UUID. +func ContainerImageID(val string) attribute.KeyValue { + return ContainerImageIDKey.String(val) +} + +// ContainerImageName returns an attribute KeyValue conforming to the +// "container.image.name" semantic conventions. It represents the name of the +// image the container was built on. +func ContainerImageName(val string) attribute.KeyValue { + return ContainerImageNameKey.String(val) +} + +// ContainerImageRepoDigests returns an attribute KeyValue conforming to the +// "container.image.repo_digests" semantic conventions. It represents the repo +// digests of the container image as provided by the container runtime. +func ContainerImageRepoDigests(val ...string) attribute.KeyValue { + return ContainerImageRepoDigestsKey.StringSlice(val) +} + +// ContainerImageTags returns an attribute KeyValue conforming to the +// "container.image.tags" semantic conventions. It represents the container image +// tags. An example can be found in [Docker Image Inspect]. Should be only the +// `<tag>` section of the full name for example from +// `registry.example.com/my-org/my-image:<tag>`. 
+// +// [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect +func ContainerImageTags(val ...string) attribute.KeyValue { + return ContainerImageTagsKey.StringSlice(val) +} + +// ContainerLabel returns an attribute KeyValue conforming to the +// "container.label" semantic conventions. It represents the container labels, +// `<key>` being the label name, the value being the label value. +func ContainerLabel(key string, val string) attribute.KeyValue { + return attribute.String("container.label."+key, val) +} + +// ContainerName returns an attribute KeyValue conforming to the "container.name" +// semantic conventions. It represents the container name used by container +// runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container runtime +// managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// Namespace: cpu +const ( + // CPULogicalNumberKey is the attribute Key conforming to the + // "cpu.logical_number" semantic conventions. It represents the logical CPU + // number [0..n-1]. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1 + CPULogicalNumberKey = attribute.Key("cpu.logical_number") + + // CPUModeKey is the attribute Key conforming to the "cpu.mode" semantic + // conventions. It represents the mode of the CPU. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "user", "system" + CPUModeKey = attribute.Key("cpu.mode") +) + +// CPULogicalNumber returns an attribute KeyValue conforming to the +// "cpu.logical_number" semantic conventions. It represents the logical CPU +// number [0..n-1]. 
+func CPULogicalNumber(val int) attribute.KeyValue { + return CPULogicalNumberKey.Int(val) +} + +// Enum values for cpu.mode +var ( + // user + // Stability: development + CPUModeUser = CPUModeKey.String("user") + // system + // Stability: development + CPUModeSystem = CPUModeKey.String("system") + // nice + // Stability: development + CPUModeNice = CPUModeKey.String("nice") + // idle + // Stability: development + CPUModeIdle = CPUModeKey.String("idle") + // iowait + // Stability: development + CPUModeIOWait = CPUModeKey.String("iowait") + // interrupt + // Stability: development + CPUModeInterrupt = CPUModeKey.String("interrupt") + // steal + // Stability: development + CPUModeSteal = CPUModeKey.String("steal") + // kernel + // Stability: development + CPUModeKernel = CPUModeKey.String("kernel") +) + +// Namespace: db +const ( + // DBClientConnectionPoolNameKey is the attribute Key conforming to the + // "db.client.connection.pool.name" semantic conventions. It represents the name + // of the connection pool; unique within the instrumented application. In case + // the connection pool implementation doesn't provide a name, instrumentation + // SHOULD use a combination of parameters that would make the name unique, for + // example, combining attributes `server.address`, `server.port`, and + // `db.namespace`, formatted as `server.address:server.port/db.namespace`. + // Instrumentations that generate connection pool name following different + // patterns SHOULD document it. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "myDataSource" + DBClientConnectionPoolNameKey = attribute.Key("db.client.connection.pool.name") + + // DBClientConnectionStateKey is the attribute Key conforming to the + // "db.client.connection.state" semantic conventions. It represents the state of + // a connection in the pool. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "idle" + DBClientConnectionStateKey = attribute.Key("db.client.connection.state") + + // DBCollectionNameKey is the attribute Key conforming to the + // "db.collection.name" semantic conventions. It represents the name of a + // collection (table, container) within the database. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "public.users", "customers" + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // + // The collection name SHOULD NOT be extracted from `db.query.text`, + // when the database system supports query text with multiple collections + // in non-batch operations. + // + // For batch operations, if the individual operations are known to have the same + // collection name then that collection name SHOULD be used. + DBCollectionNameKey = attribute.Key("db.collection.name") + + // DBNamespaceKey is the attribute Key conforming to the "db.namespace" semantic + // conventions. It represents the name of the database, fully qualified within + // the server address and port. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "customers", "test.users" + // Note: If a database system has multiple namespace components, they SHOULD be + // concatenated from the most general to the most specific namespace component, + // using `|` as a separator between the components. Any missing components (and + // their associated separators) SHOULD be omitted. + // Semantic conventions for individual database systems SHOULD document what + // `db.namespace` means in the context of that system. + // It is RECOMMENDED to capture the value as provided by the application without + // attempting to do any case normalization. 
+ DBNamespaceKey = attribute.Key("db.namespace") + + // DBOperationBatchSizeKey is the attribute Key conforming to the + // "db.operation.batch.size" semantic conventions. It represents the number of + // queries included in a batch operation. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 2, 3, 4 + // Note: Operations are only considered batches when they contain two or more + // operations, and so `db.operation.batch.size` SHOULD never be `1`. + DBOperationBatchSizeKey = attribute.Key("db.operation.batch.size") + + // DBOperationNameKey is the attribute Key conforming to the "db.operation.name" + // semantic conventions. It represents the name of the operation or command + // being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "findAndModify", "HMSET", "SELECT" + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // + // The operation name SHOULD NOT be extracted from `db.query.text`, + // when the database system supports query text with multiple operations + // in non-batch operations. + // + // If spaces can occur in the operation name, multiple consecutive spaces + // SHOULD be normalized to a single space. + // + // For batch operations, if the individual operations are known to have the same + // operation name + // then that operation name SHOULD be used prepended by `BATCH `, + // otherwise `db.operation.name` SHOULD be `BATCH` or some other database + // system specific term if more applicable. + DBOperationNameKey = attribute.Key("db.operation.name") + + // DBQuerySummaryKey is the attribute Key conforming to the "db.query.summary" + // semantic conventions. It represents the low cardinality summary of a database + // query. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "SELECT wuser_table", "INSERT shipping_details SELECT orders", "get + // user by id" + // Note: The query summary describes a class of database queries and is useful + // as a grouping key, especially when analyzing telemetry for database + // calls involving complex queries. + // + // Summary may be available to the instrumentation through + // instrumentation hooks or other means. If it is not available, + // instrumentations + // that support query parsing SHOULD generate a summary following + // [Generating query summary] + // section. + // + // [Generating query summary]: /docs/database/database-spans.md#generating-a-summary-of-the-query + DBQuerySummaryKey = attribute.Key("db.query.summary") + + // DBQueryTextKey is the attribute Key conforming to the "db.query.text" + // semantic conventions. It represents the database query being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "SELECT * FROM wuser_table where username = ?", "SET mykey ?" + // Note: For sanitization see [Sanitization of `db.query.text`]. + // For batch operations, if the individual operations are known to have the same + // query text then that query text SHOULD be used, otherwise all of the + // individual query texts SHOULD be concatenated with separator `; ` or some + // other database system specific separator if more applicable. + // Parameterized query text SHOULD NOT be sanitized. Even though parameterized + // query text can potentially have sensitive data, by using a parameterized + // query the user is giving a strong signal that any sensitive data will be + // passed as parameter values, and the benefit to observability of capturing the + // static part of the query text by default outweighs the risk. 
+ // + // [Sanitization of `db.query.text`]: /docs/database/database-spans.md#sanitization-of-dbquerytext + DBQueryTextKey = attribute.Key("db.query.text") + + // DBResponseReturnedRowsKey is the attribute Key conforming to the + // "db.response.returned_rows" semantic conventions. It represents the number of + // rows returned by the operation. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10, 30, 1000 + DBResponseReturnedRowsKey = attribute.Key("db.response.returned_rows") + + // DBResponseStatusCodeKey is the attribute Key conforming to the + // "db.response.status_code" semantic conventions. It represents the database + // response status code. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "102", "ORA-17002", "08P01", "404" + // Note: The status code returned by the database. Usually it represents an + // error code, but may also represent partial success, warning, or differentiate + // between various types of successful outcomes. + // Semantic conventions for individual database systems SHOULD document what + // `db.response.status_code` means in the context of that system. + DBResponseStatusCodeKey = attribute.Key("db.response.status_code") + + // DBStoredProcedureNameKey is the attribute Key conforming to the + // "db.stored_procedure.name" semantic conventions. It represents the name of a + // stored procedure within the database. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "GetCustomer" + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // + // For batch operations, if the individual operations are known to have the same + // stored procedure name then that stored procedure name SHOULD be used. 
+ DBStoredProcedureNameKey = attribute.Key("db.stored_procedure.name") + + // DBSystemNameKey is the attribute Key conforming to the "db.system.name" + // semantic conventions. It represents the database management system (DBMS) + // product as identified by the client instrumentation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: + // Note: The actual DBMS may differ from the one identified by the client. For + // example, when using PostgreSQL client libraries to connect to a CockroachDB, + // the `db.system.name` is set to `postgresql` based on the instrumentation's + // best knowledge. + DBSystemNameKey = attribute.Key("db.system.name") +) + +// DBClientConnectionPoolName returns an attribute KeyValue conforming to the +// "db.client.connection.pool.name" semantic conventions. It represents the name +// of the connection pool; unique within the instrumented application. In case +// the connection pool implementation doesn't provide a name, instrumentation +// SHOULD use a combination of parameters that would make the name unique, for +// example, combining attributes `server.address`, `server.port`, and +// `db.namespace`, formatted as `server.address:server.port/db.namespace`. +// Instrumentations that generate connection pool name following different +// patterns SHOULD document it. +func DBClientConnectionPoolName(val string) attribute.KeyValue { + return DBClientConnectionPoolNameKey.String(val) +} + +// DBCollectionName returns an attribute KeyValue conforming to the +// "db.collection.name" semantic conventions. It represents the name of a +// collection (table, container) within the database. +func DBCollectionName(val string) attribute.KeyValue { + return DBCollectionNameKey.String(val) +} + +// DBNamespace returns an attribute KeyValue conforming to the "db.namespace" +// semantic conventions. It represents the name of the database, fully qualified +// within the server address and port. 
+func DBNamespace(val string) attribute.KeyValue { + return DBNamespaceKey.String(val) +} + +// DBOperationBatchSize returns an attribute KeyValue conforming to the +// "db.operation.batch.size" semantic conventions. It represents the number of +// queries included in a batch operation. +func DBOperationBatchSize(val int) attribute.KeyValue { + return DBOperationBatchSizeKey.Int(val) +} + +// DBOperationName returns an attribute KeyValue conforming to the +// "db.operation.name" semantic conventions. It represents the name of the +// operation or command being executed. +func DBOperationName(val string) attribute.KeyValue { + return DBOperationNameKey.String(val) +} + +// DBOperationParameter returns an attribute KeyValue conforming to the +// "db.operation.parameter" semantic conventions. It represents a database +// operation parameter, with `<key>` being the parameter name, and the attribute +// value being a string representation of the parameter value. +func DBOperationParameter(key string, val string) attribute.KeyValue { + return attribute.String("db.operation.parameter."+key, val) +} + +// DBQueryParameter returns an attribute KeyValue conforming to the +// "db.query.parameter" semantic conventions. It represents a database query +// parameter, with `<key>` being the parameter name, and the attribute value +// being a string representation of the parameter value. +func DBQueryParameter(key string, val string) attribute.KeyValue { + return attribute.String("db.query.parameter."+key, val) +} + +// DBQuerySummary returns an attribute KeyValue conforming to the +// "db.query.summary" semantic conventions. It represents the low cardinality +// summary of a database query. +func DBQuerySummary(val string) attribute.KeyValue { + return DBQuerySummaryKey.String(val) +} + +// DBQueryText returns an attribute KeyValue conforming to the "db.query.text" +// semantic conventions. It represents the database query being executed. 
+func DBQueryText(val string) attribute.KeyValue { + return DBQueryTextKey.String(val) +} + +// DBResponseReturnedRows returns an attribute KeyValue conforming to the +// "db.response.returned_rows" semantic conventions. It represents the number of +// rows returned by the operation. +func DBResponseReturnedRows(val int) attribute.KeyValue { + return DBResponseReturnedRowsKey.Int(val) +} + +// DBResponseStatusCode returns an attribute KeyValue conforming to the +// "db.response.status_code" semantic conventions. It represents the database +// response status code. +func DBResponseStatusCode(val string) attribute.KeyValue { + return DBResponseStatusCodeKey.String(val) +} + +// DBStoredProcedureName returns an attribute KeyValue conforming to the +// "db.stored_procedure.name" semantic conventions. It represents the name of a +// stored procedure within the database. +func DBStoredProcedureName(val string) attribute.KeyValue { + return DBStoredProcedureNameKey.String(val) +} + +// Enum values for db.client.connection.state +var ( + // idle + // Stability: development + DBClientConnectionStateIdle = DBClientConnectionStateKey.String("idle") + // used + // Stability: development + DBClientConnectionStateUsed = DBClientConnectionStateKey.String("used") +) + +// Enum values for db.system.name +var ( + // Some other SQL database. Fallback only. 
+ // Stability: development + DBSystemNameOtherSQL = DBSystemNameKey.String("other_sql") + // [Adabas (Adaptable Database System)] + // Stability: development + // + // [Adabas (Adaptable Database System)]: https://documentation.softwareag.com/?pf=adabas + DBSystemNameSoftwareagAdabas = DBSystemNameKey.String("softwareag.adabas") + // [Actian Ingres] + // Stability: development + // + // [Actian Ingres]: https://www.actian.com/databases/ingres/ + DBSystemNameActianIngres = DBSystemNameKey.String("actian.ingres") + // [Amazon DynamoDB] + // Stability: development + // + // [Amazon DynamoDB]: https://aws.amazon.com/pm/dynamodb/ + DBSystemNameAWSDynamoDB = DBSystemNameKey.String("aws.dynamodb") + // [Amazon Redshift] + // Stability: development + // + // [Amazon Redshift]: https://aws.amazon.com/redshift/ + DBSystemNameAWSRedshift = DBSystemNameKey.String("aws.redshift") + // [Azure Cosmos DB] + // Stability: development + // + // [Azure Cosmos DB]: https://learn.microsoft.com/azure/cosmos-db + DBSystemNameAzureCosmosDB = DBSystemNameKey.String("azure.cosmosdb") + // [InterSystems Caché] + // Stability: development + // + // [InterSystems Caché]: https://www.intersystems.com/products/cache/ + DBSystemNameIntersystemsCache = DBSystemNameKey.String("intersystems.cache") + // [Apache Cassandra] + // Stability: development + // + // [Apache Cassandra]: https://cassandra.apache.org/ + DBSystemNameCassandra = DBSystemNameKey.String("cassandra") + // [ClickHouse] + // Stability: development + // + // [ClickHouse]: https://clickhouse.com/ + DBSystemNameClickHouse = DBSystemNameKey.String("clickhouse") + // [CockroachDB] + // Stability: development + // + // [CockroachDB]: https://www.cockroachlabs.com/ + DBSystemNameCockroachDB = DBSystemNameKey.String("cockroachdb") + // [Couchbase] + // Stability: development + // + // [Couchbase]: https://www.couchbase.com/ + DBSystemNameCouchbase = DBSystemNameKey.String("couchbase") + // [Apache CouchDB] + // Stability: development + // 
+ // [Apache CouchDB]: https://couchdb.apache.org/ + DBSystemNameCouchDB = DBSystemNameKey.String("couchdb") + // [Apache Derby] + // Stability: development + // + // [Apache Derby]: https://db.apache.org/derby/ + DBSystemNameDerby = DBSystemNameKey.String("derby") + // [Elasticsearch] + // Stability: development + // + // [Elasticsearch]: https://www.elastic.co/elasticsearch + DBSystemNameElasticsearch = DBSystemNameKey.String("elasticsearch") + // [Firebird] + // Stability: development + // + // [Firebird]: https://www.firebirdsql.org/ + DBSystemNameFirebirdSQL = DBSystemNameKey.String("firebirdsql") + // [Google Cloud Spanner] + // Stability: development + // + // [Google Cloud Spanner]: https://cloud.google.com/spanner + DBSystemNameGCPSpanner = DBSystemNameKey.String("gcp.spanner") + // [Apache Geode] + // Stability: development + // + // [Apache Geode]: https://geode.apache.org/ + DBSystemNameGeode = DBSystemNameKey.String("geode") + // [H2 Database] + // Stability: development + // + // [H2 Database]: https://h2database.com/ + DBSystemNameH2database = DBSystemNameKey.String("h2database") + // [Apache HBase] + // Stability: development + // + // [Apache HBase]: https://hbase.apache.org/ + DBSystemNameHBase = DBSystemNameKey.String("hbase") + // [Apache Hive] + // Stability: development + // + // [Apache Hive]: https://hive.apache.org/ + DBSystemNameHive = DBSystemNameKey.String("hive") + // [HyperSQL Database] + // Stability: development + // + // [HyperSQL Database]: https://hsqldb.org/ + DBSystemNameHSQLDB = DBSystemNameKey.String("hsqldb") + // [IBM Db2] + // Stability: development + // + // [IBM Db2]: https://www.ibm.com/db2 + DBSystemNameIBMDB2 = DBSystemNameKey.String("ibm.db2") + // [IBM Informix] + // Stability: development + // + // [IBM Informix]: https://www.ibm.com/products/informix + DBSystemNameIBMInformix = DBSystemNameKey.String("ibm.informix") + // [IBM Netezza] + // Stability: development + // + // [IBM Netezza]: 
https://www.ibm.com/products/netezza + DBSystemNameIBMNetezza = DBSystemNameKey.String("ibm.netezza") + // [InfluxDB] + // Stability: development + // + // [InfluxDB]: https://www.influxdata.com/ + DBSystemNameInfluxDB = DBSystemNameKey.String("influxdb") + // [Instant] + // Stability: development + // + // [Instant]: https://www.instantdb.com/ + DBSystemNameInstantDB = DBSystemNameKey.String("instantdb") + // [MariaDB] + // Stability: stable + // + // [MariaDB]: https://mariadb.org/ + DBSystemNameMariaDB = DBSystemNameKey.String("mariadb") + // [Memcached] + // Stability: development + // + // [Memcached]: https://memcached.org/ + DBSystemNameMemcached = DBSystemNameKey.String("memcached") + // [MongoDB] + // Stability: development + // + // [MongoDB]: https://www.mongodb.com/ + DBSystemNameMongoDB = DBSystemNameKey.String("mongodb") + // [Microsoft SQL Server] + // Stability: stable + // + // [Microsoft SQL Server]: https://www.microsoft.com/sql-server + DBSystemNameMicrosoftSQLServer = DBSystemNameKey.String("microsoft.sql_server") + // [MySQL] + // Stability: stable + // + // [MySQL]: https://www.mysql.com/ + DBSystemNameMySQL = DBSystemNameKey.String("mysql") + // [Neo4j] + // Stability: development + // + // [Neo4j]: https://neo4j.com/ + DBSystemNameNeo4j = DBSystemNameKey.String("neo4j") + // [OpenSearch] + // Stability: development + // + // [OpenSearch]: https://opensearch.org/ + DBSystemNameOpenSearch = DBSystemNameKey.String("opensearch") + // [Oracle Database] + // Stability: development + // + // [Oracle Database]: https://www.oracle.com/database/ + DBSystemNameOracleDB = DBSystemNameKey.String("oracle.db") + // [PostgreSQL] + // Stability: stable + // + // [PostgreSQL]: https://www.postgresql.org/ + DBSystemNamePostgreSQL = DBSystemNameKey.String("postgresql") + // [Redis] + // Stability: development + // + // [Redis]: https://redis.io/ + DBSystemNameRedis = DBSystemNameKey.String("redis") + // [SAP HANA] + // Stability: development + // + // [SAP 
HANA]: https://www.sap.com/products/technology-platform/hana/what-is-sap-hana.html + DBSystemNameSAPHANA = DBSystemNameKey.String("sap.hana") + // [SAP MaxDB] + // Stability: development + // + // [SAP MaxDB]: https://maxdb.sap.com/ + DBSystemNameSAPMaxDB = DBSystemNameKey.String("sap.maxdb") + // [SQLite] + // Stability: development + // + // [SQLite]: https://www.sqlite.org/ + DBSystemNameSQLite = DBSystemNameKey.String("sqlite") + // [Teradata] + // Stability: development + // + // [Teradata]: https://www.teradata.com/ + DBSystemNameTeradata = DBSystemNameKey.String("teradata") + // [Trino] + // Stability: development + // + // [Trino]: https://trino.io/ + DBSystemNameTrino = DBSystemNameKey.String("trino") +) + +// Namespace: deployment +const ( + // DeploymentEnvironmentNameKey is the attribute Key conforming to the + // "deployment.environment.name" semantic conventions. It represents the name of + // the [deployment environment] (aka deployment tier). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "staging", "production" + // Note: `deployment.environment.name` does not affect the uniqueness + // constraints defined through + // the `service.namespace`, `service.name` and `service.instance.id` resource + // attributes. + // This implies that resources carrying the following attribute combinations + // MUST be + // considered to be identifying the same service: + // + // - `service.name=frontend`, `deployment.environment.name=production` + // - `service.name=frontend`, `deployment.environment.name=staging`. + // + // + // [deployment environment]: https://wikipedia.org/wiki/Deployment_environment + DeploymentEnvironmentNameKey = attribute.Key("deployment.environment.name") + + // DeploymentIDKey is the attribute Key conforming to the "deployment.id" + // semantic conventions. It represents the id of the deployment. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1208" + DeploymentIDKey = attribute.Key("deployment.id") + + // DeploymentNameKey is the attribute Key conforming to the "deployment.name" + // semantic conventions. It represents the name of the deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "deploy my app", "deploy-frontend" + DeploymentNameKey = attribute.Key("deployment.name") + + // DeploymentStatusKey is the attribute Key conforming to the + // "deployment.status" semantic conventions. It represents the status of the + // deployment. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + DeploymentStatusKey = attribute.Key("deployment.status") +) + +// DeploymentEnvironmentName returns an attribute KeyValue conforming to the +// "deployment.environment.name" semantic conventions. It represents the name of +// the [deployment environment] (aka deployment tier). +// +// [deployment environment]: https://wikipedia.org/wiki/Deployment_environment +func DeploymentEnvironmentName(val string) attribute.KeyValue { + return DeploymentEnvironmentNameKey.String(val) +} + +// DeploymentID returns an attribute KeyValue conforming to the "deployment.id" +// semantic conventions. It represents the id of the deployment. +func DeploymentID(val string) attribute.KeyValue { + return DeploymentIDKey.String(val) +} + +// DeploymentName returns an attribute KeyValue conforming to the +// "deployment.name" semantic conventions. It represents the name of the +// deployment. 
+func DeploymentName(val string) attribute.KeyValue { + return DeploymentNameKey.String(val) +} + +// Enum values for deployment.status +var ( + // failed + // Stability: development + DeploymentStatusFailed = DeploymentStatusKey.String("failed") + // succeeded + // Stability: development + DeploymentStatusSucceeded = DeploymentStatusKey.String("succeeded") +) + +// Namespace: destination +const ( + // DestinationAddressKey is the attribute Key conforming to the + // "destination.address" semantic conventions. It represents the destination + // address - domain name if available without reverse DNS lookup; otherwise, IP + // address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "destination.example.com", "10.1.2.80", "/tmp/my.sock" + // Note: When observed from the source side, and when communicating through an + // intermediary, `destination.address` SHOULD represent the destination address + // behind any intermediaries, for example proxies, if it's available. + DestinationAddressKey = attribute.Key("destination.address") + + // DestinationPortKey is the attribute Key conforming to the "destination.port" + // semantic conventions. It represents the destination port number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 3389, 2888 + DestinationPortKey = attribute.Key("destination.port") +) + +// DestinationAddress returns an attribute KeyValue conforming to the +// "destination.address" semantic conventions. It represents the destination +// address - domain name if available without reverse DNS lookup; otherwise, IP +// address or Unix domain socket name. +func DestinationAddress(val string) attribute.KeyValue { + return DestinationAddressKey.String(val) +} + +// DestinationPort returns an attribute KeyValue conforming to the +// "destination.port" semantic conventions. It represents the destination port +// number. 
+func DestinationPort(val int) attribute.KeyValue { + return DestinationPortKey.Int(val) +} + +// Namespace: device +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123456789012345", "01:23:45:67:89:AB" + // Note: Its value SHOULD be identical for all apps on a device and it SHOULD + // NOT change if an app is uninstalled and re-installed. + // However, it might be resettable by the user for all apps on a device. + // Hardware IDs (e.g. vendor-specific serial number, IMEI or MAC address) MAY be + // used as values. + // + // More information about Android identifier best practices can be found [here] + // . + // + // > [!WARNING]> This attribute may contain sensitive (PII) information. Caution + // > should be taken when storing personal data or anything which can identify a + // > user. GDPR and data protection laws may apply, + // > ensure you do your own due diligence.> Due to these reasons, this + // > identifier is not recommended for consumer applications and will likely + // > result in rejection from both Google Play and App Store. + // > However, it may be appropriate for specific enterprise scenarios, such as + // > kiosk devices or enterprise-managed devices, with appropriate compliance + // > clearance. + // > Any instrumentation providing this identifier MUST implement it as an + // > opt-in feature.> See [`app.installation.id`]> for a more + // > privacy-preserving alternative. + // + // [here]: https://developer.android.com/training/articles/user-data-ids + // [`app.installation.id`]: /docs/registry/attributes/app.md#app-installation-id + DeviceIDKey = attribute.Key("device.id") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. 
It represents the name of the + // device manufacturer. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Apple", "Samsung" + // Note: The Android OS provides this field via [Build]. iOS apps SHOULD + // hardcode the value `Apple`. + // + // [Build]: https://developer.android.com/reference/android/os/Build#MANUFACTURER + DeviceManufacturerKey = attribute.Key("device.manufacturer") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "iPhone3,4", "SM-G920F" + // Note: It's recommended this value represents a machine-readable version of + // the model identifier rather than the market or consumer-friendly name of the + // device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the "device.model.name" + // semantic conventions. It represents the marketing name for the device model. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "iPhone 6s Plus", "Samsung Galaxy S6" + // Note: It's recommended this value represents a human-readable version of the + // device model rather than a machine-readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" semantic +// conventions. It represents a unique identifier representing the device. +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer. 
+func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device. +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name for +// the device model. +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// Namespace: disk +const ( + // DiskIODirectionKey is the attribute Key conforming to the "disk.io.direction" + // semantic conventions. It represents the disk IO operation direction. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "read" + DiskIODirectionKey = attribute.Key("disk.io.direction") +) + +// Enum values for disk.io.direction +var ( + // read + // Stability: development + DiskIODirectionRead = DiskIODirectionKey.String("read") + // write + // Stability: development + DiskIODirectionWrite = DiskIODirectionKey.String("write") +) + +// Namespace: dns +const ( + // DNSAnswersKey is the attribute Key conforming to the "dns.answers" semantic + // conventions. It represents the list of IPv4 or IPv6 addresses resolved during + // DNS lookup. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "10.0.0.1", "2001:0db8:85a3:0000:0000:8a2e:0370:7334" + DNSAnswersKey = attribute.Key("dns.answers") + + // DNSQuestionNameKey is the attribute Key conforming to the "dns.question.name" + // semantic conventions. It represents the name being queried. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "www.example.com", "opentelemetry.io" + // Note: If the name field contains non-printable characters (below 32 or above + // 126), those characters should be represented as escaped base 10 integers + // (\DDD). Back slashes and quotes should be escaped. Tabs, carriage returns, + // and line feeds should be converted to \t, \r, and \n respectively. + DNSQuestionNameKey = attribute.Key("dns.question.name") +) + +// DNSAnswers returns an attribute KeyValue conforming to the "dns.answers" +// semantic conventions. It represents the list of IPv4 or IPv6 addresses +// resolved during DNS lookup. +func DNSAnswers(val ...string) attribute.KeyValue { + return DNSAnswersKey.StringSlice(val) +} + +// DNSQuestionName returns an attribute KeyValue conforming to the +// "dns.question.name" semantic conventions. It represents the name being +// queried. +func DNSQuestionName(val string) attribute.KeyValue { + return DNSQuestionNameKey.String(val) +} + +// Namespace: elasticsearch +const ( + // ElasticsearchNodeNameKey is the attribute Key conforming to the + // "elasticsearch.node.name" semantic conventions. It represents + // the human-readable identifier of the node/instance to which a request was + // routed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "instance-0000000001" + ElasticsearchNodeNameKey = attribute.Key("elasticsearch.node.name") +) + +// ElasticsearchNodeName returns an attribute KeyValue conforming to the +// "elasticsearch.node.name" semantic conventions. It represents +// the human-readable identifier of the node/instance to which a request was +// routed. 
+func ElasticsearchNodeName(val string) attribute.KeyValue { + return ElasticsearchNodeNameKey.String(val) +} + +// Namespace: enduser +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" semantic + // conventions. It represents the unique identifier of an end user in the + // system. It may be a username, email address, or other identifier. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "username" + // Note: Unique identifier of an end user in the system. + // + // > [!Warning] + // > This field contains sensitive (PII) information. + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserPseudoIDKey is the attribute Key conforming to the "enduser.pseudo.id" + // semantic conventions. It represents the pseudonymous identifier of an end + // user. This identifier should be a random value that is not directly linked or + // associated with the end user's actual identity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "QdH5CAWJgqVT4rOr0qtumf" + // Note: Pseudonymous identifier of an end user. + // + // > [!Warning] + // > This field contains sensitive (linkable PII) information. + EnduserPseudoIDKey = attribute.Key("enduser.pseudo.id") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the unique identifier of an end user in +// the system. It may be a username, email address, or other identifier. +func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserPseudoID returns an attribute KeyValue conforming to the +// "enduser.pseudo.id" semantic conventions. It represents the pseudonymous +// identifier of an end user. This identifier should be a random value that is +// not directly linked or associated with the end user's actual identity. 
+func EnduserPseudoID(val string) attribute.KeyValue { + return EnduserPseudoIDKey.String(val) +} + +// Namespace: error +const ( + // ErrorMessageKey is the attribute Key conforming to the "error.message" + // semantic conventions. It represents a message providing more detail about an + // error in human-readable form. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Unexpected input type: string", "The user has exceeded their + // storage quota" + // Note: `error.message` should provide additional context and detail about an + // error. + // It is NOT RECOMMENDED to duplicate the value of `error.type` in + // `error.message`. + // It is also NOT RECOMMENDED to duplicate the value of `exception.message` in + // `error.message`. + // + // `error.message` is NOT RECOMMENDED for metrics or spans due to its unbounded + // cardinality and overlap with span status. + ErrorMessageKey = attribute.Key("error.message") + + // ErrorTypeKey is the attribute Key conforming to the "error.type" semantic + // conventions. It represents a class of error the operation ended + // with. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "timeout", "java.net.UnknownHostException", + // "server_certificate_invalid", "500" + // Note: The `error.type` SHOULD be predictable, and SHOULD have low + // cardinality. + // + // When `error.type` is set to a type (e.g., an exception type), its + // canonical class name identifying the type within the artifact SHOULD be used. + // + // Instrumentations SHOULD document the list of errors they report. + // + // The cardinality of `error.type` within one instrumentation library SHOULD be + // low. + // Telemetry consumers that aggregate data from multiple instrumentation + // libraries and applications + // should be prepared for `error.type` to have high cardinality at query time + // when no + // additional filters are applied. 
+ // + // If the operation has completed successfully, instrumentations SHOULD NOT set + // `error.type`. + // + // If a specific domain defines its own set of error identifiers (such as HTTP + // or gRPC status codes), + // it's RECOMMENDED to: + // + // - Use a domain-specific attribute + // - Set `error.type` to capture all errors, regardless of whether they are + // defined within the domain-specific set or not. + ErrorTypeKey = attribute.Key("error.type") +) + +// ErrorMessage returns an attribute KeyValue conforming to the "error.message" +// semantic conventions. It represents a message providing more detail about an +// error in human-readable form. +func ErrorMessage(val string) attribute.KeyValue { + return ErrorMessageKey.String(val) +} + +// Enum values for error.type +var ( + // A fallback error value to be used when the instrumentation doesn't define a + // custom value. + // + // Stability: stable + ErrorTypeOther = ErrorTypeKey.String("_OTHER") +) + +// Namespace: exception +const ( + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "Division by zero", "Can't convert 'int' object to str implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace as a + // string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: Exception in thread "main" java.lang.RuntimeException: Test + // exception\n at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at + // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at + // com.example.GenerateTrace.main(GenerateTrace.java:5) + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") + + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the exception + // should be preferred over the static type in languages that support it. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "java.net.ConnectException", "OSError" + ExceptionTypeKey = attribute.Key("exception.type") +) + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// ExceptionType returns an attribute KeyValue conforming to the "exception.type" +// semantic conventions. It represents the type of the exception (its +// fully-qualified class name, if applicable). The dynamic type of the exception +// should be preferred over the static type in languages that support it. 
+func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// Namespace: faas +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the serverless + // function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FaaSColdstartKey = attribute.Key("faas.coldstart") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron Expression]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0/5 * * * ? * + // + // [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm + FaaSCronKey = attribute.Key("faas.cron") + + // FaaSDocumentCollectionKey is the attribute Key conforming to the + // "faas.document.collection" semantic conventions. It represents the name of + // the source on which the triggering operation was performed. For example, in + // Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the + // database name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "myBucketName", "myDbName" + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + + // FaaSDocumentNameKey is the attribute Key conforming to the + // "faas.document.name" semantic conventions. It represents the document + // name/table subjected to the operation. For example, in Cloud Storage or S3 is + // the name of the file, and in Cosmos DB the table name. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "myFile.txt", "myTableName" + FaaSDocumentNameKey = attribute.Key("faas.document.name") + + // FaaSDocumentOperationKey is the attribute Key conforming to the + // "faas.document.operation" semantic conventions. It represents + // the type of the operation that was performed on the data. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + + // FaaSDocumentTimeKey is the attribute Key conforming to the + // "faas.document.time" semantic conventions. It represents a string containing + // the time when the data was accessed in the [ISO 8601] format expressed in + // [UTC]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 2020-01-23T13:47:06Z + // + // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html + // [UTC]: https://www.w3.org/TR/NOTE-datetime + FaaSDocumentTimeKey = attribute.Key("faas.document.time") + + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a string, + // that will be potentially reused for other invocations to the same + // function/function version. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de" + // Note: - **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSInvocationIDKey is the attribute Key conforming to the + // "faas.invocation_id" semantic conventions. It represents the invocation ID of + // the current function invocation. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: af9d5aa4-a685-4c5f-a22b-444f80b3cc28 + FaaSInvocationIDKey = attribute.Key("faas.invocation_id") + + // FaaSInvokedNameKey is the attribute Key conforming to the "faas.invoked_name" + // semantic conventions. It represents the name of the invoked function. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: my-function + // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked + // function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud region of + // the invoked function. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: eu-central-1 + // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked + // function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") + + // FaaSMaxMemoryKey is the attribute Key conforming to the "faas.max_memory" + // semantic conventions. It represents the amount of memory available to the + // serverless function converted to Bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Note: It's recommended to set this attribute since e.g. too little memory can + // easily stop a Java AWS Lambda function from working correctly. 
On AWS Lambda, + // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this + // information (which must be multiplied by 1,048,576). + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") + + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this runtime + // instance executes. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-function", "myazurefunctionapp/some-function-name" + // Note: This is the name of the function as configured/deployed on the FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function.name`] + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud providers/products: + // + // - **Azure:** The full name `/`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `cloud.resource_id` attribute). + // + // + // [`code.namespace`/`code.function.name`]: /docs/general/attributes.md#source-code-attributes + FaaSNameKey = attribute.Key("faas.name") + + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation time + // in the [ISO 8601] format expressed in [UTC]. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 2020-01-23T13:47:06Z + // + // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html + // [UTC]: https://www.w3.org/TR/NOTE-datetime + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" semantic + // conventions. It represents the type of the trigger which caused this function + // invocation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FaaSTriggerKey = attribute.Key("faas.trigger") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" semantic + // conventions. It represents the immutable version of the function being + // executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "26", "pinkfroid-00002" + // Note: Depending on the cloud provider and platform, use: + // + // - **AWS Lambda:** The [function version] + // (an integer represented as a decimal string). + // - **Google Cloud Run (Services):** The [revision] + // (i.e., the function name plus the revision suffix). + // - **Google Cloud Functions:** The value of the + // [`K_REVISION` environment variable]. + // - **Azure Functions:** Not applicable. Do not set this attribute. + // + // + // [function version]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html + // [revision]: https://cloud.google.com/run/docs/managing/revisions + // [`K_REVISION` environment variable]: https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically + FaaSVersionKey = attribute.Key("faas.version") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the "faas.coldstart" +// semantic conventions. It represents a boolean that is true if the serverless +// function is executed for the first time (aka cold-start). 
+func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" semantic +// conventions. It represents a string containing the schedule period as +// [Cron Expression]. +// +// [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of the +// source on which the triggering operation was performed. For example, in Cloud +// Storage or S3 corresponds to the bucket name, and in Cosmos DB to the database +// name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 is +// the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO 8601] format expressed in +// [UTC]. +// +// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html +// [UTC]: https://www.w3.org/TR/NOTE-datetime +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the "faas.instance" +// semantic conventions. 
It represents the execution environment ID as a string, +// that will be potentially reused for other invocations to the same +// function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSInvocationID returns an attribute KeyValue conforming to the +// "faas.invocation_id" semantic conventions. It represents the invocation ID of +// the current function invocation. +func FaaSInvocationID(val string) attribute.KeyValue { + return FaaSInvocationIDKey.String(val) +} + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region of +// the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function converted to Bytes. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" semantic +// conventions. It represents the name of the single function that this runtime +// instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" semantic +// conventions. It represents a string containing the function invocation time in +// the [ISO 8601] format expressed in [UTC]. 
+// +// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html +// [UTC]: https://www.w3.org/TR/NOTE-datetime +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the "faas.version" +// semantic conventions. It represents the immutable version of the function +// being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// Enum values for faas.document.operation +var ( + // When a new object is created. + // Stability: development + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified. + // Stability: development + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted. + // Stability: development + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +// Enum values for faas.invoked_provider +var ( + // Alibaba Cloud + // Stability: development + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + // Stability: development + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + // Stability: development + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + // Stability: development + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + // Stability: development + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +// Enum values for faas.trigger +var ( + // A response to some data source operation such as a database or filesystem + // read/write + // Stability: development + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + // Stability: development + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed 
when messages are sent to a messaging system + // Stability: development + FaaSTriggerPubSub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + // Stability: development + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + // Stability: development + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// Namespace: feature_flag +const ( + // FeatureFlagContextIDKey is the attribute Key conforming to the + // "feature_flag.context.id" semantic conventions. It represents the unique + // identifier for the flag evaluation context. For example, the targeting key. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "5157782b-2203-4c80-a857-dbbd5e7761db" + FeatureFlagContextIDKey = attribute.Key("feature_flag.context.id") + + // FeatureFlagKeyKey is the attribute Key conforming to the "feature_flag.key" + // semantic conventions. It represents the lookup key of the feature flag. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "logo-color" + FeatureFlagKeyKey = attribute.Key("feature_flag.key") + + // FeatureFlagProviderNameKey is the attribute Key conforming to the + // "feature_flag.provider.name" semantic conventions. It represents the + // identifies the feature flag provider. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "Flag Manager" + FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider.name") + + // FeatureFlagResultReasonKey is the attribute Key conforming to the + // "feature_flag.result.reason" semantic conventions. It represents the reason + // code which shows how a feature flag value was determined. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "static", "targeting_match", "error", "default" + FeatureFlagResultReasonKey = attribute.Key("feature_flag.result.reason") + + // FeatureFlagResultValueKey is the attribute Key conforming to the + // "feature_flag.result.value" semantic conventions. It represents the evaluated + // value of the feature flag. + // + // Type: any + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "#ff0000", true, 3 + // Note: With some feature flag providers, feature flag results can be quite + // large or contain private or sensitive details. + // Because of this, `feature_flag.result.variant` is often the preferred + // attribute if it is available. + // + // It may be desirable to redact or otherwise limit the size and scope of + // `feature_flag.result.value` if possible. + // Because the evaluated flag value is unstructured and may be any type, it is + // left to the instrumentation author to determine how best to achieve this. + FeatureFlagResultValueKey = attribute.Key("feature_flag.result.value") + + // FeatureFlagResultVariantKey is the attribute Key conforming to the + // "feature_flag.result.variant" semantic conventions. It represents a semantic + // identifier for an evaluated flag value. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "red", "true", "on" + // Note: A semantic identifier, commonly referred to as a variant, provides a + // means + // for referring to a value without including the value itself. This can + // provide additional context for understanding the meaning behind a value. + // For example, the variant `red` maybe be used for the value `#c05543`. + FeatureFlagResultVariantKey = attribute.Key("feature_flag.result.variant") + + // FeatureFlagSetIDKey is the attribute Key conforming to the + // "feature_flag.set.id" semantic conventions. 
It represents the identifier of + // the [flag set] to which the feature flag belongs. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "proj-1", "ab98sgs", "service1/dev" + // + // [flag set]: https://openfeature.dev/specification/glossary/#flag-set + FeatureFlagSetIDKey = attribute.Key("feature_flag.set.id") + + // FeatureFlagVersionKey is the attribute Key conforming to the + // "feature_flag.version" semantic conventions. It represents the version of the + // ruleset used during the evaluation. This may be any stable value which + // uniquely identifies the ruleset. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "1", "01ABCDEF" + FeatureFlagVersionKey = attribute.Key("feature_flag.version") +) + +// FeatureFlagContextID returns an attribute KeyValue conforming to the +// "feature_flag.context.id" semantic conventions. It represents the unique +// identifier for the flag evaluation context. For example, the targeting key. +func FeatureFlagContextID(val string) attribute.KeyValue { + return FeatureFlagContextIDKey.String(val) +} + +// FeatureFlagKey returns an attribute KeyValue conforming to the +// "feature_flag.key" semantic conventions. It represents the lookup key of the +// feature flag. +func FeatureFlagKey(val string) attribute.KeyValue { + return FeatureFlagKeyKey.String(val) +} + +// FeatureFlagProviderName returns an attribute KeyValue conforming to the +// "feature_flag.provider.name" semantic conventions. It represents the +// identifies the feature flag provider. +func FeatureFlagProviderName(val string) attribute.KeyValue { + return FeatureFlagProviderNameKey.String(val) +} + +// FeatureFlagResultVariant returns an attribute KeyValue conforming to the +// "feature_flag.result.variant" semantic conventions. It represents a semantic +// identifier for an evaluated flag value. 
+func FeatureFlagResultVariant(val string) attribute.KeyValue { + return FeatureFlagResultVariantKey.String(val) +} + +// FeatureFlagSetID returns an attribute KeyValue conforming to the +// "feature_flag.set.id" semantic conventions. It represents the identifier of +// the [flag set] to which the feature flag belongs. +// +// [flag set]: https://openfeature.dev/specification/glossary/#flag-set +func FeatureFlagSetID(val string) attribute.KeyValue { + return FeatureFlagSetIDKey.String(val) +} + +// FeatureFlagVersion returns an attribute KeyValue conforming to the +// "feature_flag.version" semantic conventions. It represents the version of the +// ruleset used during the evaluation. This may be any stable value which +// uniquely identifies the ruleset. +func FeatureFlagVersion(val string) attribute.KeyValue { + return FeatureFlagVersionKey.String(val) +} + +// Enum values for feature_flag.result.reason +var ( + // The resolved value is static (no dynamic evaluation). + // Stability: release_candidate + FeatureFlagResultReasonStatic = FeatureFlagResultReasonKey.String("static") + // The resolved value fell back to a pre-configured value (no dynamic evaluation + // occurred or dynamic evaluation yielded no result). + // Stability: release_candidate + FeatureFlagResultReasonDefault = FeatureFlagResultReasonKey.String("default") + // The resolved value was the result of a dynamic evaluation, such as a rule or + // specific user-targeting. + // Stability: release_candidate + FeatureFlagResultReasonTargetingMatch = FeatureFlagResultReasonKey.String("targeting_match") + // The resolved value was the result of pseudorandom assignment. + // Stability: release_candidate + FeatureFlagResultReasonSplit = FeatureFlagResultReasonKey.String("split") + // The resolved value was retrieved from cache. 
+ // Stability: release_candidate + FeatureFlagResultReasonCached = FeatureFlagResultReasonKey.String("cached") + // The resolved value was the result of the flag being disabled in the + // management system. + // Stability: release_candidate + FeatureFlagResultReasonDisabled = FeatureFlagResultReasonKey.String("disabled") + // The reason for the resolved value could not be determined. + // Stability: release_candidate + FeatureFlagResultReasonUnknown = FeatureFlagResultReasonKey.String("unknown") + // The resolved value is non-authoritative or possibly out of date + // Stability: release_candidate + FeatureFlagResultReasonStale = FeatureFlagResultReasonKey.String("stale") + // The resolved value was the result of an error. + // Stability: release_candidate + FeatureFlagResultReasonError = FeatureFlagResultReasonKey.String("error") +) + +// Namespace: file +const ( + // FileAccessedKey is the attribute Key conforming to the "file.accessed" + // semantic conventions. It represents the time when the file was last accessed, + // in ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T12:00:00Z" + // Note: This attribute might not be supported by some file systems — NFS, + // FAT32, in embedded OS, etc. + FileAccessedKey = attribute.Key("file.accessed") + + // FileAttributesKey is the attribute Key conforming to the "file.attributes" + // semantic conventions. It represents the array of file attributes. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "readonly", "hidden" + // Note: Attributes names depend on the OS or file system. Here’s a + // non-exhaustive list of values expected for this attribute: `archive`, + // `compressed`, `directory`, `encrypted`, `execute`, `hidden`, `immutable`, + // `journaled`, `read`, `readonly`, `symbolic link`, `system`, `temporary`, + // `write`. 
+ FileAttributesKey = attribute.Key("file.attributes") + + // FileChangedKey is the attribute Key conforming to the "file.changed" semantic + // conventions. It represents the time when the file attributes or metadata was + // last changed, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T12:00:00Z" + // Note: `file.changed` captures the time when any of the file's properties or + // attributes (including the content) are changed, while `file.modified` + // captures the timestamp when the file content is modified. + FileChangedKey = attribute.Key("file.changed") + + // FileCreatedKey is the attribute Key conforming to the "file.created" semantic + // conventions. It represents the time when the file was created, in ISO 8601 + // format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T12:00:00Z" + // Note: This attribute might not be supported by some file systems — NFS, + // FAT32, in embedded OS, etc. + FileCreatedKey = attribute.Key("file.created") + + // FileDirectoryKey is the attribute Key conforming to the "file.directory" + // semantic conventions. It represents the directory where the file is located. + // It should include the drive letter, when appropriate. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/home/user", "C:\Program Files\MyApp" + FileDirectoryKey = attribute.Key("file.directory") + + // FileExtensionKey is the attribute Key conforming to the "file.extension" + // semantic conventions. It represents the file extension, excluding the leading + // dot. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "png", "gz" + // Note: When the file name has multiple extensions (example.tar.gz), only the + // last one should be captured ("gz", not "tar.gz"). 
+ FileExtensionKey = attribute.Key("file.extension") + + // FileForkNameKey is the attribute Key conforming to the "file.fork_name" + // semantic conventions. It represents the name of the fork. A fork is + // additional data associated with a filesystem object. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Zone.Identifer" + // Note: On Linux, a resource fork is used to store additional data with a + // filesystem object. A file always has at least one fork for the data portion, + // and additional forks may exist. + // On NTFS, this is analogous to an Alternate Data Stream (ADS), and the default + // data stream for a file is just called $DATA. Zone.Identifier is commonly used + // by Windows to track contents downloaded from the Internet. An ADS is + // typically of the form: C:\path\to\filename.extension:some_fork_name, and + // some_fork_name is the value that should populate `fork_name`. + // `filename.extension` should populate `file.name`, and `extension` should + // populate `file.extension`. The full path, `file.path`, will include the fork + // name. + FileForkNameKey = attribute.Key("file.fork_name") + + // FileGroupIDKey is the attribute Key conforming to the "file.group.id" + // semantic conventions. It represents the primary Group ID (GID) of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1000" + FileGroupIDKey = attribute.Key("file.group.id") + + // FileGroupNameKey is the attribute Key conforming to the "file.group.name" + // semantic conventions. It represents the primary group name of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "users" + FileGroupNameKey = attribute.Key("file.group.name") + + // FileInodeKey is the attribute Key conforming to the "file.inode" semantic + // conventions. 
It represents the inode representing the file in the filesystem. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "256383" + FileInodeKey = attribute.Key("file.inode") + + // FileModeKey is the attribute Key conforming to the "file.mode" semantic + // conventions. It represents the mode of the file in octal representation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0640" + FileModeKey = attribute.Key("file.mode") + + // FileModifiedKey is the attribute Key conforming to the "file.modified" + // semantic conventions. It represents the time when the file content was last + // modified, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T12:00:00Z" + FileModifiedKey = attribute.Key("file.modified") + + // FileNameKey is the attribute Key conforming to the "file.name" semantic + // conventions. It represents the name of the file including the extension, + // without the directory. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "example.png" + FileNameKey = attribute.Key("file.name") + + // FileOwnerIDKey is the attribute Key conforming to the "file.owner.id" + // semantic conventions. It represents the user ID (UID) or security identifier + // (SID) of the file owner. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1000" + FileOwnerIDKey = attribute.Key("file.owner.id") + + // FileOwnerNameKey is the attribute Key conforming to the "file.owner.name" + // semantic conventions. It represents the username of the file owner. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "root" + FileOwnerNameKey = attribute.Key("file.owner.name") + + // FilePathKey is the attribute Key conforming to the "file.path" semantic + // conventions. It represents the full path to the file, including the file + // name. It should include the drive letter, when appropriate. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/home/alice/example.png", "C:\Program Files\MyApp\myapp.exe" + FilePathKey = attribute.Key("file.path") + + // FileSizeKey is the attribute Key conforming to the "file.size" semantic + // conventions. It represents the file size in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FileSizeKey = attribute.Key("file.size") + + // FileSymbolicLinkTargetPathKey is the attribute Key conforming to the + // "file.symbolic_link.target_path" semantic conventions. It represents the path + // to the target of a symbolic link. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/usr/bin/python3" + // Note: This attribute is only applicable to symbolic links. + FileSymbolicLinkTargetPathKey = attribute.Key("file.symbolic_link.target_path") +) + +// FileAccessed returns an attribute KeyValue conforming to the "file.accessed" +// semantic conventions. It represents the time when the file was last accessed, +// in ISO 8601 format. +func FileAccessed(val string) attribute.KeyValue { + return FileAccessedKey.String(val) +} + +// FileAttributes returns an attribute KeyValue conforming to the +// "file.attributes" semantic conventions. It represents the array of file +// attributes. 
+func FileAttributes(val ...string) attribute.KeyValue { + return FileAttributesKey.StringSlice(val) +} + +// FileChanged returns an attribute KeyValue conforming to the "file.changed" +// semantic conventions. It represents the time when the file attributes or +// metadata was last changed, in ISO 8601 format. +func FileChanged(val string) attribute.KeyValue { + return FileChangedKey.String(val) +} + +// FileCreated returns an attribute KeyValue conforming to the "file.created" +// semantic conventions. It represents the time when the file was created, in ISO +// 8601 format. +func FileCreated(val string) attribute.KeyValue { + return FileCreatedKey.String(val) +} + +// FileDirectory returns an attribute KeyValue conforming to the "file.directory" +// semantic conventions. It represents the directory where the file is located. +// It should include the drive letter, when appropriate. +func FileDirectory(val string) attribute.KeyValue { + return FileDirectoryKey.String(val) +} + +// FileExtension returns an attribute KeyValue conforming to the "file.extension" +// semantic conventions. It represents the file extension, excluding the leading +// dot. +func FileExtension(val string) attribute.KeyValue { + return FileExtensionKey.String(val) +} + +// FileForkName returns an attribute KeyValue conforming to the "file.fork_name" +// semantic conventions. It represents the name of the fork. A fork is additional +// data associated with a filesystem object. +func FileForkName(val string) attribute.KeyValue { + return FileForkNameKey.String(val) +} + +// FileGroupID returns an attribute KeyValue conforming to the "file.group.id" +// semantic conventions. It represents the primary Group ID (GID) of the file. +func FileGroupID(val string) attribute.KeyValue { + return FileGroupIDKey.String(val) +} + +// FileGroupName returns an attribute KeyValue conforming to the +// "file.group.name" semantic conventions. It represents the primary group name +// of the file. 
+func FileGroupName(val string) attribute.KeyValue { + return FileGroupNameKey.String(val) +} + +// FileInode returns an attribute KeyValue conforming to the "file.inode" +// semantic conventions. It represents the inode representing the file in the +// filesystem. +func FileInode(val string) attribute.KeyValue { + return FileInodeKey.String(val) +} + +// FileMode returns an attribute KeyValue conforming to the "file.mode" semantic +// conventions. It represents the mode of the file in octal representation. +func FileMode(val string) attribute.KeyValue { + return FileModeKey.String(val) +} + +// FileModified returns an attribute KeyValue conforming to the "file.modified" +// semantic conventions. It represents the time when the file content was last +// modified, in ISO 8601 format. +func FileModified(val string) attribute.KeyValue { + return FileModifiedKey.String(val) +} + +// FileName returns an attribute KeyValue conforming to the "file.name" semantic +// conventions. It represents the name of the file including the extension, +// without the directory. +func FileName(val string) attribute.KeyValue { + return FileNameKey.String(val) +} + +// FileOwnerID returns an attribute KeyValue conforming to the "file.owner.id" +// semantic conventions. It represents the user ID (UID) or security identifier +// (SID) of the file owner. +func FileOwnerID(val string) attribute.KeyValue { + return FileOwnerIDKey.String(val) +} + +// FileOwnerName returns an attribute KeyValue conforming to the +// "file.owner.name" semantic conventions. It represents the username of the file +// owner. +func FileOwnerName(val string) attribute.KeyValue { + return FileOwnerNameKey.String(val) +} + +// FilePath returns an attribute KeyValue conforming to the "file.path" semantic +// conventions. It represents the full path to the file, including the file name. +// It should include the drive letter, when appropriate. 
+func FilePath(val string) attribute.KeyValue { + return FilePathKey.String(val) +} + +// FileSize returns an attribute KeyValue conforming to the "file.size" semantic +// conventions. It represents the file size in bytes. +func FileSize(val int) attribute.KeyValue { + return FileSizeKey.Int(val) +} + +// FileSymbolicLinkTargetPath returns an attribute KeyValue conforming to the +// "file.symbolic_link.target_path" semantic conventions. It represents the path +// to the target of a symbolic link. +func FileSymbolicLinkTargetPath(val string) attribute.KeyValue { + return FileSymbolicLinkTargetPathKey.String(val) +} + +// Namespace: gcp +const ( + // GCPAppHubApplicationContainerKey is the attribute Key conforming to the + // "gcp.apphub.application.container" semantic conventions. It represents the + // container within GCP where the AppHub application is defined. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "projects/my-container-project" + GCPAppHubApplicationContainerKey = attribute.Key("gcp.apphub.application.container") + + // GCPAppHubApplicationIDKey is the attribute Key conforming to the + // "gcp.apphub.application.id" semantic conventions. It represents the name of + // the application as configured in AppHub. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-application" + GCPAppHubApplicationIDKey = attribute.Key("gcp.apphub.application.id") + + // GCPAppHubApplicationLocationKey is the attribute Key conforming to the + // "gcp.apphub.application.location" semantic conventions. It represents the GCP + // zone or region where the application is defined. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "us-central1" + GCPAppHubApplicationLocationKey = attribute.Key("gcp.apphub.application.location") + + // GCPAppHubServiceCriticalityTypeKey is the attribute Key conforming to the + // "gcp.apphub.service.criticality_type" semantic conventions. It represents the + // criticality of a service indicates its importance to the business. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: [See AppHub type enum] + // + // [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type + GCPAppHubServiceCriticalityTypeKey = attribute.Key("gcp.apphub.service.criticality_type") + + // GCPAppHubServiceEnvironmentTypeKey is the attribute Key conforming to the + // "gcp.apphub.service.environment_type" semantic conventions. It represents the + // environment of a service is the stage of a software lifecycle. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: [See AppHub environment type] + // + // [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1 + GCPAppHubServiceEnvironmentTypeKey = attribute.Key("gcp.apphub.service.environment_type") + + // GCPAppHubServiceIDKey is the attribute Key conforming to the + // "gcp.apphub.service.id" semantic conventions. It represents the name of the + // service as configured in AppHub. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-service" + GCPAppHubServiceIDKey = attribute.Key("gcp.apphub.service.id") + + // GCPAppHubWorkloadCriticalityTypeKey is the attribute Key conforming to the + // "gcp.apphub.workload.criticality_type" semantic conventions. It represents + // the criticality of a workload indicates its importance to the business. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: [See AppHub type enum] + // + // [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type + GCPAppHubWorkloadCriticalityTypeKey = attribute.Key("gcp.apphub.workload.criticality_type") + + // GCPAppHubWorkloadEnvironmentTypeKey is the attribute Key conforming to the + // "gcp.apphub.workload.environment_type" semantic conventions. It represents + // the environment of a workload is the stage of a software lifecycle. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: [See AppHub environment type] + // + // [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1 + GCPAppHubWorkloadEnvironmentTypeKey = attribute.Key("gcp.apphub.workload.environment_type") + + // GCPAppHubWorkloadIDKey is the attribute Key conforming to the + // "gcp.apphub.workload.id" semantic conventions. It represents the name of the + // workload as configured in AppHub. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-workload" + GCPAppHubWorkloadIDKey = attribute.Key("gcp.apphub.workload.id") + + // GCPClientServiceKey is the attribute Key conforming to the + // "gcp.client.service" semantic conventions. It represents the identifies the + // Google Cloud service for which the official client library is intended. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "appengine", "run", "firestore", "alloydb", "spanner" + // Note: Intended to be a stable identifier for Google Cloud client libraries + // that is uniform across implementation languages. The value should be derived + // from the canonical service domain for the service; for example, + // 'foo.googleapis.com' should result in a value of 'foo'. 
+ GCPClientServiceKey = attribute.Key("gcp.client.service") + + // GCPCloudRunJobExecutionKey is the attribute Key conforming to the + // "gcp.cloud_run.job.execution" semantic conventions. It represents the name of + // the Cloud Run [execution] being run for the Job, as set by the + // [`CLOUD_RUN_EXECUTION`] environment variable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "job-name-xxxx", "sample-job-mdw84" + // + // [execution]: https://cloud.google.com/run/docs/managing/job-executions + // [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars + GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") + + // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the + // "gcp.cloud_run.job.task_index" semantic conventions. It represents the index + // for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`] + // environment variable. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0, 1 + // + // [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars + GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") + + // GCPGCEInstanceHostnameKey is the attribute Key conforming to the + // "gcp.gce.instance.hostname" semantic conventions. It represents the hostname + // of a GCE instance. This is the full value of the default or [custom hostname] + // . + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-host1234.example.com", + // "sample-vm.us-west1-b.c.my-project.internal" + // + // [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm + GCPGCEInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") + + // GCPGCEInstanceNameKey is the attribute Key conforming to the + // "gcp.gce.instance.name" semantic conventions. 
It represents the instance name + // of a GCE instance. This is the value provided by `host.name`, the visible + // name of the instance in the Cloud Console UI, and the prefix for the default + // hostname of the instance as defined by the [default internal DNS name]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "instance-1", "my-vm-name" + // + // [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names + GCPGCEInstanceNameKey = attribute.Key("gcp.gce.instance.name") +) + +// GCPAppHubApplicationContainer returns an attribute KeyValue conforming to the +// "gcp.apphub.application.container" semantic conventions. It represents the +// container within GCP where the AppHub application is defined. +func GCPAppHubApplicationContainer(val string) attribute.KeyValue { + return GCPAppHubApplicationContainerKey.String(val) +} + +// GCPAppHubApplicationID returns an attribute KeyValue conforming to the +// "gcp.apphub.application.id" semantic conventions. It represents the name of +// the application as configured in AppHub. +func GCPAppHubApplicationID(val string) attribute.KeyValue { + return GCPAppHubApplicationIDKey.String(val) +} + +// GCPAppHubApplicationLocation returns an attribute KeyValue conforming to the +// "gcp.apphub.application.location" semantic conventions. It represents the GCP +// zone or region where the application is defined. +func GCPAppHubApplicationLocation(val string) attribute.KeyValue { + return GCPAppHubApplicationLocationKey.String(val) +} + +// GCPAppHubServiceID returns an attribute KeyValue conforming to the +// "gcp.apphub.service.id" semantic conventions. It represents the name of the +// service as configured in AppHub. 
+func GCPAppHubServiceID(val string) attribute.KeyValue { + return GCPAppHubServiceIDKey.String(val) +} + +// GCPAppHubWorkloadID returns an attribute KeyValue conforming to the +// "gcp.apphub.workload.id" semantic conventions. It represents the name of the +// workload as configured in AppHub. +func GCPAppHubWorkloadID(val string) attribute.KeyValue { + return GCPAppHubWorkloadIDKey.String(val) +} + +// GCPClientService returns an attribute KeyValue conforming to the +// "gcp.client.service" semantic conventions. It represents the identifies the +// Google Cloud service for which the official client library is intended. +func GCPClientService(val string) attribute.KeyValue { + return GCPClientServiceKey.String(val) +} + +// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.execution" semantic conventions. It represents the name of +// the Cloud Run [execution] being run for the Job, as set by the +// [`CLOUD_RUN_EXECUTION`] environment variable. +// +// [execution]: https://cloud.google.com/run/docs/managing/job-executions +// [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars +func GCPCloudRunJobExecution(val string) attribute.KeyValue { + return GCPCloudRunJobExecutionKey.String(val) +} + +// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index +// for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`] +// environment variable. +// +// [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars +func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { + return GCPCloudRunJobTaskIndexKey.Int(val) +} + +// GCPGCEInstanceHostname returns an attribute KeyValue conforming to the +// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname +// of a GCE instance. 
This is the full value of the default or [custom hostname] +// . +// +// [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm +func GCPGCEInstanceHostname(val string) attribute.KeyValue { + return GCPGCEInstanceHostnameKey.String(val) +} + +// GCPGCEInstanceName returns an attribute KeyValue conforming to the +// "gcp.gce.instance.name" semantic conventions. It represents the instance name +// of a GCE instance. This is the value provided by `host.name`, the visible name +// of the instance in the Cloud Console UI, and the prefix for the default +// hostname of the instance as defined by the [default internal DNS name]. +// +// [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names +func GCPGCEInstanceName(val string) attribute.KeyValue { + return GCPGCEInstanceNameKey.String(val) +} + +// Enum values for gcp.apphub.service.criticality_type +var ( + // Mission critical service. + // Stability: development + GCPAppHubServiceCriticalityTypeMissionCritical = GCPAppHubServiceCriticalityTypeKey.String("MISSION_CRITICAL") + // High impact. + // Stability: development + GCPAppHubServiceCriticalityTypeHigh = GCPAppHubServiceCriticalityTypeKey.String("HIGH") + // Medium impact. + // Stability: development + GCPAppHubServiceCriticalityTypeMedium = GCPAppHubServiceCriticalityTypeKey.String("MEDIUM") + // Low impact. + // Stability: development + GCPAppHubServiceCriticalityTypeLow = GCPAppHubServiceCriticalityTypeKey.String("LOW") +) + +// Enum values for gcp.apphub.service.environment_type +var ( + // Production environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeProduction = GCPAppHubServiceEnvironmentTypeKey.String("PRODUCTION") + // Staging environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeStaging = GCPAppHubServiceEnvironmentTypeKey.String("STAGING") + // Test environment. 
+ // Stability: development + GCPAppHubServiceEnvironmentTypeTest = GCPAppHubServiceEnvironmentTypeKey.String("TEST") + // Development environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeDevelopment = GCPAppHubServiceEnvironmentTypeKey.String("DEVELOPMENT") +) + +// Enum values for gcp.apphub.workload.criticality_type +var ( + // Mission critical service. + // Stability: development + GCPAppHubWorkloadCriticalityTypeMissionCritical = GCPAppHubWorkloadCriticalityTypeKey.String("MISSION_CRITICAL") + // High impact. + // Stability: development + GCPAppHubWorkloadCriticalityTypeHigh = GCPAppHubWorkloadCriticalityTypeKey.String("HIGH") + // Medium impact. + // Stability: development + GCPAppHubWorkloadCriticalityTypeMedium = GCPAppHubWorkloadCriticalityTypeKey.String("MEDIUM") + // Low impact. + // Stability: development + GCPAppHubWorkloadCriticalityTypeLow = GCPAppHubWorkloadCriticalityTypeKey.String("LOW") +) + +// Enum values for gcp.apphub.workload.environment_type +var ( + // Production environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeProduction = GCPAppHubWorkloadEnvironmentTypeKey.String("PRODUCTION") + // Staging environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeStaging = GCPAppHubWorkloadEnvironmentTypeKey.String("STAGING") + // Test environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeTest = GCPAppHubWorkloadEnvironmentTypeKey.String("TEST") + // Development environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeDevelopment = GCPAppHubWorkloadEnvironmentTypeKey.String("DEVELOPMENT") +) + +// Namespace: gen_ai +const ( + // GenAIAgentDescriptionKey is the attribute Key conforming to the + // "gen_ai.agent.description" semantic conventions. It represents the free-form + // description of the GenAI agent provided by the application. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Helps with math problems", "Generates fiction stories" + GenAIAgentDescriptionKey = attribute.Key("gen_ai.agent.description") + + // GenAIAgentIDKey is the attribute Key conforming to the "gen_ai.agent.id" + // semantic conventions. It represents the unique identifier of the GenAI agent. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "asst_5j66UpCpwteGg4YSxUnt7lPY" + GenAIAgentIDKey = attribute.Key("gen_ai.agent.id") + + // GenAIAgentNameKey is the attribute Key conforming to the "gen_ai.agent.name" + // semantic conventions. It represents the human-readable name of the GenAI + // agent provided by the application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Math Tutor", "Fiction Writer" + GenAIAgentNameKey = attribute.Key("gen_ai.agent.name") + + // GenAIConversationIDKey is the attribute Key conforming to the + // "gen_ai.conversation.id" semantic conventions. It represents the unique + // identifier for a conversation (session, thread), used to store and correlate + // messages within this conversation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "conv_5j66UpCpwteGg4YSxUnt7lPY" + GenAIConversationIDKey = attribute.Key("gen_ai.conversation.id") + + // GenAIDataSourceIDKey is the attribute Key conforming to the + // "gen_ai.data_source.id" semantic conventions. It represents the data source + // identifier. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "H7STPQYOND" + // Note: Data sources are used by AI agents and RAG applications to store + // grounding data. 
A data source may be an external database, object store, + // document collection, website, or any other storage system used by the GenAI + // agent or application. The `gen_ai.data_source.id` SHOULD match the identifier + // used by the GenAI system rather than a name specific to the external storage, + // such as a database or object store. Semantic conventions referencing + // `gen_ai.data_source.id` MAY also leverage additional attributes, such as + // `db.*`, to further identify and describe the data source. + GenAIDataSourceIDKey = attribute.Key("gen_ai.data_source.id") + + // GenAIOpenAIRequestServiceTierKey is the attribute Key conforming to the + // "gen_ai.openai.request.service_tier" semantic conventions. It represents the + // service tier requested. May be a specific tier, default, or auto. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "auto", "default" + GenAIOpenAIRequestServiceTierKey = attribute.Key("gen_ai.openai.request.service_tier") + + // GenAIOpenAIResponseServiceTierKey is the attribute Key conforming to the + // "gen_ai.openai.response.service_tier" semantic conventions. It represents the + // service tier used for the response. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "scale", "default" + GenAIOpenAIResponseServiceTierKey = attribute.Key("gen_ai.openai.response.service_tier") + + // GenAIOpenAIResponseSystemFingerprintKey is the attribute Key conforming to + // the "gen_ai.openai.response.system_fingerprint" semantic conventions. It + // represents a fingerprint to track any eventual change in the Generative AI + // environment. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "fp_44709d6fcb" + GenAIOpenAIResponseSystemFingerprintKey = attribute.Key("gen_ai.openai.response.system_fingerprint") + + // GenAIOperationNameKey is the attribute Key conforming to the + // "gen_ai.operation.name" semantic conventions. It represents the name of the + // operation being performed. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: If one of the predefined values applies, but specific system uses a + // different name it's RECOMMENDED to document it in the semantic conventions + // for specific GenAI system and use system-specific name in the + // instrumentation. If a different name is not documented, instrumentation + // libraries SHOULD use applicable predefined value. + GenAIOperationNameKey = attribute.Key("gen_ai.operation.name") + + // GenAIOutputTypeKey is the attribute Key conforming to the + // "gen_ai.output.type" semantic conventions. It represents the represents the + // content type requested by the client. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: This attribute SHOULD be used when the client requests output of a + // specific type. The model may return zero or more outputs of this type. + // This attribute specifies the output modality and not the actual output + // format. For example, if an image is requested, the actual output could be a + // URL pointing to an image file. + // Additional output format details may be recorded in the future in the + // `gen_ai.output.{type}.*` attributes. + GenAIOutputTypeKey = attribute.Key("gen_ai.output.type") + + // GenAIRequestChoiceCountKey is the attribute Key conforming to the + // "gen_ai.request.choice.count" semantic conventions. It represents the target + // number of candidate completions to return. 
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 3
+	GenAIRequestChoiceCountKey = attribute.Key("gen_ai.request.choice.count")
+
+	// GenAIRequestEncodingFormatsKey is the attribute Key conforming to the
+	// "gen_ai.request.encoding_formats" semantic conventions. It represents the
+	// encoding formats requested in an embeddings operation, if specified.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: ["base64"], ["float", "binary"]
+	// Note: In some GenAI systems the encoding formats are called embedding types.
+	// Also, some GenAI systems only accept a single format per request.
+	GenAIRequestEncodingFormatsKey = attribute.Key("gen_ai.request.encoding_formats")
+
+	// GenAIRequestFrequencyPenaltyKey is the attribute Key conforming to the
+	// "gen_ai.request.frequency_penalty" semantic conventions. It represents the
+	// frequency penalty setting for the GenAI request.
+	//
+	// Type: double
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 0.1
+	GenAIRequestFrequencyPenaltyKey = attribute.Key("gen_ai.request.frequency_penalty")
+
+	// GenAIRequestMaxTokensKey is the attribute Key conforming to the
+	// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
+	// number of tokens the model generates for a request.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 100
+	GenAIRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens")
+
+	// GenAIRequestModelKey is the attribute Key conforming to the
+	// "gen_ai.request.model" semantic conventions. It represents the name of the
+	// GenAI model a request is being made to.
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: gpt-4 + GenAIRequestModelKey = attribute.Key("gen_ai.request.model") + + // GenAIRequestPresencePenaltyKey is the attribute Key conforming to the + // "gen_ai.request.presence_penalty" semantic conventions. It represents the + // presence penalty setting for the GenAI request. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0.1 + GenAIRequestPresencePenaltyKey = attribute.Key("gen_ai.request.presence_penalty") + + // GenAIRequestSeedKey is the attribute Key conforming to the + // "gen_ai.request.seed" semantic conventions. It represents the requests with + // same seed value more likely to return same result. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 100 + GenAIRequestSeedKey = attribute.Key("gen_ai.request.seed") + + // GenAIRequestStopSequencesKey is the attribute Key conforming to the + // "gen_ai.request.stop_sequences" semantic conventions. It represents the list + // of sequences that the model will use to stop generating further tokens. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "forest", "lived" + GenAIRequestStopSequencesKey = attribute.Key("gen_ai.request.stop_sequences") + + // GenAIRequestTemperatureKey is the attribute Key conforming to the + // "gen_ai.request.temperature" semantic conventions. It represents the + // temperature setting for the GenAI request. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0.0 + GenAIRequestTemperatureKey = attribute.Key("gen_ai.request.temperature") + + // GenAIRequestTopKKey is the attribute Key conforming to the + // "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling + // setting for the GenAI request. 
+	//
+	// Type: double
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 1.0
+	GenAIRequestTopKKey = attribute.Key("gen_ai.request.top_k")
+
+	// GenAIRequestTopPKey is the attribute Key conforming to the
+	// "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling
+	// setting for the GenAI request.
+	//
+	// Type: double
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 1.0
+	GenAIRequestTopPKey = attribute.Key("gen_ai.request.top_p")
+
+	// GenAIResponseFinishReasonsKey is the attribute Key conforming to the
+	// "gen_ai.response.finish_reasons" semantic conventions. It represents the
+	// array of reasons the model stopped generating tokens, corresponding to each
+	// generation received.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: ["stop"], ["stop", "length"]
+	GenAIResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons")
+
+	// GenAIResponseIDKey is the attribute Key conforming to the
+	// "gen_ai.response.id" semantic conventions. It represents the unique
+	// identifier for the completion.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "chatcmpl-123"
+	GenAIResponseIDKey = attribute.Key("gen_ai.response.id")
+
+	// GenAIResponseModelKey is the attribute Key conforming to the
+	// "gen_ai.response.model" semantic conventions. It represents the name of the
+	// model that generated the response.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "gpt-4-0613"
+	GenAIResponseModelKey = attribute.Key("gen_ai.response.model")
+
+	// GenAISystemKey is the attribute Key conforming to the "gen_ai.system"
+	// semantic conventions. It represents the Generative AI product as identified
+	// by the client or server instrumentation.
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: openai + // Note: The `gen_ai.system` describes a family of GenAI models with specific + // model identified + // by `gen_ai.request.model` and `gen_ai.response.model` attributes. + // + // The actual GenAI product may differ from the one identified by the client. + // Multiple systems, including Azure OpenAI and Gemini, are accessible by OpenAI + // client + // libraries. In such cases, the `gen_ai.system` is set to `openai` based on the + // instrumentation's best knowledge, instead of the actual system. The + // `server.address` + // attribute may help identify the actual system in use for `openai`. + // + // For custom model, a custom friendly name SHOULD be used. + // If none of these options apply, the `gen_ai.system` SHOULD be set to `_OTHER` + // . + GenAISystemKey = attribute.Key("gen_ai.system") + + // GenAITokenTypeKey is the attribute Key conforming to the "gen_ai.token.type" + // semantic conventions. It represents the type of token being counted. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "input", "output" + GenAITokenTypeKey = attribute.Key("gen_ai.token.type") + + // GenAIToolCallIDKey is the attribute Key conforming to the + // "gen_ai.tool.call.id" semantic conventions. It represents the tool call + // identifier. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "call_mszuSIzqtI65i1wAUOE8w5H4" + GenAIToolCallIDKey = attribute.Key("gen_ai.tool.call.id") + + // GenAIToolDescriptionKey is the attribute Key conforming to the + // "gen_ai.tool.description" semantic conventions. It represents the tool + // description. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Multiply two numbers" + GenAIToolDescriptionKey = attribute.Key("gen_ai.tool.description") + + // GenAIToolNameKey is the attribute Key conforming to the "gen_ai.tool.name" + // semantic conventions. It represents the name of the tool utilized by the + // agent. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Flights" + GenAIToolNameKey = attribute.Key("gen_ai.tool.name") + + // GenAIToolTypeKey is the attribute Key conforming to the "gen_ai.tool.type" + // semantic conventions. It represents the type of the tool utilized by the + // agent. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "function", "extension", "datastore" + // Note: Extension: A tool executed on the agent-side to directly call external + // APIs, bridging the gap between the agent and real-world systems. + // Agent-side operations involve actions that are performed by the agent on the + // server or within the agent's controlled environment. + // Function: A tool executed on the client-side, where the agent generates + // parameters for a predefined function, and the client executes the logic. + // Client-side operations are actions taken on the user's end or within the + // client application. + // Datastore: A tool used by the agent to access and query structured or + // unstructured external data for retrieval-augmented tasks or knowledge + // updates. + GenAIToolTypeKey = attribute.Key("gen_ai.tool.type") + + // GenAIUsageInputTokensKey is the attribute Key conforming to the + // "gen_ai.usage.input_tokens" semantic conventions. It represents the number of + // tokens used in the GenAI input (prompt). 
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 100 + GenAIUsageInputTokensKey = attribute.Key("gen_ai.usage.input_tokens") + + // GenAIUsageOutputTokensKey is the attribute Key conforming to the + // "gen_ai.usage.output_tokens" semantic conventions. It represents the number + // of tokens used in the GenAI response (completion). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 180 + GenAIUsageOutputTokensKey = attribute.Key("gen_ai.usage.output_tokens") +) + +// GenAIAgentDescription returns an attribute KeyValue conforming to the +// "gen_ai.agent.description" semantic conventions. It represents the free-form +// description of the GenAI agent provided by the application. +func GenAIAgentDescription(val string) attribute.KeyValue { + return GenAIAgentDescriptionKey.String(val) +} + +// GenAIAgentID returns an attribute KeyValue conforming to the "gen_ai.agent.id" +// semantic conventions. It represents the unique identifier of the GenAI agent. +func GenAIAgentID(val string) attribute.KeyValue { + return GenAIAgentIDKey.String(val) +} + +// GenAIAgentName returns an attribute KeyValue conforming to the +// "gen_ai.agent.name" semantic conventions. It represents the human-readable +// name of the GenAI agent provided by the application. +func GenAIAgentName(val string) attribute.KeyValue { + return GenAIAgentNameKey.String(val) +} + +// GenAIConversationID returns an attribute KeyValue conforming to the +// "gen_ai.conversation.id" semantic conventions. It represents the unique +// identifier for a conversation (session, thread), used to store and correlate +// messages within this conversation. +func GenAIConversationID(val string) attribute.KeyValue { + return GenAIConversationIDKey.String(val) +} + +// GenAIDataSourceID returns an attribute KeyValue conforming to the +// "gen_ai.data_source.id" semantic conventions. 
It represents the data source +// identifier. +func GenAIDataSourceID(val string) attribute.KeyValue { + return GenAIDataSourceIDKey.String(val) +} + +// GenAIOpenAIResponseServiceTier returns an attribute KeyValue conforming to the +// "gen_ai.openai.response.service_tier" semantic conventions. It represents the +// service tier used for the response. +func GenAIOpenAIResponseServiceTier(val string) attribute.KeyValue { + return GenAIOpenAIResponseServiceTierKey.String(val) +} + +// GenAIOpenAIResponseSystemFingerprint returns an attribute KeyValue conforming +// to the "gen_ai.openai.response.system_fingerprint" semantic conventions. It +// represents a fingerprint to track any eventual change in the Generative AI +// environment. +func GenAIOpenAIResponseSystemFingerprint(val string) attribute.KeyValue { + return GenAIOpenAIResponseSystemFingerprintKey.String(val) +} + +// GenAIRequestChoiceCount returns an attribute KeyValue conforming to the +// "gen_ai.request.choice.count" semantic conventions. It represents the target +// number of candidate completions to return. +func GenAIRequestChoiceCount(val int) attribute.KeyValue { + return GenAIRequestChoiceCountKey.Int(val) +} + +// GenAIRequestEncodingFormats returns an attribute KeyValue conforming to the +// "gen_ai.request.encoding_formats" semantic conventions. It represents the +// encoding formats requested in an embeddings operation, if specified. +func GenAIRequestEncodingFormats(val ...string) attribute.KeyValue { + return GenAIRequestEncodingFormatsKey.StringSlice(val) +} + +// GenAIRequestFrequencyPenalty returns an attribute KeyValue conforming to the +// "gen_ai.request.frequency_penalty" semantic conventions. It represents the +// frequency penalty setting for the GenAI request. 
+func GenAIRequestFrequencyPenalty(val float64) attribute.KeyValue { + return GenAIRequestFrequencyPenaltyKey.Float64(val) +} + +// GenAIRequestMaxTokens returns an attribute KeyValue conforming to the +// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum +// number of tokens the model generates for a request. +func GenAIRequestMaxTokens(val int) attribute.KeyValue { + return GenAIRequestMaxTokensKey.Int(val) +} + +// GenAIRequestModel returns an attribute KeyValue conforming to the +// "gen_ai.request.model" semantic conventions. It represents the name of the +// GenAI model a request is being made to. +func GenAIRequestModel(val string) attribute.KeyValue { + return GenAIRequestModelKey.String(val) +} + +// GenAIRequestPresencePenalty returns an attribute KeyValue conforming to the +// "gen_ai.request.presence_penalty" semantic conventions. It represents the +// presence penalty setting for the GenAI request. +func GenAIRequestPresencePenalty(val float64) attribute.KeyValue { + return GenAIRequestPresencePenaltyKey.Float64(val) +} + +// GenAIRequestSeed returns an attribute KeyValue conforming to the +// "gen_ai.request.seed" semantic conventions. It represents the requests with +// same seed value more likely to return same result. +func GenAIRequestSeed(val int) attribute.KeyValue { + return GenAIRequestSeedKey.Int(val) +} + +// GenAIRequestStopSequences returns an attribute KeyValue conforming to the +// "gen_ai.request.stop_sequences" semantic conventions. It represents the list +// of sequences that the model will use to stop generating further tokens. +func GenAIRequestStopSequences(val ...string) attribute.KeyValue { + return GenAIRequestStopSequencesKey.StringSlice(val) +} + +// GenAIRequestTemperature returns an attribute KeyValue conforming to the +// "gen_ai.request.temperature" semantic conventions. It represents the +// temperature setting for the GenAI request. 
+func GenAIRequestTemperature(val float64) attribute.KeyValue { + return GenAIRequestTemperatureKey.Float64(val) +} + +// GenAIRequestTopK returns an attribute KeyValue conforming to the +// "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling +// setting for the GenAI request. +func GenAIRequestTopK(val float64) attribute.KeyValue { + return GenAIRequestTopKKey.Float64(val) +} + +// GenAIRequestTopP returns an attribute KeyValue conforming to the +// "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling +// setting for the GenAI request. +func GenAIRequestTopP(val float64) attribute.KeyValue { + return GenAIRequestTopPKey.Float64(val) +} + +// GenAIResponseFinishReasons returns an attribute KeyValue conforming to the +// "gen_ai.response.finish_reasons" semantic conventions. It represents the array +// of reasons the model stopped generating tokens, corresponding to each +// generation received. +func GenAIResponseFinishReasons(val ...string) attribute.KeyValue { + return GenAIResponseFinishReasonsKey.StringSlice(val) +} + +// GenAIResponseID returns an attribute KeyValue conforming to the +// "gen_ai.response.id" semantic conventions. It represents the unique identifier +// for the completion. +func GenAIResponseID(val string) attribute.KeyValue { + return GenAIResponseIDKey.String(val) +} + +// GenAIResponseModel returns an attribute KeyValue conforming to the +// "gen_ai.response.model" semantic conventions. It represents the name of the +// model that generated the response. +func GenAIResponseModel(val string) attribute.KeyValue { + return GenAIResponseModelKey.String(val) +} + +// GenAIToolCallID returns an attribute KeyValue conforming to the +// "gen_ai.tool.call.id" semantic conventions. It represents the tool call +// identifier. 
+func GenAIToolCallID(val string) attribute.KeyValue { + return GenAIToolCallIDKey.String(val) +} + +// GenAIToolDescription returns an attribute KeyValue conforming to the +// "gen_ai.tool.description" semantic conventions. It represents the tool +// description. +func GenAIToolDescription(val string) attribute.KeyValue { + return GenAIToolDescriptionKey.String(val) +} + +// GenAIToolName returns an attribute KeyValue conforming to the +// "gen_ai.tool.name" semantic conventions. It represents the name of the tool +// utilized by the agent. +func GenAIToolName(val string) attribute.KeyValue { + return GenAIToolNameKey.String(val) +} + +// GenAIToolType returns an attribute KeyValue conforming to the +// "gen_ai.tool.type" semantic conventions. It represents the type of the tool +// utilized by the agent. +func GenAIToolType(val string) attribute.KeyValue { + return GenAIToolTypeKey.String(val) +} + +// GenAIUsageInputTokens returns an attribute KeyValue conforming to the +// "gen_ai.usage.input_tokens" semantic conventions. It represents the number of +// tokens used in the GenAI input (prompt). +func GenAIUsageInputTokens(val int) attribute.KeyValue { + return GenAIUsageInputTokensKey.Int(val) +} + +// GenAIUsageOutputTokens returns an attribute KeyValue conforming to the +// "gen_ai.usage.output_tokens" semantic conventions. It represents the number of +// tokens used in the GenAI response (completion). +func GenAIUsageOutputTokens(val int) attribute.KeyValue { + return GenAIUsageOutputTokensKey.Int(val) +} + +// Enum values for gen_ai.openai.request.service_tier +var ( + // The system will utilize scale tier credits until they are exhausted. + // Stability: development + GenAIOpenAIRequestServiceTierAuto = GenAIOpenAIRequestServiceTierKey.String("auto") + // The system will utilize the default scale tier. 
+ // Stability: development + GenAIOpenAIRequestServiceTierDefault = GenAIOpenAIRequestServiceTierKey.String("default") +) + +// Enum values for gen_ai.operation.name +var ( + // Chat completion operation such as [OpenAI Chat API] + // Stability: development + // + // [OpenAI Chat API]: https://platform.openai.com/docs/api-reference/chat + GenAIOperationNameChat = GenAIOperationNameKey.String("chat") + // Multimodal content generation operation such as [Gemini Generate Content] + // Stability: development + // + // [Gemini Generate Content]: https://ai.google.dev/api/generate-content + GenAIOperationNameGenerateContent = GenAIOperationNameKey.String("generate_content") + // Text completions operation such as [OpenAI Completions API (Legacy)] + // Stability: development + // + // [OpenAI Completions API (Legacy)]: https://platform.openai.com/docs/api-reference/completions + GenAIOperationNameTextCompletion = GenAIOperationNameKey.String("text_completion") + // Embeddings operation such as [OpenAI Create embeddings API] + // Stability: development + // + // [OpenAI Create embeddings API]: https://platform.openai.com/docs/api-reference/embeddings/create + GenAIOperationNameEmbeddings = GenAIOperationNameKey.String("embeddings") + // Create GenAI agent + // Stability: development + GenAIOperationNameCreateAgent = GenAIOperationNameKey.String("create_agent") + // Invoke GenAI agent + // Stability: development + GenAIOperationNameInvokeAgent = GenAIOperationNameKey.String("invoke_agent") + // Execute a tool + // Stability: development + GenAIOperationNameExecuteTool = GenAIOperationNameKey.String("execute_tool") +) + +// Enum values for gen_ai.output.type +var ( + // Plain text + // Stability: development + GenAIOutputTypeText = GenAIOutputTypeKey.String("text") + // JSON object with known or unknown schema + // Stability: development + GenAIOutputTypeJSON = GenAIOutputTypeKey.String("json") + // Image + // Stability: development + GenAIOutputTypeImage = 
GenAIOutputTypeKey.String("image") + // Speech + // Stability: development + GenAIOutputTypeSpeech = GenAIOutputTypeKey.String("speech") +) + +// Enum values for gen_ai.system +var ( + // OpenAI + // Stability: development + GenAISystemOpenAI = GenAISystemKey.String("openai") + // Any Google generative AI endpoint + // Stability: development + GenAISystemGCPGenAI = GenAISystemKey.String("gcp.gen_ai") + // Vertex AI + // Stability: development + GenAISystemGCPVertexAI = GenAISystemKey.String("gcp.vertex_ai") + // Gemini + // Stability: development + GenAISystemGCPGemini = GenAISystemKey.String("gcp.gemini") + // Anthropic + // Stability: development + GenAISystemAnthropic = GenAISystemKey.String("anthropic") + // Cohere + // Stability: development + GenAISystemCohere = GenAISystemKey.String("cohere") + // Azure AI Inference + // Stability: development + GenAISystemAzureAIInference = GenAISystemKey.String("azure.ai.inference") + // Azure OpenAI + // Stability: development + GenAISystemAzureAIOpenAI = GenAISystemKey.String("azure.ai.openai") + // IBM Watsonx AI + // Stability: development + GenAISystemIBMWatsonxAI = GenAISystemKey.String("ibm.watsonx.ai") + // AWS Bedrock + // Stability: development + GenAISystemAWSBedrock = GenAISystemKey.String("aws.bedrock") + // Perplexity + // Stability: development + GenAISystemPerplexity = GenAISystemKey.String("perplexity") + // xAI + // Stability: development + GenAISystemXai = GenAISystemKey.String("xai") + // DeepSeek + // Stability: development + GenAISystemDeepseek = GenAISystemKey.String("deepseek") + // Groq + // Stability: development + GenAISystemGroq = GenAISystemKey.String("groq") + // Mistral AI + // Stability: development + GenAISystemMistralAI = GenAISystemKey.String("mistral_ai") +) + +// Enum values for gen_ai.token.type +var ( + // Input tokens (prompt, input, etc.) + // Stability: development + GenAITokenTypeInput = GenAITokenTypeKey.String("input") + // Output tokens (completion, response, etc.) 
+ // Stability: development + GenAITokenTypeOutput = GenAITokenTypeKey.String("output") +) + +// Namespace: geo +const ( + // GeoContinentCodeKey is the attribute Key conforming to the + // "geo.continent.code" semantic conventions. It represents the two-letter code + // representing continent’s name. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + GeoContinentCodeKey = attribute.Key("geo.continent.code") + + // GeoCountryISOCodeKey is the attribute Key conforming to the + // "geo.country.iso_code" semantic conventions. It represents the two-letter ISO + // Country Code ([ISO 3166-1 alpha2]). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CA" + // + // [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes + GeoCountryISOCodeKey = attribute.Key("geo.country.iso_code") + + // GeoLocalityNameKey is the attribute Key conforming to the "geo.locality.name" + // semantic conventions. It represents the locality name. Represents the name of + // a city, town, village, or similar populated place. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Montreal", "Berlin" + GeoLocalityNameKey = attribute.Key("geo.locality.name") + + // GeoLocationLatKey is the attribute Key conforming to the "geo.location.lat" + // semantic conventions. It represents the latitude of the geo location in + // [WGS84]. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 45.505918 + // + // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 + GeoLocationLatKey = attribute.Key("geo.location.lat") + + // GeoLocationLonKey is the attribute Key conforming to the "geo.location.lon" + // semantic conventions. It represents the longitude of the geo location in + // [WGS84]. 
+ // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: -73.61483 + // + // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 + GeoLocationLonKey = attribute.Key("geo.location.lon") + + // GeoPostalCodeKey is the attribute Key conforming to the "geo.postal_code" + // semantic conventions. It represents the postal code associated with the + // location. Values appropriate for this field may also be known as a postcode + // or ZIP code and will vary widely from country to country. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "94040" + GeoPostalCodeKey = attribute.Key("geo.postal_code") + + // GeoRegionISOCodeKey is the attribute Key conforming to the + // "geo.region.iso_code" semantic conventions. It represents the region ISO code + // ([ISO 3166-2]). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CA-QC" + // + // [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2 + GeoRegionISOCodeKey = attribute.Key("geo.region.iso_code") +) + +// GeoCountryISOCode returns an attribute KeyValue conforming to the +// "geo.country.iso_code" semantic conventions. It represents the two-letter ISO +// Country Code ([ISO 3166-1 alpha2]). +// +// [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes +func GeoCountryISOCode(val string) attribute.KeyValue { + return GeoCountryISOCodeKey.String(val) +} + +// GeoLocalityName returns an attribute KeyValue conforming to the +// "geo.locality.name" semantic conventions. It represents the locality name. +// Represents the name of a city, town, village, or similar populated place. +func GeoLocalityName(val string) attribute.KeyValue { + return GeoLocalityNameKey.String(val) +} + +// GeoLocationLat returns an attribute KeyValue conforming to the +// "geo.location.lat" semantic conventions. It represents the latitude of the geo +// location in [WGS84]. 
+// +// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 +func GeoLocationLat(val float64) attribute.KeyValue { + return GeoLocationLatKey.Float64(val) +} + +// GeoLocationLon returns an attribute KeyValue conforming to the +// "geo.location.lon" semantic conventions. It represents the longitude of the +// geo location in [WGS84]. +// +// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 +func GeoLocationLon(val float64) attribute.KeyValue { + return GeoLocationLonKey.Float64(val) +} + +// GeoPostalCode returns an attribute KeyValue conforming to the +// "geo.postal_code" semantic conventions. It represents the postal code +// associated with the location. Values appropriate for this field may also be +// known as a postcode or ZIP code and will vary widely from country to country. +func GeoPostalCode(val string) attribute.KeyValue { + return GeoPostalCodeKey.String(val) +} + +// GeoRegionISOCode returns an attribute KeyValue conforming to the +// "geo.region.iso_code" semantic conventions. It represents the region ISO code +// ([ISO 3166-2]). 
+// +// [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2 +func GeoRegionISOCode(val string) attribute.KeyValue { + return GeoRegionISOCodeKey.String(val) +} + +// Enum values for geo.continent.code +var ( + // Africa + // Stability: development + GeoContinentCodeAf = GeoContinentCodeKey.String("AF") + // Antarctica + // Stability: development + GeoContinentCodeAn = GeoContinentCodeKey.String("AN") + // Asia + // Stability: development + GeoContinentCodeAs = GeoContinentCodeKey.String("AS") + // Europe + // Stability: development + GeoContinentCodeEu = GeoContinentCodeKey.String("EU") + // North America + // Stability: development + GeoContinentCodeNa = GeoContinentCodeKey.String("NA") + // Oceania + // Stability: development + GeoContinentCodeOc = GeoContinentCodeKey.String("OC") + // South America + // Stability: development + GeoContinentCodeSa = GeoContinentCodeKey.String("SA") +) + +// Namespace: go +const ( + // GoMemoryTypeKey is the attribute Key conforming to the "go.memory.type" + // semantic conventions. It represents the type of memory. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "other", "stack" + GoMemoryTypeKey = attribute.Key("go.memory.type") +) + +// Enum values for go.memory.type +var ( + // Memory allocated from the heap that is reserved for stack space, whether or + // not it is currently in-use. + // Stability: development + GoMemoryTypeStack = GoMemoryTypeKey.String("stack") + // Memory used by the Go runtime, excluding other categories of memory usage + // described in this enumeration. + // Stability: development + GoMemoryTypeOther = GoMemoryTypeKey.String("other") +) + +// Namespace: graphql +const ( + // GraphQLDocumentKey is the attribute Key conforming to the "graphql.document" + // semantic conventions. It represents the GraphQL document being executed. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: query findBookById { bookById(id: ?) { name } } + // Note: The value may be sanitized to exclude sensitive information. + GraphQLDocumentKey = attribute.Key("graphql.document") + + // GraphQLOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of the + // operation being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: findBookById + GraphQLOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphQLOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of the + // operation being executed. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "query", "mutation", "subscription" + GraphQLOperationTypeKey = attribute.Key("graphql.operation.type") +) + +// GraphQLDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphQLDocument(val string) attribute.KeyValue { + return GraphQLDocumentKey.String(val) +} + +// GraphQLOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. 
+func GraphQLOperationName(val string) attribute.KeyValue { + return GraphQLOperationNameKey.String(val) +} + +// Enum values for graphql.operation.type +var ( + // GraphQL query + // Stability: development + GraphQLOperationTypeQuery = GraphQLOperationTypeKey.String("query") + // GraphQL mutation + // Stability: development + GraphQLOperationTypeMutation = GraphQLOperationTypeKey.String("mutation") + // GraphQL subscription + // Stability: development + GraphQLOperationTypeSubscription = GraphQLOperationTypeKey.String("subscription") +) + +// Namespace: heroku +const ( + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions. It represents the unique identifier for the + // application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2daa2797-e42b-4624-9322-ec3f968df4da" + HerokuAppIDKey = attribute.Key("heroku.app.id") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit hash + // for the current release. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "e6134959463efd8966b20e75b913cafe3f5ec" + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. It represents the + // time and date the release was created. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2022-10-23T18:00:42Z" + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") +) + +// HerokuAppID returns an attribute KeyValue conforming to the "heroku.app.id" +// semantic conventions. It represents the unique identifier for the application. 
+func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release. +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming to the +// "heroku.release.creation_timestamp" semantic conventions. It represents the +// time and date the release was created. +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// Namespace: host +const ( + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is running + // on. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HostArchKey = attribute.Key("host.arch") + + // HostCPUCacheL2SizeKey is the attribute Key conforming to the + // "host.cpu.cache.l2.size" semantic conventions. It represents the amount of + // level 2 memory cache available to the processor (in Bytes). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 12288000 + HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") + + // HostCPUFamilyKey is the attribute Key conforming to the "host.cpu.family" + // semantic conventions. It represents the family or generation of the CPU. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "6", "PA-RISC 1.1e" + HostCPUFamilyKey = attribute.Key("host.cpu.family") + + // HostCPUModelIDKey is the attribute Key conforming to the "host.cpu.model.id" + // semantic conventions. It represents the model identifier. 
It provides more + // granular information about the CPU, distinguishing it from other CPUs within + // the same family. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "6", "9000/778/B180L" + HostCPUModelIDKey = attribute.Key("host.cpu.model.id") + + // HostCPUModelNameKey is the attribute Key conforming to the + // "host.cpu.model.name" semantic conventions. It represents the model + // designation of the processor. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz" + HostCPUModelNameKey = attribute.Key("host.cpu.model.name") + + // HostCPUSteppingKey is the attribute Key conforming to the "host.cpu.stepping" + // semantic conventions. It represents the stepping or core revisions. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1", "r1p1" + HostCPUSteppingKey = attribute.Key("host.cpu.stepping") + + // HostCPUVendorIDKey is the attribute Key conforming to the + // "host.cpu.vendor.id" semantic conventions. It represents the processor + // manufacturer identifier. A maximum 12-character string. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "GenuineIntel" + // Note: [CPUID] command returns the vendor ID string in EBX, EDX and ECX + // registers. Writing these to memory in this order results in a 12-character + // string. + // + // [CPUID]: https://wiki.osdev.org/CPUID + HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") + + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be the + // instance_id assigned by the cloud provider. For non-containerized systems, + // this should be the `machine-id`. 
See the table below for the sources to use + // to determine the `machine-id` based on operating system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "fdbf79e8af94cb7f9e8df36789187052" + HostIDKey = attribute.Key("host.id") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the VM image ID or host OS image ID. For + // Cloud, this value is from the provider. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ami-07b06b442921831e5" + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageNameKey is the attribute Key conforming to the "host.image.name" + // semantic conventions. It represents the name of the VM image or OS install + // the host was instantiated from. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "infra-ami-eks-worker-node-7d4ec78312", "CentOS-8-x86_64-1905" + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version string + // of the VM image or host OS as defined in [Version Attributes]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0.1" + // + // [Version Attributes]: /docs/resource/README.md#version-attributes + HostImageVersionKey = attribute.Key("host.image.version") + + // HostIPKey is the attribute Key conforming to the "host.ip" semantic + // conventions. It represents the available IP addresses of the host, excluding + // loopback interfaces. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "192.168.1.140", "fe80::abc2:4a28:737a:609e" + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. 
IPv6 + // addresses MUST be specified in the [RFC 5952] format. + // + // [RFC 5952]: https://www.rfc-editor.org/rfc/rfc5952.html + HostIPKey = attribute.Key("host.ip") + + // HostMacKey is the attribute Key conforming to the "host.mac" semantic + // conventions. It represents the available MAC addresses of the host, excluding + // loopback interfaces. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "AC-DE-48-23-45-67", "AC-DE-48-23-45-67-01-9F" + // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal form]: as + // hyphen-separated octets in uppercase hexadecimal form from most to least + // significant. + // + // [IEEE RA hexadecimal form]: https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf + HostMacKey = attribute.Key("host.mac") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified hostname, + // or another name specified by the user. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-test" + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "n1-standard-1" + HostTypeKey = attribute.Key("host.type") +) + +// HostCPUCacheL2Size returns an attribute KeyValue conforming to the +// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of +// level 2 memory cache available to the processor (in Bytes). 
+func HostCPUCacheL2Size(val int) attribute.KeyValue { + return HostCPUCacheL2SizeKey.Int(val) +} + +// HostCPUFamily returns an attribute KeyValue conforming to the +// "host.cpu.family" semantic conventions. It represents the family or generation +// of the CPU. +func HostCPUFamily(val string) attribute.KeyValue { + return HostCPUFamilyKey.String(val) +} + +// HostCPUModelID returns an attribute KeyValue conforming to the +// "host.cpu.model.id" semantic conventions. It represents the model identifier. +// It provides more granular information about the CPU, distinguishing it from +// other CPUs within the same family. +func HostCPUModelID(val string) attribute.KeyValue { + return HostCPUModelIDKey.String(val) +} + +// HostCPUModelName returns an attribute KeyValue conforming to the +// "host.cpu.model.name" semantic conventions. It represents the model +// designation of the processor. +func HostCPUModelName(val string) attribute.KeyValue { + return HostCPUModelNameKey.String(val) +} + +// HostCPUStepping returns an attribute KeyValue conforming to the +// "host.cpu.stepping" semantic conventions. It represents the stepping or core +// revisions. +func HostCPUStepping(val string) attribute.KeyValue { + return HostCPUSteppingKey.String(val) +} + +// HostCPUVendorID returns an attribute KeyValue conforming to the +// "host.cpu.vendor.id" semantic conventions. It represents the processor +// manufacturer identifier. A maximum 12-character string. +func HostCPUVendorID(val string) attribute.KeyValue { + return HostCPUVendorIDKey.String(val) +} + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized systems, +// this should be the `machine-id`. See the table below for the sources to use to +// determine the `machine-id` based on operating system. 
+func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the "host.image.id" +// semantic conventions. It represents the VM image ID or host OS image ID. For +// Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM image +// or OS install the host was instantiated from. +func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string of +// the VM image or host OS as defined in [Version Attributes]. +// +// [Version Attributes]: /docs/resource/README.md#version-attributes +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic +// conventions. It represents the available IP addresses of the host, excluding +// loopback interfaces. +func HostIP(val ...string) attribute.KeyValue { + return HostIPKey.StringSlice(val) +} + +// HostMac returns an attribute KeyValue conforming to the "host.mac" semantic +// conventions. It represents the available MAC addresses of the host, excluding +// loopback interfaces. +func HostMac(val ...string) attribute.KeyValue { + return HostMacKey.StringSlice(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" semantic +// conventions. It represents the name of the host. On Unix systems, it may +// contain what the hostname command returns, or the fully qualified hostname, or +// another name specified by the user. 
+func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" semantic +// conventions. It represents the type of host. For Cloud, this must be the +// machine type. +func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// Enum values for host.arch +var ( + // AMD64 + // Stability: development + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + // Stability: development + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + // Stability: development + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + // Stability: development + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + // Stability: development + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + // Stability: development + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + // Stability: development + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + // Stability: development + HostArchX86 = HostArchKey.String("x86") +) + +// Namespace: http +const ( + // HTTPConnectionStateKey is the attribute Key conforming to the + // "http.connection.state" semantic conventions. It represents the state of the + // HTTP connection in the HTTP connection pool. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "active", "idle" + HTTPConnectionStateKey = attribute.Key("http.connection.state") + + // HTTPRequestBodySizeKey is the attribute Key conforming to the + // "http.request.body.size" semantic conventions. It represents the size of the + // request payload body in bytes. This is the number of bytes transferred + // excluding headers and is often, but not always, present as the + // [Content-Length] header. For requests using transport encoding, this should + // be the compressed size. 
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length + HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") + + // HTTPRequestMethodKey is the attribute Key conforming to the + // "http.request.method" semantic conventions. It represents the HTTP request + // method. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "GET", "POST", "HEAD" + // Note: HTTP request method value SHOULD be "known" to the instrumentation. + // By default, this convention defines "known" methods as the ones listed in + // [RFC9110] + // and the PATCH method defined in [RFC5789]. + // + // If the HTTP request method is not known to instrumentation, it MUST set the + // `http.request.method` attribute to `_OTHER`. + // + // If the HTTP instrumentation could end up converting valid HTTP request + // methods to `_OTHER`, then it MUST provide a way to override + // the list of known HTTP methods. If this override is done via environment + // variable, then the environment variable MUST be named + // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of + // case-sensitive known HTTP methods + // (this list MUST be a full override of the default known method, it is not a + // list of known methods in addition to the defaults). + // + // HTTP method names are case-sensitive and `http.request.method` attribute + // value MUST match a known HTTP method name exactly. + // Instrumentations for specific web frameworks that consider HTTP methods to be + // case insensitive, SHOULD populate a canonical equivalent. + // Tracing instrumentations that do so, MUST also set + // `http.request.method_original` to the original value. 
+ // + // [RFC9110]: https://www.rfc-editor.org/rfc/rfc9110.html#name-methods + // [RFC5789]: https://www.rfc-editor.org/rfc/rfc5789.html + HTTPRequestMethodKey = attribute.Key("http.request.method") + + // HTTPRequestMethodOriginalKey is the attribute Key conforming to the + // "http.request.method_original" semantic conventions. It represents the + // original HTTP method sent by the client in the request line. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "GeT", "ACL", "foo" + HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") + + // HTTPRequestResendCountKey is the attribute Key conforming to the + // "http.request.resend_count" semantic conventions. It represents the ordinal + // number of request resending attempt (for any reason, including redirects). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending (e.g. + // redirection, authorization failure, 503 Server Unavailable, network issues, + // or any other). + HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") + + // HTTPRequestSizeKey is the attribute Key conforming to the "http.request.size" + // semantic conventions. It represents the total size of the request in bytes. + // This should be the total number of bytes sent over the wire, including the + // request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, and request + // body if any. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + HTTPRequestSizeKey = attribute.Key("http.request.size") + + // HTTPResponseBodySizeKey is the attribute Key conforming to the + // "http.response.body.size" semantic conventions. It represents the size of the + // response payload body in bytes. 
This is the number of bytes transferred + // excluding headers and is often, but not always, present as the + // [Content-Length] header. For requests using transport encoding, this should + // be the compressed size. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length + HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") + + // HTTPResponseSizeKey is the attribute Key conforming to the + // "http.response.size" semantic conventions. It represents the total size of + // the response in bytes. This should be the total number of bytes sent over the + // wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), + // headers, and response body and trailers if any. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + HTTPResponseSizeKey = attribute.Key("http.response.size") + + // HTTPResponseStatusCodeKey is the attribute Key conforming to the + // "http.response.status_code" semantic conventions. It represents the + // [HTTP response status code]. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 200 + // + // [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 + HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" semantic + // conventions. It represents the matched route, that is, the path template in + // the format used by the respective server framework. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "/users/:userID?", "{controller}/{action}/{id?}" + // Note: MUST NOT be populated when this is not supported by the HTTP server + // framework as the route attribute should have low-cardinality and the URI path + // can NOT substitute it. 
+	// SHOULD include the [application root] if there is one.
+	//
+	// [application root]: /docs/http/http-spans.md#http-server-definitions
+	HTTPRouteKey = attribute.Key("http.route")
+)
+
+// HTTPRequestBodySize returns an attribute KeyValue conforming to the
+// "http.request.body.size" semantic conventions. It represents the size of the
+// request payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func HTTPRequestBodySize(val int) attribute.KeyValue {
+	return HTTPRequestBodySizeKey.Int(val)
+}
+
+// HTTPRequestHeader returns an attribute KeyValue conforming to the
+// "http.request.header" semantic conventions. It represents the HTTP request
+// headers, `<key>` being the normalized HTTP Header name (lowercase), the value
+// being the header values.
+func HTTPRequestHeader(key string, val ...string) attribute.KeyValue {
+	return attribute.StringSlice("http.request.header."+key, val)
+}
+
+// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
+// "http.request.method_original" semantic conventions. It represents the
+// original HTTP method sent by the client in the request line.
+func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
+	return HTTPRequestMethodOriginalKey.String(val)
+}
+
+// HTTPRequestResendCount returns an attribute KeyValue conforming to the
+// "http.request.resend_count" semantic conventions. It represents the ordinal
+// number of request resending attempt (for any reason, including redirects).
+func HTTPRequestResendCount(val int) attribute.KeyValue {
+	return HTTPRequestResendCountKey.Int(val)
+}
+
+// HTTPRequestSize returns an attribute KeyValue conforming to the
+// "http.request.size" semantic conventions. 
It represents the total size of the
+// request in bytes. This should be the total number of bytes sent over the wire,
+// including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers,
+// and request body if any.
+func HTTPRequestSize(val int) attribute.KeyValue {
+	return HTTPRequestSizeKey.Int(val)
+}
+
+// HTTPResponseBodySize returns an attribute KeyValue conforming to the
+// "http.response.body.size" semantic conventions. It represents the size of the
+// response payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func HTTPResponseBodySize(val int) attribute.KeyValue {
+	return HTTPResponseBodySizeKey.Int(val)
+}
+
+// HTTPResponseHeader returns an attribute KeyValue conforming to the
+// "http.response.header" semantic conventions. It represents the HTTP response
+// headers, `<key>` being the normalized HTTP Header name (lowercase), the value
+// being the header values.
+func HTTPResponseHeader(key string, val ...string) attribute.KeyValue {
+	return attribute.StringSlice("http.response.header."+key, val)
+}
+
+// HTTPResponseSize returns an attribute KeyValue conforming to the
+// "http.response.size" semantic conventions. It represents the total size of the
+// response in bytes. This should be the total number of bytes sent over the
+// wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+// headers, and response body and trailers if any.
+func HTTPResponseSize(val int) attribute.KeyValue {
+	return HTTPResponseSizeKey.Int(val)
+}
+
+// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
+// "http.response.status_code" semantic conventions. It represents the
+// [HTTP response status code]. 
+// +// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 +func HTTPResponseStatusCode(val int) attribute.KeyValue { + return HTTPResponseStatusCodeKey.Int(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route, that is, the path +// template in the format used by the respective server framework. +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// Enum values for http.connection.state +var ( + // active state. + // Stability: development + HTTPConnectionStateActive = HTTPConnectionStateKey.String("active") + // idle state. + // Stability: development + HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle") +) + +// Enum values for http.request.method +var ( + // CONNECT method. + // Stability: stable + HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") + // DELETE method. + // Stability: stable + HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") + // GET method. + // Stability: stable + HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") + // HEAD method. + // Stability: stable + HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") + // OPTIONS method. + // Stability: stable + HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") + // PATCH method. + // Stability: stable + HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") + // POST method. + // Stability: stable + HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") + // PUT method. + // Stability: stable + HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") + // TRACE method. + // Stability: stable + HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") + // Any HTTP method that the instrumentation has no prior knowledge of. 
+ // Stability: stable + HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") +) + +// Namespace: hw +const ( + // HwIDKey is the attribute Key conforming to the "hw.id" semantic conventions. + // It represents an identifier for the hardware component, unique within the + // monitored host. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "win32battery_battery_testsysa33_1" + HwIDKey = attribute.Key("hw.id") + + // HwNameKey is the attribute Key conforming to the "hw.name" semantic + // conventions. It represents an easily-recognizable name for the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "eth0" + HwNameKey = attribute.Key("hw.name") + + // HwParentKey is the attribute Key conforming to the "hw.parent" semantic + // conventions. It represents the unique identifier of the parent component + // (typically the `hw.id` attribute of the enclosure, or disk controller). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "dellStorage_perc_0" + HwParentKey = attribute.Key("hw.parent") + + // HwStateKey is the attribute Key conforming to the "hw.state" semantic + // conventions. It represents the current state of the component. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwStateKey = attribute.Key("hw.state") + + // HwTypeKey is the attribute Key conforming to the "hw.type" semantic + // conventions. It represents the type of the component. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: Describes the category of the hardware component for which `hw.state` + // is being reported. 
For example, `hw.type=temperature` along with + // `hw.state=degraded` would indicate that the temperature of the hardware + // component has been reported as `degraded`. + HwTypeKey = attribute.Key("hw.type") +) + +// HwID returns an attribute KeyValue conforming to the "hw.id" semantic +// conventions. It represents an identifier for the hardware component, unique +// within the monitored host. +func HwID(val string) attribute.KeyValue { + return HwIDKey.String(val) +} + +// HwName returns an attribute KeyValue conforming to the "hw.name" semantic +// conventions. It represents an easily-recognizable name for the hardware +// component. +func HwName(val string) attribute.KeyValue { + return HwNameKey.String(val) +} + +// HwParent returns an attribute KeyValue conforming to the "hw.parent" semantic +// conventions. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func HwParent(val string) attribute.KeyValue { + return HwParentKey.String(val) +} + +// Enum values for hw.state +var ( + // Ok + // Stability: development + HwStateOk = HwStateKey.String("ok") + // Degraded + // Stability: development + HwStateDegraded = HwStateKey.String("degraded") + // Failed + // Stability: development + HwStateFailed = HwStateKey.String("failed") +) + +// Enum values for hw.type +var ( + // Battery + // Stability: development + HwTypeBattery = HwTypeKey.String("battery") + // CPU + // Stability: development + HwTypeCPU = HwTypeKey.String("cpu") + // Disk controller + // Stability: development + HwTypeDiskController = HwTypeKey.String("disk_controller") + // Enclosure + // Stability: development + HwTypeEnclosure = HwTypeKey.String("enclosure") + // Fan + // Stability: development + HwTypeFan = HwTypeKey.String("fan") + // GPU + // Stability: development + HwTypeGpu = HwTypeKey.String("gpu") + // Logical disk + // Stability: development + HwTypeLogicalDisk = HwTypeKey.String("logical_disk") + // 
Memory + // Stability: development + HwTypeMemory = HwTypeKey.String("memory") + // Network + // Stability: development + HwTypeNetwork = HwTypeKey.String("network") + // Physical disk + // Stability: development + HwTypePhysicalDisk = HwTypeKey.String("physical_disk") + // Power supply + // Stability: development + HwTypePowerSupply = HwTypeKey.String("power_supply") + // Tape drive + // Stability: development + HwTypeTapeDrive = HwTypeKey.String("tape_drive") + // Temperature + // Stability: development + HwTypeTemperature = HwTypeKey.String("temperature") + // Voltage + // Stability: development + HwTypeVoltage = HwTypeKey.String("voltage") +) + +// Namespace: ios +const ( + // IOSAppStateKey is the attribute Key conforming to the "ios.app.state" + // semantic conventions. It represents the this attribute represents the state + // of the application. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The iOS lifecycle states are defined in the + // [UIApplicationDelegate documentation], and from which the `OS terminology` + // column values are derived. + // + // [UIApplicationDelegate documentation]: https://developer.apple.com/documentation/uikit/uiapplicationdelegate + IOSAppStateKey = attribute.Key("ios.app.state") +) + +// Enum values for ios.app.state +var ( + // The app has become `active`. Associated with UIKit notification + // `applicationDidBecomeActive`. + // + // Stability: development + IOSAppStateActive = IOSAppStateKey.String("active") + // The app is now `inactive`. Associated with UIKit notification + // `applicationWillResignActive`. + // + // Stability: development + IOSAppStateInactive = IOSAppStateKey.String("inactive") + // The app is now in the background. This value is associated with UIKit + // notification `applicationDidEnterBackground`. + // + // Stability: development + IOSAppStateBackground = IOSAppStateKey.String("background") + // The app is now in the foreground. 
This value is associated with UIKit + // notification `applicationWillEnterForeground`. + // + // Stability: development + IOSAppStateForeground = IOSAppStateKey.String("foreground") + // The app is about to terminate. Associated with UIKit notification + // `applicationWillTerminate`. + // + // Stability: development + IOSAppStateTerminate = IOSAppStateKey.String("terminate") +) + +// Namespace: k8s +const ( + // K8SClusterNameKey is the attribute Key conforming to the "k8s.cluster.name" + // semantic conventions. It represents the name of the cluster. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-cluster" + K8SClusterNameKey = attribute.Key("k8s.cluster.name") + + // K8SClusterUIDKey is the attribute Key conforming to the "k8s.cluster.uid" + // semantic conventions. It represents a pseudo-ID for the cluster, set to the + // UID of the `kube-system` namespace. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: K8s doesn't have support for obtaining a cluster ID. If this is ever + // added, we will recommend collecting the `k8s.cluster.uid` through the + // official APIs. In the meantime, we are able to use the `uid` of the + // `kube-system` namespace as a proxy for cluster ID. Read on for the + // rationale. + // + // Every object created in a K8s cluster is assigned a distinct UID. The + // `kube-system` namespace is used by Kubernetes itself and will exist + // for the lifetime of the cluster. Using the `uid` of the `kube-system` + // namespace is a reasonable proxy for the K8s ClusterID as it will only + // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are + // UUIDs as standardized by + // [ISO/IEC 9834-8 and ITU-T X.667]. + // Which states: + // + // > If generated according to one of the mechanisms defined in Rec. 
+ // > ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // > different from all other UUIDs generated before 3603 A.D., or is + // > extremely likely to be different (depending on the mechanism chosen). + // + // Therefore, UIDs between clusters should be extremely unlikely to + // conflict. + // + // [ISO/IEC 9834-8 and ITU-T X.667]: https://www.itu.int/ITU-T/studygroups/com17/oid.html + K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") + + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "redis" + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the number + // of times the container was restarted. This attribute can be used to identify + // a particular container (running or stopped) within a container spec. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") + + // K8SContainerStatusLastTerminatedReasonKey is the attribute Key conforming to + // the "k8s.container.status.last_terminated_reason" semantic conventions. It + // represents the last terminated reason of the Container. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Evicted", "Error" + K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason") + + // K8SContainerStatusReasonKey is the attribute Key conforming to the + // "k8s.container.status.reason" semantic conventions. It represents the reason + // for the container state. Corresponds to the `reason` field of the: + // [K8s ContainerStateWaiting] or [K8s ContainerStateTerminated]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ContainerCreating", "CrashLoopBackOff", + // "CreateContainerConfigError", "ErrImagePull", "ImagePullBackOff", + // "OOMKilled", "Completed", "Error", "ContainerCannotRun" + // + // [K8s ContainerStateWaiting]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstatewaiting-v1-core + // [K8s ContainerStateTerminated]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstateterminated-v1-core + K8SContainerStatusReasonKey = attribute.Key("k8s.container.status.reason") + + // K8SContainerStatusStateKey is the attribute Key conforming to the + // "k8s.container.status.state" semantic conventions. It represents the state of + // the container. [K8s ContainerState]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "terminated", "running", "waiting" + // + // [K8s ContainerState]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstate-v1-core + K8SContainerStatusStateKey = attribute.Key("k8s.container.status.state") + + // K8SCronJobNameKey is the attribute Key conforming to the "k8s.cronjob.name" + // semantic conventions. It represents the name of the CronJob. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") + + // K8SCronJobUIDKey is the attribute Key conforming to the "k8s.cronjob.uid" + // semantic conventions. It represents the UID of the CronJob. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") + + // K8SDaemonSetUIDKey is the attribute Key conforming to the "k8s.daemonset.uid" + // semantic conventions. It represents the UID of the DaemonSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of the + // Deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") + + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SHPAMetricTypeKey is the attribute Key conforming to the + // "k8s.hpa.metric.type" semantic conventions. It represents the type of metric + // source for the horizontal pod autoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Resource", "ContainerResource" + // Note: This attribute reflects the `type` field of spec.metrics[] in the HPA. + K8SHPAMetricTypeKey = attribute.Key("k8s.hpa.metric.type") + + // K8SHPANameKey is the attribute Key conforming to the "k8s.hpa.name" semantic + // conventions. It represents the name of the horizontal pod autoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SHPANameKey = attribute.Key("k8s.hpa.name") + + // K8SHPAScaletargetrefAPIVersionKey is the attribute Key conforming to the + // "k8s.hpa.scaletargetref.api_version" semantic conventions. It represents the + // API version of the target resource to scale for the HorizontalPodAutoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "apps/v1", "autoscaling/v2" + // Note: This maps to the `apiVersion` field in the `scaleTargetRef` of the HPA + // spec. + K8SHPAScaletargetrefAPIVersionKey = attribute.Key("k8s.hpa.scaletargetref.api_version") + + // K8SHPAScaletargetrefKindKey is the attribute Key conforming to the + // "k8s.hpa.scaletargetref.kind" semantic conventions. It represents the kind of + // the target resource to scale for the HorizontalPodAutoscaler. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Deployment", "StatefulSet" + // Note: This maps to the `kind` field in the `scaleTargetRef` of the HPA spec. + K8SHPAScaletargetrefKindKey = attribute.Key("k8s.hpa.scaletargetref.kind") + + // K8SHPAScaletargetrefNameKey is the attribute Key conforming to the + // "k8s.hpa.scaletargetref.name" semantic conventions. It represents the name of + // the target resource to scale for the HorizontalPodAutoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-deployment", "my-statefulset" + // Note: This maps to the `name` field in the `scaleTargetRef` of the HPA spec. + K8SHPAScaletargetrefNameKey = attribute.Key("k8s.hpa.scaletargetref.name") + + // K8SHPAUIDKey is the attribute Key conforming to the "k8s.hpa.uid" semantic + // conventions. It represents the UID of the horizontal pod autoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SHPAUIDKey = attribute.Key("k8s.hpa.uid") + + // K8SHugepageSizeKey is the attribute Key conforming to the "k8s.hugepage.size" + // semantic conventions. It represents the size (identifier) of the K8s huge + // page. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2Mi" + K8SHugepageSizeKey = attribute.Key("k8s.hugepage.size") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" semantic + // conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SJobNameKey = attribute.Key("k8s.job.name") + + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" semantic + // conventions. It represents the UID of the Job. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "default" + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") + + // K8SNamespacePhaseKey is the attribute Key conforming to the + // "k8s.namespace.phase" semantic conventions. It represents the phase of the + // K8s namespace. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "active", "terminating" + // Note: This attribute aligns with the `phase` field of the + // [K8s NamespaceStatus] + // + // [K8s NamespaceStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#namespacestatus-v1-core + K8SNamespacePhaseKey = attribute.Key("k8s.namespace.phase") + + // K8SNodeConditionStatusKey is the attribute Key conforming to the + // "k8s.node.condition.status" semantic conventions. It represents the status of + // the condition, one of True, False, Unknown. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "true", "false", "unknown" + // Note: This attribute aligns with the `status` field of the + // [NodeCondition] + // + // [NodeCondition]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#nodecondition-v1-core + K8SNodeConditionStatusKey = attribute.Key("k8s.node.condition.status") + + // K8SNodeConditionTypeKey is the attribute Key conforming to the + // "k8s.node.condition.type" semantic conventions. It represents the condition + // type of a K8s Node. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Ready", "DiskPressure" + // Note: K8s Node conditions as described + // by [K8s documentation]. + // + // This attribute aligns with the `type` field of the + // [NodeCondition] + // + // The set of possible values is not limited to those listed here. Managed + // Kubernetes environments, + // or custom controllers MAY introduce additional node condition types. + // When this occurs, the exact value as reported by the Kubernetes API SHOULD be + // used. + // + // [K8s documentation]: https://v1-32.docs.kubernetes.io/docs/reference/node/node-status/#condition + // [NodeCondition]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#nodecondition-v1-core + K8SNodeConditionTypeKey = attribute.Key("k8s.node.condition.type") + + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "node-1" + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" semantic + // conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2" + K8SNodeUIDKey = attribute.Key("k8s.node.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" semantic + // conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-pod-autoconf" + K8SPodNameKey = attribute.Key("k8s.pod.name") + + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" semantic + // conventions. It represents the UID of the Pod. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") + + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SReplicationControllerNameKey is the attribute Key conforming to the + // "k8s.replicationcontroller.name" semantic conventions. It represents the name + // of the replication controller. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SReplicationControllerNameKey = attribute.Key("k8s.replicationcontroller.name") + + // K8SReplicationControllerUIDKey is the attribute Key conforming to the + // "k8s.replicationcontroller.uid" semantic conventions. It represents the UID + // of the replication controller. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SReplicationControllerUIDKey = attribute.Key("k8s.replicationcontroller.uid") + + // K8SResourceQuotaNameKey is the attribute Key conforming to the + // "k8s.resourcequota.name" semantic conventions. It represents the name of the + // resource quota. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SResourceQuotaNameKey = attribute.Key("k8s.resourcequota.name") + + // K8SResourceQuotaResourceNameKey is the attribute Key conforming to the + // "k8s.resourcequota.resource_name" semantic conventions. It represents the + // name of the K8s resource a resource quota defines. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "count/replicationcontrollers" + // Note: The value for this attribute can be either the full + // `count/<resource>[.<group>]` string (e.g., count/deployments.apps, + // count/pods), or, for certain core Kubernetes resources, just the resource + // name (e.g., pods, services, configmaps). Both forms are supported by + // Kubernetes for object count quotas. See + // [Kubernetes Resource Quotas documentation] for more details. + // + // [Kubernetes Resource Quotas documentation]: https://kubernetes.io/docs/concepts/policy/resource-quotas/#object-count-quota + K8SResourceQuotaResourceNameKey = attribute.Key("k8s.resourcequota.resource_name") + + // K8SResourceQuotaUIDKey is the attribute Key conforming to the + // "k8s.resourcequota.uid" semantic conventions. It represents the UID of the + // resource quota. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SResourceQuotaUIDKey = attribute.Key("k8s.resourcequota.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") + + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions.
It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + + // K8SStorageclassNameKey is the attribute Key conforming to the + // "k8s.storageclass.name" semantic conventions. It represents the name of K8s + // [StorageClass] object. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "gold.storageclass.storage.k8s.io" + // + // [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io + K8SStorageclassNameKey = attribute.Key("k8s.storageclass.name") + + // K8SVolumeNameKey is the attribute Key conforming to the "k8s.volume.name" + // semantic conventions. It represents the name of the K8s volume. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "volume0" + K8SVolumeNameKey = attribute.Key("k8s.volume.name") + + // K8SVolumeTypeKey is the attribute Key conforming to the "k8s.volume.type" + // semantic conventions. It represents the type of the K8s volume. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "emptyDir", "persistentVolumeClaim" + K8SVolumeTypeKey = attribute.Key("k8s.volume.type") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions. It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// K8SClusterUID returns an attribute KeyValue conforming to the +// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the +// cluster, set to the UID of the `kube-system` namespace. 
+func K8SClusterUID(val string) attribute.KeyValue { + return K8SClusterUIDKey.String(val) +} + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify a +// particular container (running or stopped) within a container spec. +func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue +// conforming to the "k8s.container.status.last_terminated_reason" semantic +// conventions. It represents the last terminated reason of the Container. +func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue { + return K8SContainerStatusLastTerminatedReasonKey.String(val) +} + +// K8SCronJobAnnotation returns an attribute KeyValue conforming to the +// "k8s.cronjob.annotation" semantic conventions. It represents the cronjob +// annotation placed on the CronJob, the `<key>` being the annotation name, the +// value being the annotation value. +func K8SCronJobAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.cronjob.annotation."+key, val) +} + +// K8SCronJobLabel returns an attribute KeyValue conforming to the +// "k8s.cronjob.label" semantic conventions. It represents the label placed on +// the CronJob, the `<key>` being the label name, the value being the label +// value.
+func K8SCronJobLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.cronjob.label."+key, val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SDaemonSetAnnotation returns an attribute KeyValue conforming to the +// "k8s.daemonset.annotation" semantic conventions. It represents the annotation +// placed on the DaemonSet, the `<key>` being the annotation name, the value +// being the annotation value, even if the value is empty. +func K8SDaemonSetAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.daemonset.annotation."+key, val) +} + +// K8SDaemonSetLabel returns an attribute KeyValue conforming to the +// "k8s.daemonset.label" semantic conventions. It represents the label placed on +// the DaemonSet, the `<key>` being the label name, the value being the label +// value, even if the value is empty. +func K8SDaemonSetLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.daemonset.label."+key, val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDeploymentAnnotation returns an attribute KeyValue conforming to the +// "k8s.deployment.annotation" semantic conventions. It represents the annotation +// placed on the Deployment, the `<key>` being the annotation name, the value +// being the annotation value, even if the value is empty. +func K8SDeploymentAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.deployment.annotation."+key, val) +} + +// K8SDeploymentLabel returns an attribute KeyValue conforming to the +// "k8s.deployment.label" semantic conventions. It represents the label placed on +// the Deployment, the `<key>` being the label name, the value being the label +// value, even if the value is empty. +func K8SDeploymentLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.deployment.label."+key, val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SHPAMetricType returns an attribute KeyValue conforming to the +// "k8s.hpa.metric.type" semantic conventions. It represents the type of metric +// source for the horizontal pod autoscaler. +func K8SHPAMetricType(val string) attribute.KeyValue { + return K8SHPAMetricTypeKey.String(val) +} + +// K8SHPAName returns an attribute KeyValue conforming to the "k8s.hpa.name" +// semantic conventions. It represents the name of the horizontal pod autoscaler.
+func K8SHPAName(val string) attribute.KeyValue { + return K8SHPANameKey.String(val) +} + +// K8SHPAScaletargetrefAPIVersion returns an attribute KeyValue conforming to the +// "k8s.hpa.scaletargetref.api_version" semantic conventions. It represents the +// API version of the target resource to scale for the HorizontalPodAutoscaler. +func K8SHPAScaletargetrefAPIVersion(val string) attribute.KeyValue { + return K8SHPAScaletargetrefAPIVersionKey.String(val) +} + +// K8SHPAScaletargetrefKind returns an attribute KeyValue conforming to the +// "k8s.hpa.scaletargetref.kind" semantic conventions. It represents the kind of +// the target resource to scale for the HorizontalPodAutoscaler. +func K8SHPAScaletargetrefKind(val string) attribute.KeyValue { + return K8SHPAScaletargetrefKindKey.String(val) +} + +// K8SHPAScaletargetrefName returns an attribute KeyValue conforming to the +// "k8s.hpa.scaletargetref.name" semantic conventions. It represents the name of +// the target resource to scale for the HorizontalPodAutoscaler. +func K8SHPAScaletargetrefName(val string) attribute.KeyValue { + return K8SHPAScaletargetrefNameKey.String(val) +} + +// K8SHPAUID returns an attribute KeyValue conforming to the "k8s.hpa.uid" +// semantic conventions. It represents the UID of the horizontal pod autoscaler. +func K8SHPAUID(val string) attribute.KeyValue { + return K8SHPAUIDKey.String(val) +} + +// K8SHugepageSize returns an attribute KeyValue conforming to the +// "k8s.hugepage.size" semantic conventions. It represents the size (identifier) +// of the K8s huge page. +func K8SHugepageSize(val string) attribute.KeyValue { + return K8SHugepageSizeKey.String(val) +} + +// K8SJobAnnotation returns an attribute KeyValue conforming to the +// "k8s.job.annotation" semantic conventions. It represents the annotation placed +// on the Job, the `<key>` being the annotation name, the value being the +// annotation value, even if the value is empty.
+func K8SJobAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.job.annotation."+key, val) +} + +// K8SJobLabel returns an attribute KeyValue conforming to the "k8s.job.label" +// semantic conventions. It represents the label placed on the Job, the `<key>` +// being the label name, the value being the label value, even if the value is +// empty. +func K8SJobLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.job.label."+key, val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SNamespaceAnnotation returns an attribute KeyValue conforming to the +// "k8s.namespace.annotation" semantic conventions. It represents the annotation +// placed on the Namespace, the `<key>` being the annotation name, the value +// being the annotation value, even if the value is empty. +func K8SNamespaceAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.namespace.annotation."+key, val) +} + +// K8SNamespaceLabel returns an attribute KeyValue conforming to the +// "k8s.namespace.label" semantic conventions. It represents the label placed on +// the Namespace, the `<key>` being the label name, the value being the label +// value, even if the value is empty. +func K8SNamespaceLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.namespace.label."+key, val) +} + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// K8SNodeAnnotation returns an attribute KeyValue conforming to the +// "k8s.node.annotation" semantic conventions. It represents the annotation +// placed on the Node, the `<key>` being the annotation name, the value being the +// annotation value, even if the value is empty. +func K8SNodeAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.node.annotation."+key, val) +} + +// K8SNodeLabel returns an attribute KeyValue conforming to the "k8s.node.label" +// semantic conventions. It represents the label placed on the Node, the `<key>` +// being the label name, the value being the label value, even if the value is +// empty. +func K8SNodeLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.node.label."+key, val) +} + +// K8SNodeName returns an attribute KeyValue conforming to the "k8s.node.name" +// semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// K8SPodAnnotation returns an attribute KeyValue conforming to the +// "k8s.pod.annotation" semantic conventions. It represents the annotation placed +// on the Pod, the `<key>` being the annotation name, the value being the +// annotation value. +func K8SPodAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.pod.annotation."+key, val) +} + +// K8SPodLabel returns an attribute KeyValue conforming to the "k8s.pod.label" +// semantic conventions. It represents the label placed on the Pod, the `<key>` +// being the label name, the value being the label value.
+func K8SPodLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.pod.label."+key, val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. +func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SReplicaSetAnnotation returns an attribute KeyValue conforming to the +// "k8s.replicaset.annotation" semantic conventions. It represents the annotation +// placed on the ReplicaSet, the `<key>` being the annotation name, the value +// being the annotation value, even if the value is empty. +func K8SReplicaSetAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.replicaset.annotation."+key, val) +} + +// K8SReplicaSetLabel returns an attribute KeyValue conforming to the +// "k8s.replicaset.label" semantic conventions. It represents the label placed on +// the ReplicaSet, the `<key>` being the label name, the value being the label +// value, even if the value is empty. +func K8SReplicaSetLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.replicaset.label."+key, val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SReplicationControllerName returns an attribute KeyValue conforming to the +// "k8s.replicationcontroller.name" semantic conventions. It represents the name +// of the replication controller. +func K8SReplicationControllerName(val string) attribute.KeyValue { + return K8SReplicationControllerNameKey.String(val) +} + +// K8SReplicationControllerUID returns an attribute KeyValue conforming to the +// "k8s.replicationcontroller.uid" semantic conventions. It represents the UID of +// the replication controller. +func K8SReplicationControllerUID(val string) attribute.KeyValue { + return K8SReplicationControllerUIDKey.String(val) +} + +// K8SResourceQuotaName returns an attribute KeyValue conforming to the +// "k8s.resourcequota.name" semantic conventions. It represents the name of the +// resource quota. +func K8SResourceQuotaName(val string) attribute.KeyValue { + return K8SResourceQuotaNameKey.String(val) +} + +// K8SResourceQuotaResourceName returns an attribute KeyValue conforming to the +// "k8s.resourcequota.resource_name" semantic conventions. It represents the name +// of the K8s resource a resource quota defines. +func K8SResourceQuotaResourceName(val string) attribute.KeyValue { + return K8SResourceQuotaResourceNameKey.String(val) +} + +// K8SResourceQuotaUID returns an attribute KeyValue conforming to the +// "k8s.resourcequota.uid" semantic conventions. It represents the UID of the +// resource quota. +func K8SResourceQuotaUID(val string) attribute.KeyValue { + return K8SResourceQuotaUIDKey.String(val) +} + +// K8SStatefulSetAnnotation returns an attribute KeyValue conforming to the +// "k8s.statefulset.annotation" semantic conventions. It represents the +// annotation placed on the StatefulSet, the `<key>` being the annotation name, +// the value being the annotation value, even if the value is empty.
+func K8SStatefulSetAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.statefulset.annotation."+key, val) +} + +// K8SStatefulSetLabel returns an attribute KeyValue conforming to the +// "k8s.statefulset.label" semantic conventions. It represents the label placed +// on the StatefulSet, the `<key>` being the label name, the value being the +// label value, even if the value is empty. +func K8SStatefulSetLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.statefulset.label."+key, val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// K8SStorageclassName returns an attribute KeyValue conforming to the +// "k8s.storageclass.name" semantic conventions. It represents the name of K8s +// [StorageClass] object. +// +// [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io +func K8SStorageclassName(val string) attribute.KeyValue { + return K8SStorageclassNameKey.String(val) +} + +// K8SVolumeName returns an attribute KeyValue conforming to the +// "k8s.volume.name" semantic conventions. It represents the name of the K8s +// volume. +func K8SVolumeName(val string) attribute.KeyValue { + return K8SVolumeNameKey.String(val) +} + +// Enum values for k8s.container.status.reason +var ( + // The container is being created.
+ // Stability: development + K8SContainerStatusReasonContainerCreating = K8SContainerStatusReasonKey.String("ContainerCreating") + // The container is in a crash loop back off state. + // Stability: development + K8SContainerStatusReasonCrashLoopBackOff = K8SContainerStatusReasonKey.String("CrashLoopBackOff") + // There was an error creating the container configuration. + // Stability: development + K8SContainerStatusReasonCreateContainerConfigError = K8SContainerStatusReasonKey.String("CreateContainerConfigError") + // There was an error pulling the container image. + // Stability: development + K8SContainerStatusReasonErrImagePull = K8SContainerStatusReasonKey.String("ErrImagePull") + // The container image pull is in back off state. + // Stability: development + K8SContainerStatusReasonImagePullBackOff = K8SContainerStatusReasonKey.String("ImagePullBackOff") + // The container was killed due to out of memory. + // Stability: development + K8SContainerStatusReasonOomKilled = K8SContainerStatusReasonKey.String("OOMKilled") + // The container has completed execution. + // Stability: development + K8SContainerStatusReasonCompleted = K8SContainerStatusReasonKey.String("Completed") + // There was an error with the container. + // Stability: development + K8SContainerStatusReasonError = K8SContainerStatusReasonKey.String("Error") + // The container cannot run. + // Stability: development + K8SContainerStatusReasonContainerCannotRun = K8SContainerStatusReasonKey.String("ContainerCannotRun") +) + +// Enum values for k8s.container.status.state +var ( + // The container has terminated. + // Stability: development + K8SContainerStatusStateTerminated = K8SContainerStatusStateKey.String("terminated") + // The container is running. + // Stability: development + K8SContainerStatusStateRunning = K8SContainerStatusStateKey.String("running") + // The container is waiting. 
+ // Stability: development + K8SContainerStatusStateWaiting = K8SContainerStatusStateKey.String("waiting") +) + +// Enum values for k8s.namespace.phase +var ( + // Active namespace phase as described by [K8s API] + // Stability: development + // + // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase + K8SNamespacePhaseActive = K8SNamespacePhaseKey.String("active") + // Terminating namespace phase as described by [K8s API] + // Stability: development + // + // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase + K8SNamespacePhaseTerminating = K8SNamespacePhaseKey.String("terminating") +) + +// Enum values for k8s.node.condition.status +var ( + // condition_true + // Stability: development + K8SNodeConditionStatusConditionTrue = K8SNodeConditionStatusKey.String("true") + // condition_false + // Stability: development + K8SNodeConditionStatusConditionFalse = K8SNodeConditionStatusKey.String("false") + // condition_unknown + // Stability: development + K8SNodeConditionStatusConditionUnknown = K8SNodeConditionStatusKey.String("unknown") +) + +// Enum values for k8s.node.condition.type +var ( + // The node is healthy and ready to accept pods + // Stability: development + K8SNodeConditionTypeReady = K8SNodeConditionTypeKey.String("Ready") + // Pressure exists on the disk size—that is, if the disk capacity is low + // Stability: development + K8SNodeConditionTypeDiskPressure = K8SNodeConditionTypeKey.String("DiskPressure") + // Pressure exists on the node memory—that is, if the node memory is low + // Stability: development + K8SNodeConditionTypeMemoryPressure = K8SNodeConditionTypeKey.String("MemoryPressure") + // Pressure exists on the processes—that is, if there are too many processes + // on the node + // Stability: development + K8SNodeConditionTypePIDPressure = K8SNodeConditionTypeKey.String("PIDPressure") + // The network for the node is not correctly configured + // Stability: development + 
K8SNodeConditionTypeNetworkUnavailable = K8SNodeConditionTypeKey.String("NetworkUnavailable") +) + +// Enum values for k8s.volume.type +var ( + // A [persistentVolumeClaim] volume + // Stability: development + // + // [persistentVolumeClaim]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim + K8SVolumeTypePersistentVolumeClaim = K8SVolumeTypeKey.String("persistentVolumeClaim") + // A [configMap] volume + // Stability: development + // + // [configMap]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#configmap + K8SVolumeTypeConfigMap = K8SVolumeTypeKey.String("configMap") + // A [downwardAPI] volume + // Stability: development + // + // [downwardAPI]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#downwardapi + K8SVolumeTypeDownwardAPI = K8SVolumeTypeKey.String("downwardAPI") + // An [emptyDir] volume + // Stability: development + // + // [emptyDir]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#emptydir + K8SVolumeTypeEmptyDir = K8SVolumeTypeKey.String("emptyDir") + // A [secret] volume + // Stability: development + // + // [secret]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#secret + K8SVolumeTypeSecret = K8SVolumeTypeKey.String("secret") + // A [local] volume + // Stability: development + // + // [local]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#local + K8SVolumeTypeLocal = K8SVolumeTypeKey.String("local") +) + +// Namespace: linux +const ( + // LinuxMemorySlabStateKey is the attribute Key conforming to the + // "linux.memory.slab.state" semantic conventions. It represents the Linux Slab + // memory state. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "reclaimable", "unreclaimable" + LinuxMemorySlabStateKey = attribute.Key("linux.memory.slab.state") +) + +// Enum values for linux.memory.slab.state +var ( + // reclaimable + // Stability: development + LinuxMemorySlabStateReclaimable = LinuxMemorySlabStateKey.String("reclaimable") + // unreclaimable + // Stability: development + LinuxMemorySlabStateUnreclaimable = LinuxMemorySlabStateKey.String("unreclaimable") +) + +// Namespace: log +const ( + // LogFileNameKey is the attribute Key conforming to the "log.file.name" + // semantic conventions. It represents the basename of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "audit.log" + LogFileNameKey = attribute.Key("log.file.name") + + // LogFileNameResolvedKey is the attribute Key conforming to the + // "log.file.name_resolved" semantic conventions. It represents the basename of + // the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "uuid.log" + LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") + + // LogFilePathKey is the attribute Key conforming to the "log.file.path" + // semantic conventions. It represents the full path to the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/var/log/mysql/audit.log" + LogFilePathKey = attribute.Key("log.file.path") + + // LogFilePathResolvedKey is the attribute Key conforming to the + // "log.file.path_resolved" semantic conventions. It represents the full path to + // the file, with symlinks resolved. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/var/lib/docker/uuid.log" + LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") + + // LogIostreamKey is the attribute Key conforming to the "log.iostream" semantic + // conventions. It represents the stream associated with the log. See below for + // a list of well-known values. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + LogIostreamKey = attribute.Key("log.iostream") + + // LogRecordOriginalKey is the attribute Key conforming to the + // "log.record.original" semantic conventions. It represents the complete + // original Log Record. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "77 <86>1 2015-08-06T21:58:59.694Z 192.168.2.133 inactive - - - + // Something happened", "[INFO] 8/3/24 12:34:56 Something happened" + // Note: This value MAY be added when processing a Log Record which was + // originally transmitted as a string or equivalent data type AND the Body field + // of the Log Record does not contain the same value. (e.g. a syslog or a log + // record read from a file.) + LogRecordOriginalKey = attribute.Key("log.record.original") + + // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" + // semantic conventions. It represents a unique identifier for the Log Record. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "01ARZ3NDEKTSV4RRFFQ69G5FAV" + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. This means, that two + // distinguishable log records MUST have different values. + // The id MAY be an + // [Universally Unique Lexicographically Sortable Identifier (ULID)], but other + // identifiers (e.g. UUID) may be used as needed. 
+ // + // [Universally Unique Lexicographically Sortable Identifier (ULID)]: https://github.com/ulid/spec + LogRecordUIDKey = attribute.Key("log.record.uid") +) + +// LogFileName returns an attribute KeyValue conforming to the "log.file.name" +// semantic conventions. It represents the basename of the file. +func LogFileName(val string) attribute.KeyValue { + return LogFileNameKey.String(val) +} + +// LogFileNameResolved returns an attribute KeyValue conforming to the +// "log.file.name_resolved" semantic conventions. It represents the basename of +// the file, with symlinks resolved. +func LogFileNameResolved(val string) attribute.KeyValue { + return LogFileNameResolvedKey.String(val) +} + +// LogFilePath returns an attribute KeyValue conforming to the "log.file.path" +// semantic conventions. It represents the full path to the file. +func LogFilePath(val string) attribute.KeyValue { + return LogFilePathKey.String(val) +} + +// LogFilePathResolved returns an attribute KeyValue conforming to the +// "log.file.path_resolved" semantic conventions. It represents the full path to +// the file, with symlinks resolved. +func LogFilePathResolved(val string) attribute.KeyValue { + return LogFilePathResolvedKey.String(val) +} + +// LogRecordOriginal returns an attribute KeyValue conforming to the +// "log.record.original" semantic conventions. It represents the complete +// original Log Record. +func LogRecordOriginal(val string) attribute.KeyValue { + return LogRecordOriginalKey.String(val) +} + +// LogRecordUID returns an attribute KeyValue conforming to the "log.record.uid" +// semantic conventions. It represents a unique identifier for the Log Record. 
+func LogRecordUID(val string) attribute.KeyValue {
+	return LogRecordUIDKey.String(val)
+}
+
+// Enum values for log.iostream
+var (
+	// Logs from stdout stream
+	// Stability: development
+	LogIostreamStdout = LogIostreamKey.String("stdout")
+	// Events from stderr stream
+	// Stability: development
+	LogIostreamStderr = LogIostreamKey.String("stderr")
+)
+
+// Namespace: mainframe
+const (
+	// MainframeLparNameKey is the attribute Key conforming to the
+	// "mainframe.lpar.name" semantic conventions. It represents the name of the
+	// logical partition that hosts systems with a mainframe operating system.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "LPAR01"
+	MainframeLparNameKey = attribute.Key("mainframe.lpar.name")
+)
+
+// MainframeLparName returns an attribute KeyValue conforming to the
+// "mainframe.lpar.name" semantic conventions. It represents the name of the
+// logical partition that hosts systems with a mainframe operating system.
+func MainframeLparName(val string) attribute.KeyValue {
+	return MainframeLparNameKey.String(val)
+}
+
+// Namespace: messaging
+const (
+	// MessagingBatchMessageCountKey is the attribute Key conforming to the
+	// "messaging.batch.message_count" semantic conventions. It represents the
+	// number of messages sent, received, or processed in the scope of the batching
+	// operation.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 0, 1, 2
+	// Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+	// spans that operate with a single message. When a messaging client library
+	// supports both batch and single-message API for the same operation,
+	// instrumentations SHOULD use `messaging.batch.message_count` for batching APIs
+	// and SHOULD NOT use it for single-message APIs.
+ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") + + // MessagingClientIDKey is the attribute Key conforming to the + // "messaging.client.id" semantic conventions. It represents a unique identifier + // for the client that consumes or produces a message. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "client-5", "myhost@8742@s8083jm" + MessagingClientIDKey = attribute.Key("messaging.client.id") + + // MessagingConsumerGroupNameKey is the attribute Key conforming to the + // "messaging.consumer.group.name" semantic conventions. It represents the name + // of the consumer group with which a consumer is associated. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-group", "indexer" + // Note: Semantic conventions for individual messaging systems SHOULD document + // whether `messaging.consumer.group.name` is applicable and what it means in + // the context of that system. + MessagingConsumerGroupNameKey = attribute.Key("messaging.consumer.group.name") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") + + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the message + // destination name. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MyQueue", "MyTopic" + // Note: Destination name SHOULD uniquely identify a specific queue, topic or + // other entity within the broker. If + // the broker doesn't have such notion, the destination name SHOULD uniquely + // identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationPartitionIDKey is the attribute Key conforming to the + // "messaging.destination.partition.id" semantic conventions. It represents the + // identifier of the partition messages are sent to or received from, unique + // within the `messaging.destination.name`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1 + MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id") + + // MessagingDestinationSubscriptionNameKey is the attribute Key conforming to + // the "messaging.destination.subscription.name" semantic conventions. It + // represents the name of the destination subscription from which a message is + // consumed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "subscription-a" + // Note: Semantic conventions for individual messaging systems SHOULD document + // whether `messaging.destination.subscription.name` is applicable and what it + // means in the context of that system. + MessagingDestinationSubscriptionNameKey = attribute.Key("messaging.destination.subscription.name") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the low + // cardinality representation of the messaging destination name. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/customers/{customerId}" + // Note: Destination names could be constructed from templates. An example would + // be a destination name involving a user name or product id. Although the + // destination name in this case is of high cardinality, the underlying template + // is of low cardinality and can be effectively used for grouping and + // aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might not + // exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingEventHubsMessageEnqueuedTimeKey is the attribute Key conforming to + // the "messaging.eventhubs.message.enqueued_time" semantic conventions. It + // represents the UTC epoch seconds at which the message has been accepted and + // stored in the entity. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingEventHubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time") + + // MessagingGCPPubSubMessageAckDeadlineKey is the attribute Key conforming to + // the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It + // represents the ack deadline in seconds set for the modify ack deadline + // request. 
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingGCPPubSubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline") + + // MessagingGCPPubSubMessageAckIDKey is the attribute Key conforming to the + // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the + // ack id for a given message. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: ack_id + MessagingGCPPubSubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id") + + // MessagingGCPPubSubMessageDeliveryAttemptKey is the attribute Key conforming + // to the "messaging.gcp_pubsub.message.delivery_attempt" semantic conventions. + // It represents the delivery attempt for a given message. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingGCPPubSubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt") + + // MessagingGCPPubSubMessageOrderingKeyKey is the attribute Key conforming to + // the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It + // represents the ordering key for a given message. If the attribute is not + // present, the message does not have an ordering key. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: ordering_key + MessagingGCPPubSubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") + + // MessagingKafkaMessageKeyKey is the attribute Key conforming to the + // "messaging.kafka.message.key" semantic conventions. It represents the message + // keys in Kafka are used for grouping alike messages to ensure they're + // processed on the same partition. They differ from `messaging.message.id` in + // that they're not unique. If the key is `null`, the attribute MUST NOT be set. 
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: myKey
+	// Note: If the key type is not string, its string representation has to be
+	// supplied for the attribute. If the key has no unambiguous, canonical string
+	// form, don't include its value.
+	MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+	// MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+	// "messaging.kafka.message.tombstone" semantic conventions. It represents a
+	// boolean that is true if the message is a tombstone.
+	//
+	// Type: boolean
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+
+	// MessagingKafkaOffsetKey is the attribute Key conforming to the
+	// "messaging.kafka.offset" semantic conventions. It represents the offset of a
+	// record in the corresponding Kafka partition.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	MessagingKafkaOffsetKey = attribute.Key("messaging.kafka.offset")
+
+	// MessagingMessageBodySizeKey is the attribute Key conforming to the
+	// "messaging.message.body.size" semantic conventions. It represents the size of
+	// the message body in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Note: This can refer to both the compressed or uncompressed body size. If
+	// both sizes are known, the uncompressed
+	// body size should be used.
+	MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
+
+	// MessagingMessageConversationIDKey is the attribute Key conforming to the
+	// "messaging.message.conversation_id" semantic conventions. It represents the
+	// conversation ID identifying the conversation to which the message belongs,
+	// represented as a string. Sometimes called "Correlation ID".
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: MyConversationId + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the + // "messaging.message.envelope.size" semantic conventions. It represents the + // size of the message body and metadata in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Note: This can refer to both the compressed or uncompressed size. If both + // sizes are known, the uncompressed + // size should be used. + MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") + + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used by + // the messaging system as an identifier for the message, represented as a + // string. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 452a7c7c7c7048c2f887f61572b18fc2 + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingOperationNameKey is the attribute Key conforming to the + // "messaging.operation.name" semantic conventions. It represents the + // system-specific name of the messaging operation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ack", "nack", "send" + MessagingOperationNameKey = attribute.Key("messaging.operation.name") + + // MessagingOperationTypeKey is the attribute Key conforming to the + // "messaging.operation.type" semantic conventions. It represents a string + // identifying the type of the messaging operation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: If a custom value is used, it MUST be of low cardinality. 
+ MessagingOperationTypeKey = attribute.Key("messaging.operation.type") + + // MessagingRabbitMQDestinationRoutingKeyKey is the attribute Key conforming to + // the "messaging.rabbitmq.destination.routing_key" semantic conventions. It + // represents the rabbitMQ message routing key. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: myKey + MessagingRabbitMQDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") + + // MessagingRabbitMQMessageDeliveryTagKey is the attribute Key conforming to the + // "messaging.rabbitmq.message.delivery_tag" semantic conventions. It represents + // the rabbitMQ message delivery tag. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingRabbitMQMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag") + + // MessagingRocketMQConsumptionModelKey is the attribute Key conforming to the + // "messaging.rocketmq.consumption_model" semantic conventions. It represents + // the model of message consumption. This only applies to consumer spans. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + MessagingRocketMQConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") + + // MessagingRocketMQMessageDelayTimeLevelKey is the attribute Key conforming to + // the "messaging.rocketmq.message.delay_time_level" semantic conventions. It + // represents the delay time level for delay message, which determines the + // message delay time. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingRocketMQMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") + + // MessagingRocketMQMessageDeliveryTimestampKey is the attribute Key conforming + // to the "messaging.rocketmq.message.delivery_timestamp" semantic conventions. 
+	// It represents the timestamp in milliseconds that the delay message is
+	// expected to be delivered to consumer.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	MessagingRocketMQMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+	// MessagingRocketMQMessageGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.group" semantic conventions. It represents the
+	// message group; it is essential for FIFO messages. Messages that belong to the
+	// same message group are always processed one by one within the same consumer
+	// group.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: myMessageGroup
+	MessagingRocketMQMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+	// MessagingRocketMQMessageKeysKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.keys" semantic conventions. It represents the
+	// key(s) of message, another way to mark message besides message id.
+	//
+	// Type: string[]
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "keyA", "keyB"
+	MessagingRocketMQMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+	// MessagingRocketMQMessageTagKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.tag" semantic conventions. It represents the
+	// secondary classifier of message besides topic.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: tagA
+	MessagingRocketMQMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+	// MessagingRocketMQMessageTypeKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.type" semantic conventions. It represents the
+	// type of message.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	MessagingRocketMQMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+	// MessagingRocketMQNamespaceKey is the attribute Key conforming to the
+	// "messaging.rocketmq.namespace" semantic conventions. It represents the
+	// namespace of RocketMQ resources; resources in different namespaces are
+	// individual.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: myNamespace
+	MessagingRocketMQNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+	// MessagingServiceBusDispositionStatusKey is the attribute Key conforming to
+	// the "messaging.servicebus.disposition_status" semantic conventions. It
+	// describes the [settlement type].
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	//
+	// [settlement type]: https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock
+	MessagingServiceBusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status")
+
+	// MessagingServiceBusMessageDeliveryCountKey is the attribute Key conforming to
+	// the "messaging.servicebus.message.delivery_count" semantic conventions. It
+	// represents the number of deliveries that have been attempted for this
+	// message.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	MessagingServiceBusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count")
+
+	// MessagingServiceBusMessageEnqueuedTimeKey is the attribute Key conforming to
+	// the "messaging.servicebus.message.enqueued_time" semantic conventions. It
+	// represents the UTC epoch seconds at which the message has been accepted and
+	// stored in the entity.
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingServiceBusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time") + + // MessagingSystemKey is the attribute Key conforming to the "messaging.system" + // semantic conventions. It represents the messaging system as identified by the + // client instrumentation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The actual messaging system may differ from the one known by the + // client. For example, when using Kafka client libraries to communicate with + // Azure Event Hubs, the `messaging.system` is set to `kafka` based on the + // instrumentation's best knowledge. + MessagingSystemKey = attribute.Key("messaging.system") +) + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to the +// "messaging.batch.message_count" semantic conventions. It represents the number +// of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// MessagingClientID returns an attribute KeyValue conforming to the +// "messaging.client.id" semantic conventions. It represents a unique identifier +// for the client that consumes or produces a message. +func MessagingClientID(val string) attribute.KeyValue { + return MessagingClientIDKey.String(val) +} + +// MessagingConsumerGroupName returns an attribute KeyValue conforming to the +// "messaging.consumer.group.name" semantic conventions. It represents the name +// of the consumer group with which a consumer is associated. +func MessagingConsumerGroupName(val string) attribute.KeyValue { + return MessagingConsumerGroupNameKey.String(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to the +// "messaging.destination.anonymous" semantic conventions. 
It represents a +// boolean that is true if the message destination is anonymous (could be unnamed +// or have auto-generated name). +func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name. +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationPartitionID returns an attribute KeyValue conforming to +// the "messaging.destination.partition.id" semantic conventions. It represents +// the identifier of the partition messages are sent to or received from, unique +// within the `messaging.destination.name`. +func MessagingDestinationPartitionID(val string) attribute.KeyValue { + return MessagingDestinationPartitionIDKey.String(val) +} + +// MessagingDestinationSubscriptionName returns an attribute KeyValue conforming +// to the "messaging.destination.subscription.name" semantic conventions. It +// represents the name of the destination subscription from which a message is +// consumed. +func MessagingDestinationSubscriptionName(val string) attribute.KeyValue { + return MessagingDestinationSubscriptionNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to the +// "messaging.destination.template" semantic conventions. It represents the low +// cardinality representation of the messaging destination name. +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to the +// "messaging.destination.temporary" semantic conventions. 
It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingEventHubsMessageEnqueuedTime returns an attribute KeyValue conforming +// to the "messaging.eventhubs.message.enqueued_time" semantic conventions. It +// represents the UTC epoch seconds at which the message has been accepted and +// stored in the entity. +func MessagingEventHubsMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingEventHubsMessageEnqueuedTimeKey.Int(val) +} + +// MessagingGCPPubSubMessageAckDeadline returns an attribute KeyValue conforming +// to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It +// represents the ack deadline in seconds set for the modify ack deadline +// request. +func MessagingGCPPubSubMessageAckDeadline(val int) attribute.KeyValue { + return MessagingGCPPubSubMessageAckDeadlineKey.Int(val) +} + +// MessagingGCPPubSubMessageAckID returns an attribute KeyValue conforming to the +// "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the +// ack id for a given message. +func MessagingGCPPubSubMessageAckID(val string) attribute.KeyValue { + return MessagingGCPPubSubMessageAckIDKey.String(val) +} + +// MessagingGCPPubSubMessageDeliveryAttempt returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic +// conventions. It represents the delivery attempt for a given message. +func MessagingGCPPubSubMessageDeliveryAttempt(val int) attribute.KeyValue { + return MessagingGCPPubSubMessageDeliveryAttemptKey.Int(val) +} + +// MessagingGCPPubSubMessageOrderingKey returns an attribute KeyValue conforming +// to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It +// represents the ordering key for a given message. 
If the attribute is not +// present, the message does not have an ordering key. +func MessagingGCPPubSubMessageOrderingKey(val string) attribute.KeyValue { + return MessagingGCPPubSubMessageOrderingKeyKey.String(val) +} + +// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the +// "messaging.kafka.message.key" semantic conventions. It represents the message +// keys in Kafka are used for grouping alike messages to ensure they're processed +// on the same partition. They differ from `messaging.message.id` in that they're +// not unique. If the key is `null`, the attribute MUST NOT be set. +func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming to the +// "messaging.kafka.message.tombstone" semantic conventions. It represents a +// boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// MessagingKafkaOffset returns an attribute KeyValue conforming to the +// "messaging.kafka.offset" semantic conventions. It represents the offset of a +// record in the corresponding Kafka partition. +func MessagingKafkaOffset(val int) attribute.KeyValue { + return MessagingKafkaOffsetKey.Int(val) +} + +// MessagingMessageBodySize returns an attribute KeyValue conforming to the +// "messaging.message.body.size" semantic conventions. It represents the size of +// the message body in bytes. +func MessagingMessageBodySize(val int) attribute.KeyValue { + return MessagingMessageBodySizeKey.Int(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming to the +// "messaging.message.conversation_id" semantic conventions. It represents the +// conversation ID identifying the conversation to which the message belongs, +// represented as a string. Sometimes called "Correlation ID". 
+func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to the +// "messaging.message.envelope.size" semantic conventions. It represents the size +// of the message body and metadata in bytes. +func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { + return MessagingMessageEnvelopeSizeKey.Int(val) +} + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by the +// messaging system as an identifier for the message, represented as a string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingOperationName returns an attribute KeyValue conforming to the +// "messaging.operation.name" semantic conventions. It represents the +// system-specific name of the messaging operation. +func MessagingOperationName(val string) attribute.KeyValue { + return MessagingOperationNameKey.String(val) +} + +// MessagingRabbitMQDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the rabbitMQ message routing key. +func MessagingRabbitMQDestinationRoutingKey(val string) attribute.KeyValue { + return MessagingRabbitMQDestinationRoutingKeyKey.String(val) +} + +// MessagingRabbitMQMessageDeliveryTag returns an attribute KeyValue conforming +// to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. It +// represents the rabbitMQ message delivery tag. +func MessagingRabbitMQMessageDeliveryTag(val int) attribute.KeyValue { + return MessagingRabbitMQMessageDeliveryTagKey.Int(val) +} + +// MessagingRocketMQMessageDelayTimeLevel returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delay_time_level" semantic +// conventions. 
It represents the delay time level for delay message, which +// determines the message delay time. +func MessagingRocketMQMessageDelayTimeLevel(val int) attribute.KeyValue { + return MessagingRocketMQMessageDelayTimeLevelKey.Int(val) +} + +// MessagingRocketMQMessageDeliveryTimestamp returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic +// conventions. It represents the timestamp in milliseconds that the delay +// message is expected to be delivered to consumer. +func MessagingRocketMQMessageDeliveryTimestamp(val int) attribute.KeyValue { + return MessagingRocketMQMessageDeliveryTimestampKey.Int(val) +} + +// MessagingRocketMQMessageGroup returns an attribute KeyValue conforming to the +// "messaging.rocketmq.message.group" semantic conventions. It represents the it +// is essential for FIFO message. Messages that belong to the same message group +// are always processed one by one within the same consumer group. +func MessagingRocketMQMessageGroup(val string) attribute.KeyValue { + return MessagingRocketMQMessageGroupKey.String(val) +} + +// MessagingRocketMQMessageKeys returns an attribute KeyValue conforming to the +// "messaging.rocketmq.message.keys" semantic conventions. It represents the +// key(s) of message, another way to mark message besides message id. +func MessagingRocketMQMessageKeys(val ...string) attribute.KeyValue { + return MessagingRocketMQMessageKeysKey.StringSlice(val) +} + +// MessagingRocketMQMessageTag returns an attribute KeyValue conforming to the +// "messaging.rocketmq.message.tag" semantic conventions. It represents the +// secondary classifier of message besides topic. +func MessagingRocketMQMessageTag(val string) attribute.KeyValue { + return MessagingRocketMQMessageTagKey.String(val) +} + +// MessagingRocketMQNamespace returns an attribute KeyValue conforming to the +// "messaging.rocketmq.namespace" semantic conventions. 
It represents the +// namespace of RocketMQ resources, resources in different namespaces are +// individual. +func MessagingRocketMQNamespace(val string) attribute.KeyValue { + return MessagingRocketMQNamespaceKey.String(val) +} + +// MessagingServiceBusMessageDeliveryCount returns an attribute KeyValue +// conforming to the "messaging.servicebus.message.delivery_count" semantic +// conventions. It represents the number of deliveries that have been attempted +// for this message. +func MessagingServiceBusMessageDeliveryCount(val int) attribute.KeyValue { + return MessagingServiceBusMessageDeliveryCountKey.Int(val) +} + +// MessagingServiceBusMessageEnqueuedTime returns an attribute KeyValue +// conforming to the "messaging.servicebus.message.enqueued_time" semantic +// conventions. It represents the UTC epoch seconds at which the message has been +// accepted and stored in the entity. +func MessagingServiceBusMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingServiceBusMessageEnqueuedTimeKey.Int(val) +} + +// Enum values for messaging.operation.type +var ( + // A message is created. "Create" spans always refer to a single message and are + // used to provide a unique creation context for messages in batch sending + // scenarios. + // + // Stability: development + MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create") + // One or more messages are provided for sending to an intermediary. If a single + // message is sent, the context of the "Send" span can be used as the creation + // context and no "Create" span needs to be created. + // + // Stability: development + MessagingOperationTypeSend = MessagingOperationTypeKey.String("send") + // One or more messages are requested by a consumer. This operation refers to + // pull-based scenarios, where consumers explicitly call methods of messaging + // SDKs to receive messages. 
+ // + // Stability: development + MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive") + // One or more messages are processed by a consumer. + // + // Stability: development + MessagingOperationTypeProcess = MessagingOperationTypeKey.String("process") + // One or more messages are settled. + // + // Stability: development + MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle") +) + +// Enum values for messaging.rocketmq.consumption_model +var ( + // Clustering consumption model + // Stability: development + MessagingRocketMQConsumptionModelClustering = MessagingRocketMQConsumptionModelKey.String("clustering") + // Broadcasting consumption model + // Stability: development + MessagingRocketMQConsumptionModelBroadcasting = MessagingRocketMQConsumptionModelKey.String("broadcasting") +) + +// Enum values for messaging.rocketmq.message.type +var ( + // Normal message + // Stability: development + MessagingRocketMQMessageTypeNormal = MessagingRocketMQMessageTypeKey.String("normal") + // FIFO message + // Stability: development + MessagingRocketMQMessageTypeFifo = MessagingRocketMQMessageTypeKey.String("fifo") + // Delay message + // Stability: development + MessagingRocketMQMessageTypeDelay = MessagingRocketMQMessageTypeKey.String("delay") + // Transaction message + // Stability: development + MessagingRocketMQMessageTypeTransaction = MessagingRocketMQMessageTypeKey.String("transaction") +) + +// Enum values for messaging.servicebus.disposition_status +var ( + // Message is completed + // Stability: development + MessagingServiceBusDispositionStatusComplete = MessagingServiceBusDispositionStatusKey.String("complete") + // Message is abandoned + // Stability: development + MessagingServiceBusDispositionStatusAbandon = MessagingServiceBusDispositionStatusKey.String("abandon") + // Message is sent to dead letter queue + // Stability: development + MessagingServiceBusDispositionStatusDeadLetter = 
MessagingServiceBusDispositionStatusKey.String("dead_letter") + // Message is deferred + // Stability: development + MessagingServiceBusDispositionStatusDefer = MessagingServiceBusDispositionStatusKey.String("defer") +) + +// Enum values for messaging.system +var ( + // Apache ActiveMQ + // Stability: development + MessagingSystemActiveMQ = MessagingSystemKey.String("activemq") + // Amazon Simple Queue Service (SQS) + // Stability: development + MessagingSystemAWSSQS = MessagingSystemKey.String("aws_sqs") + // Azure Event Grid + // Stability: development + MessagingSystemEventGrid = MessagingSystemKey.String("eventgrid") + // Azure Event Hubs + // Stability: development + MessagingSystemEventHubs = MessagingSystemKey.String("eventhubs") + // Azure Service Bus + // Stability: development + MessagingSystemServiceBus = MessagingSystemKey.String("servicebus") + // Google Cloud Pub/Sub + // Stability: development + MessagingSystemGCPPubSub = MessagingSystemKey.String("gcp_pubsub") + // Java Message Service + // Stability: development + MessagingSystemJMS = MessagingSystemKey.String("jms") + // Apache Kafka + // Stability: development + MessagingSystemKafka = MessagingSystemKey.String("kafka") + // RabbitMQ + // Stability: development + MessagingSystemRabbitMQ = MessagingSystemKey.String("rabbitmq") + // Apache RocketMQ + // Stability: development + MessagingSystemRocketMQ = MessagingSystemKey.String("rocketmq") + // Apache Pulsar + // Stability: development + MessagingSystemPulsar = MessagingSystemKey.String("pulsar") +) + +// Namespace: network +const ( + // NetworkCarrierICCKey is the attribute Key conforming to the + // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 + // alpha-2 2-character country code associated with the mobile carrier network. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: DE + NetworkCarrierICCKey = attribute.Key("network.carrier.icc") + + // NetworkCarrierMCCKey is the attribute Key conforming to the + // "network.carrier.mcc" semantic conventions. It represents the mobile carrier + // country code. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 310 + NetworkCarrierMCCKey = attribute.Key("network.carrier.mcc") + + // NetworkCarrierMNCKey is the attribute Key conforming to the + // "network.carrier.mnc" semantic conventions. It represents the mobile carrier + // network code. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 001 + NetworkCarrierMNCKey = attribute.Key("network.carrier.mnc") + + // NetworkCarrierNameKey is the attribute Key conforming to the + // "network.carrier.name" semantic conventions. It represents the name of the + // mobile carrier. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: sprint + NetworkCarrierNameKey = attribute.Key("network.carrier.name") + + // NetworkConnectionStateKey is the attribute Key conforming to the + // "network.connection.state" semantic conventions. It represents the state of + // network connection. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "close_wait" + // Note: Connection states are defined as part of the [rfc9293] + // + // [rfc9293]: https://datatracker.ietf.org/doc/html/rfc9293#section-3.3.2 + NetworkConnectionStateKey = attribute.Key("network.connection.state") + + // NetworkConnectionSubtypeKey is the attribute Key conforming to the + // "network.connection.subtype" semantic conventions. It represents the this + // describes more details regarding the connection.type. 
It may be the type of + // cell technology connection, but it could be used for describing details about + // a wifi connection. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: LTE + NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") + + // NetworkConnectionTypeKey is the attribute Key conforming to the + // "network.connection.type" semantic conventions. It represents the internet + // connection type. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: wifi + NetworkConnectionTypeKey = attribute.Key("network.connection.type") + + // NetworkInterfaceNameKey is the attribute Key conforming to the + // "network.interface.name" semantic conventions. It represents the network + // interface name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "lo", "eth0" + NetworkInterfaceNameKey = attribute.Key("network.interface.name") + + // NetworkIODirectionKey is the attribute Key conforming to the + // "network.io.direction" semantic conventions. It represents the network IO + // operation direction. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "transmit" + NetworkIODirectionKey = attribute.Key("network.io.direction") + + // NetworkLocalAddressKey is the attribute Key conforming to the + // "network.local.address" semantic conventions. It represents the local address + // of the network connection - IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "10.1.2.80", "/tmp/my.sock" + NetworkLocalAddressKey = attribute.Key("network.local.address") + + // NetworkLocalPortKey is the attribute Key conforming to the + // "network.local.port" semantic conventions. It represents the local port + // number of the network connection. 
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 65123 + NetworkLocalPortKey = attribute.Key("network.local.port") + + // NetworkPeerAddressKey is the attribute Key conforming to the + // "network.peer.address" semantic conventions. It represents the peer address + // of the network connection - IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "10.1.2.80", "/tmp/my.sock" + NetworkPeerAddressKey = attribute.Key("network.peer.address") + + // NetworkPeerPortKey is the attribute Key conforming to the "network.peer.port" + // semantic conventions. It represents the peer port number of the network + // connection. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 65123 + NetworkPeerPortKey = attribute.Key("network.peer.port") + + // NetworkProtocolNameKey is the attribute Key conforming to the + // "network.protocol.name" semantic conventions. It represents the + // [OSI application layer] or non-OSI equivalent. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "amqp", "http", "mqtt" + // Note: The value SHOULD be normalized to lowercase. + // + // [OSI application layer]: https://wikipedia.org/wiki/Application_layer + NetworkProtocolNameKey = attribute.Key("network.protocol.name") + + // NetworkProtocolVersionKey is the attribute Key conforming to the + // "network.protocol.version" semantic conventions. It represents the actual + // version of the protocol used for network communication. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "1.1", "2" + // Note: If protocol version is subject to negotiation (for example using [ALPN] + // ), this attribute SHOULD be set to the negotiated version. If the actual + // protocol version is not known, this attribute SHOULD NOT be set. 
+ // + // [ALPN]: https://www.rfc-editor.org/rfc/rfc7301.html + NetworkProtocolVersionKey = attribute.Key("network.protocol.version") + + // NetworkTransportKey is the attribute Key conforming to the + // "network.transport" semantic conventions. It represents the + // [OSI transport layer] or [inter-process communication method]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "tcp", "udp" + // Note: The value SHOULD be normalized to lowercase. + // + // Consider always setting the transport when setting a port number, since + // a port number is ambiguous without knowing the transport. For example + // different processes could be listening on TCP port 12345 and UDP port 12345. + // + // [OSI transport layer]: https://wikipedia.org/wiki/Transport_layer + // [inter-process communication method]: https://wikipedia.org/wiki/Inter-process_communication + NetworkTransportKey = attribute.Key("network.transport") + + // NetworkTypeKey is the attribute Key conforming to the "network.type" semantic + // conventions. It represents the [OSI network layer] or non-OSI equivalent. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "ipv4", "ipv6" + // Note: The value SHOULD be normalized to lowercase. + // + // [OSI network layer]: https://wikipedia.org/wiki/Network_layer + NetworkTypeKey = attribute.Key("network.type") +) + +// NetworkCarrierICC returns an attribute KeyValue conforming to the +// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetworkCarrierICC(val string) attribute.KeyValue { + return NetworkCarrierICCKey.String(val) +} + +// NetworkCarrierMCC returns an attribute KeyValue conforming to the +// "network.carrier.mcc" semantic conventions. It represents the mobile carrier +// country code. 
+func NetworkCarrierMCC(val string) attribute.KeyValue { + return NetworkCarrierMCCKey.String(val) +} + +// NetworkCarrierMNC returns an attribute KeyValue conforming to the +// "network.carrier.mnc" semantic conventions. It represents the mobile carrier +// network code. +func NetworkCarrierMNC(val string) attribute.KeyValue { + return NetworkCarrierMNCKey.String(val) +} + +// NetworkCarrierName returns an attribute KeyValue conforming to the +// "network.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetworkCarrierName(val string) attribute.KeyValue { + return NetworkCarrierNameKey.String(val) +} + +// NetworkInterfaceName returns an attribute KeyValue conforming to the +// "network.interface.name" semantic conventions. It represents the network +// interface name. +func NetworkInterfaceName(val string) attribute.KeyValue { + return NetworkInterfaceNameKey.String(val) +} + +// NetworkLocalAddress returns an attribute KeyValue conforming to the +// "network.local.address" semantic conventions. It represents the local address +// of the network connection - IP address or Unix domain socket name. +func NetworkLocalAddress(val string) attribute.KeyValue { + return NetworkLocalAddressKey.String(val) +} + +// NetworkLocalPort returns an attribute KeyValue conforming to the +// "network.local.port" semantic conventions. It represents the local port number +// of the network connection. +func NetworkLocalPort(val int) attribute.KeyValue { + return NetworkLocalPortKey.Int(val) +} + +// NetworkPeerAddress returns an attribute KeyValue conforming to the +// "network.peer.address" semantic conventions. It represents the peer address of +// the network connection - IP address or Unix domain socket name. +func NetworkPeerAddress(val string) attribute.KeyValue { + return NetworkPeerAddressKey.String(val) +} + +// NetworkPeerPort returns an attribute KeyValue conforming to the +// "network.peer.port" semantic conventions. 
It represents the peer port number +// of the network connection. +func NetworkPeerPort(val int) attribute.KeyValue { + return NetworkPeerPortKey.Int(val) +} + +// NetworkProtocolName returns an attribute KeyValue conforming to the +// "network.protocol.name" semantic conventions. It represents the +// [OSI application layer] or non-OSI equivalent. +// +// [OSI application layer]: https://wikipedia.org/wiki/Application_layer +func NetworkProtocolName(val string) attribute.KeyValue { + return NetworkProtocolNameKey.String(val) +} + +// NetworkProtocolVersion returns an attribute KeyValue conforming to the +// "network.protocol.version" semantic conventions. It represents the actual +// version of the protocol used for network communication. +func NetworkProtocolVersion(val string) attribute.KeyValue { + return NetworkProtocolVersionKey.String(val) +} + +// Enum values for network.connection.state +var ( + // closed + // Stability: development + NetworkConnectionStateClosed = NetworkConnectionStateKey.String("closed") + // close_wait + // Stability: development + NetworkConnectionStateCloseWait = NetworkConnectionStateKey.String("close_wait") + // closing + // Stability: development + NetworkConnectionStateClosing = NetworkConnectionStateKey.String("closing") + // established + // Stability: development + NetworkConnectionStateEstablished = NetworkConnectionStateKey.String("established") + // fin_wait_1 + // Stability: development + NetworkConnectionStateFinWait1 = NetworkConnectionStateKey.String("fin_wait_1") + // fin_wait_2 + // Stability: development + NetworkConnectionStateFinWait2 = NetworkConnectionStateKey.String("fin_wait_2") + // last_ack + // Stability: development + NetworkConnectionStateLastAck = NetworkConnectionStateKey.String("last_ack") + // listen + // Stability: development + NetworkConnectionStateListen = NetworkConnectionStateKey.String("listen") + // syn_received + // Stability: development + NetworkConnectionStateSynReceived = 
NetworkConnectionStateKey.String("syn_received") + // syn_sent + // Stability: development + NetworkConnectionStateSynSent = NetworkConnectionStateKey.String("syn_sent") + // time_wait + // Stability: development + NetworkConnectionStateTimeWait = NetworkConnectionStateKey.String("time_wait") +) + +// Enum values for network.connection.subtype +var ( + // GPRS + // Stability: development + NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") + // EDGE + // Stability: development + NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") + // UMTS + // Stability: development + NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") + // CDMA + // Stability: development + NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + // Stability: development + NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + // Stability: development + NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + // Stability: development + NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + // Stability: development + NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") + // HSUPA + // Stability: development + NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") + // HSPA + // Stability: development + NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") + // IDEN + // Stability: development + NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") + // EVDO Rev. 
B + // Stability: development + NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") + // LTE + // Stability: development + NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") + // EHRPD + // Stability: development + NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") + // HSPAP + // Stability: development + NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") + // GSM + // Stability: development + NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") + // TD-SCDMA + // Stability: development + NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") + // IWLAN + // Stability: development + NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + // Stability: development + NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + // Stability: development + NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") + // LTE CA + // Stability: development + NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") +) + +// Enum values for network.connection.type +var ( + // wifi + // Stability: development + NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") + // wired + // Stability: development + NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") + // cell + // Stability: development + NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") + // unavailable + // Stability: development + NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") + // unknown + // Stability: development + NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") +) + +// Enum values for network.io.direction +var ( + // transmit + // Stability: development + NetworkIODirectionTransmit = NetworkIODirectionKey.String("transmit") + // 
receive + // Stability: development + NetworkIODirectionReceive = NetworkIODirectionKey.String("receive") +) + +// Enum values for network.transport +var ( + // TCP + // Stability: stable + NetworkTransportTCP = NetworkTransportKey.String("tcp") + // UDP + // Stability: stable + NetworkTransportUDP = NetworkTransportKey.String("udp") + // Named or anonymous pipe. + // Stability: stable + NetworkTransportPipe = NetworkTransportKey.String("pipe") + // Unix domain socket + // Stability: stable + NetworkTransportUnix = NetworkTransportKey.String("unix") + // QUIC + // Stability: stable + NetworkTransportQUIC = NetworkTransportKey.String("quic") +) + +// Enum values for network.type +var ( + // IPv4 + // Stability: stable + NetworkTypeIPv4 = NetworkTypeKey.String("ipv4") + // IPv6 + // Stability: stable + NetworkTypeIPv6 = NetworkTypeKey.String("ipv6") +) + +// Namespace: oci +const ( + // OCIManifestDigestKey is the attribute Key conforming to the + // "oci.manifest.digest" semantic conventions. It represents the digest of the + // OCI image manifest. For container images specifically is the digest by which + // the container image is known. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4" + // Note: Follows [OCI Image Manifest Specification], and specifically the + // [Digest property]. + // An example can be found in [Example Image Manifest]. 
+ // + // [OCI Image Manifest Specification]: https://github.com/opencontainers/image-spec/blob/main/manifest.md + // [Digest property]: https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests + // [Example Image Manifest]: https://github.com/opencontainers/image-spec/blob/main/manifest.md#example-image-manifest + OCIManifestDigestKey = attribute.Key("oci.manifest.digest") +) + +// OCIManifestDigest returns an attribute KeyValue conforming to the +// "oci.manifest.digest" semantic conventions. It represents the digest of the +// OCI image manifest. For container images specifically is the digest by which +// the container image is known. +func OCIManifestDigest(val string) attribute.KeyValue { + return OCIManifestDigestKey.String(val) +} + +// Namespace: opentracing +const ( + // OpenTracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the parent-child + // Reference type. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The causal relationship between a child Span and a parent Span. + OpenTracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +// Enum values for opentracing.ref_type +var ( + // The parent Span depends on the child Span in some capacity + // Stability: development + OpenTracingRefTypeChildOf = OpenTracingRefTypeKey.String("child_of") + // The parent Span doesn't depend in any way on the result of the child Span + // Stability: development + OpenTracingRefTypeFollowsFrom = OpenTracingRefTypeKey.String("follows_from") +) + +// Namespace: os +const ( + // OSBuildIDKey is the attribute Key conforming to the "os.build_id" semantic + // conventions. It represents the unique identifier for a particular build or + // compilation of the operating system. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TQ3C.230805.001.B2", "20E247", "22621" + OSBuildIDKey = attribute.Key("os.build_id") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to be + // parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Microsoft Windows [Version 10.0.18363.778]", "Ubuntu 18.04.1 LTS" + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "iOS", "Android", "Ubuntu" + OSNameKey = attribute.Key("os.name") + + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + OSTypeKey = attribute.Key("os.type") + + // OSVersionKey is the attribute Key conforming to the "os.version" semantic + // conventions. It represents the version string of the operating system as + // defined in [Version Attributes]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "14.2.1", "18.04.1" + // + // [Version Attributes]: /docs/resource/README.md#version-attributes + OSVersionKey = attribute.Key("os.version") +) + +// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" +// semantic conventions. It represents the unique identifier for a particular +// build or compilation of the operating system. 
+func OSBuildID(val string) attribute.KeyValue { + return OSBuildIDKey.String(val) +} + +// OSDescription returns an attribute KeyValue conforming to the "os.description" +// semantic conventions. It represents the human readable (not intended to be +// parsed) OS version information, like e.g. reported by `ver` or +// `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating system +// as defined in [Version Attributes]. +// +// [Version Attributes]: /docs/resource/README.md#version-attributes +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// Enum values for os.type +var ( + // Microsoft Windows + // Stability: development + OSTypeWindows = OSTypeKey.String("windows") + // Linux + // Stability: development + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + // Stability: development + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + // Stability: development + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + // Stability: development + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + // Stability: development + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + // Stability: development + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + // Stability: development + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + // Stability: development + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + // Stability: development + OSTypeSolaris = 
OSTypeKey.String("solaris") + // IBM z/OS + // Stability: development + OSTypeZOS = OSTypeKey.String("zos") +) + +// Namespace: otel +const ( + // OTelComponentNameKey is the attribute Key conforming to the + // "otel.component.name" semantic conventions. It represents a name uniquely + // identifying the instance of the OpenTelemetry component within its containing + // SDK instance. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otlp_grpc_span_exporter/0", "custom-name" + // Note: Implementations SHOULD ensure a low cardinality for this attribute, + // even across application or SDK restarts. + // E.g. implementations MUST NOT use UUIDs as values for this attribute. + // + // Implementations MAY achieve these goals by following a + // `/` pattern, e.g. + // `batching_span_processor/0`. + // Hereby `otel.component.type` refers to the corresponding attribute value of + // the component. + // + // The value of `instance-counter` MAY be automatically assigned by the + // component and uniqueness within the enclosing SDK instance MUST be + // guaranteed. + // For example, `` MAY be implemented by using a monotonically + // increasing counter (starting with `0`), which is incremented every time an + // instance of the given component type is started. + // + // With this implementation, for example the first Batching Span Processor would + // have `batching_span_processor/0` + // as `otel.component.name`, the second one `batching_span_processor/1` and so + // on. + // These values will therefore be reused in the case of an application restart. + OTelComponentNameKey = attribute.Key("otel.component.name") + + // OTelComponentTypeKey is the attribute Key conforming to the + // "otel.component.type" semantic conventions. It represents a name identifying + // the type of the OpenTelemetry component. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "batching_span_processor", "com.example.MySpanExporter" + // Note: If none of the standardized values apply, implementations SHOULD use + // the language-defined name of the type. + // E.g. for Java the fully qualified classname SHOULD be used in this case. + OTelComponentTypeKey = attribute.Key("otel.component.type") + + // OTelScopeNameKey is the attribute Key conforming to the "otel.scope.name" + // semantic conventions. It represents the name of the instrumentation scope - ( + // `InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "io.opentelemetry.contrib.mongodb" + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of the + // instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "1.0.0" + OTelScopeVersionKey = attribute.Key("otel.scope.version") + + // OTelSpanParentOriginKey is the attribute Key conforming to the + // "otel.span.parent.origin" semantic conventions. It represents the determines + // whether the span has a parent span, and if so, + // [whether it is a remote parent]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + OTelSpanParentOriginKey = attribute.Key("otel.span.parent.origin") + + // OTelSpanSamplingResultKey is the attribute Key conforming to the + // "otel.span.sampling_result" semantic conventions. It represents the result + // value of the sampler for this span. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + OTelSpanSamplingResultKey = attribute.Key("otel.span.sampling_result") + + // OTelStatusCodeKey is the attribute Key conforming to the "otel.status_code" + // semantic conventions. It represents the name of the code, either "OK" or + // "ERROR". MUST NOT be set if the status code is UNSET. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: + OTelStatusCodeKey = attribute.Key("otel.status_code") + + // OTelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the description + // of the Status if it has a value, otherwise not set. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "resource not found" + OTelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +// OTelComponentName returns an attribute KeyValue conforming to the +// "otel.component.name" semantic conventions. It represents a name uniquely +// identifying the instance of the OpenTelemetry component within its containing +// SDK instance. +func OTelComponentName(val string) attribute.KeyValue { + return OTelComponentNameKey.String(val) +} + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). 
+func OTelScopeVersion(val string) attribute.KeyValue { + return OTelScopeVersionKey.String(val) +} + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the description +// of the Status if it has a value, otherwise not set. +func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// Enum values for otel.component.type +var ( + // The builtin SDK batching span processor + // + // Stability: development + OTelComponentTypeBatchingSpanProcessor = OTelComponentTypeKey.String("batching_span_processor") + // The builtin SDK simple span processor + // + // Stability: development + OTelComponentTypeSimpleSpanProcessor = OTelComponentTypeKey.String("simple_span_processor") + // The builtin SDK batching log record processor + // + // Stability: development + OTelComponentTypeBatchingLogProcessor = OTelComponentTypeKey.String("batching_log_processor") + // The builtin SDK simple log record processor + // + // Stability: development + OTelComponentTypeSimpleLogProcessor = OTelComponentTypeKey.String("simple_log_processor") + // OTLP span exporter over gRPC with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpGRPCSpanExporter = OTelComponentTypeKey.String("otlp_grpc_span_exporter") + // OTLP span exporter over HTTP with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPSpanExporter = OTelComponentTypeKey.String("otlp_http_span_exporter") + // OTLP span exporter over HTTP with JSON serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPJSONSpanExporter = OTelComponentTypeKey.String("otlp_http_json_span_exporter") + // Zipkin span exporter over HTTP + // + // Stability: development + OTelComponentTypeZipkinHTTPSpanExporter = OTelComponentTypeKey.String("zipkin_http_span_exporter") + // OTLP log record exporter over gRPC with protobuf serialization + // + 
// Stability: development + OTelComponentTypeOtlpGRPCLogExporter = OTelComponentTypeKey.String("otlp_grpc_log_exporter") + // OTLP log record exporter over HTTP with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPLogExporter = OTelComponentTypeKey.String("otlp_http_log_exporter") + // OTLP log record exporter over HTTP with JSON serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPJSONLogExporter = OTelComponentTypeKey.String("otlp_http_json_log_exporter") + // The builtin SDK periodically exporting metric reader + // + // Stability: development + OTelComponentTypePeriodicMetricReader = OTelComponentTypeKey.String("periodic_metric_reader") + // OTLP metric exporter over gRPC with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpGRPCMetricExporter = OTelComponentTypeKey.String("otlp_grpc_metric_exporter") + // OTLP metric exporter over HTTP with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPMetricExporter = OTelComponentTypeKey.String("otlp_http_metric_exporter") + // OTLP metric exporter over HTTP with JSON serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPJSONMetricExporter = OTelComponentTypeKey.String("otlp_http_json_metric_exporter") + // Prometheus metric exporter over HTTP with the default text-based format + // + // Stability: development + OTelComponentTypePrometheusHTTPTextMetricExporter = OTelComponentTypeKey.String("prometheus_http_text_metric_exporter") +) + +// Enum values for otel.span.parent.origin +var ( + // The span does not have a parent, it is a root span + // Stability: development + OTelSpanParentOriginNone = OTelSpanParentOriginKey.String("none") + // The span has a parent and the parent's span context [isRemote()] is false + // Stability: development + // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + OTelSpanParentOriginLocal = 
OTelSpanParentOriginKey.String("local") + // The span has a parent and the parent's span context [isRemote()] is true + // Stability: development + // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + OTelSpanParentOriginRemote = OTelSpanParentOriginKey.String("remote") +) + +// Enum values for otel.span.sampling_result +var ( + // The span is not sampled and not recording + // Stability: development + OTelSpanSamplingResultDrop = OTelSpanSamplingResultKey.String("DROP") + // The span is not sampled, but recording + // Stability: development + OTelSpanSamplingResultRecordOnly = OTelSpanSamplingResultKey.String("RECORD_ONLY") + // The span is sampled and recording + // Stability: development + OTelSpanSamplingResultRecordAndSample = OTelSpanSamplingResultKey.String("RECORD_AND_SAMPLE") +) + +// Enum values for otel.status_code +var ( + // The operation has been validated by an Application developer or Operator to + // have completed successfully. + // Stability: stable + OTelStatusCodeOk = OTelStatusCodeKey.String("OK") + // The operation contains an error. + // Stability: stable + OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") +) + +// Namespace: peer +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" semantic + // conventions. It represents the [`service.name`] of the remote service. SHOULD + // be equal to the actual `service.name` resource attribute of the remote + // service if any. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: AuthTokenCache + // + // [`service.name`]: /docs/resource/README.md#service + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the "peer.service" +// semantic conventions. It represents the [`service.name`] of the remote +// service. SHOULD be equal to the actual `service.name` resource attribute of +// the remote service if any. 
+// +// [`service.name`]: /docs/resource/README.md#service +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// Namespace: process +const ( + // ProcessArgsCountKey is the attribute Key conforming to the + // "process.args_count" semantic conventions. It represents the length of the + // process.command_args array. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 4 + // Note: This field can be useful for querying or performing bucket analysis on + // how many arguments were provided to start a process. More arguments may be an + // indication of suspicious activity. + ProcessArgsCountKey = attribute.Key("process.args_count") + + // ProcessCommandKey is the attribute Key conforming to the "process.command" + // semantic conventions. It represents the command used to launch the process + // (i.e. the command name). On Linux based systems, can be set to the zeroth + // string in `proc/[pid]/cmdline`. On Windows, can be set to the first parameter + // extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cmd/otelcol" + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) as received by + // the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, this + // would be the full argv vector passed to `main`. SHOULD NOT be collected by + // default unless there is sanitization that excludes sensitive data. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cmd/otecol", "--config=config.yaml" + ProcessCommandArgsKey = attribute.Key("process.command_args") + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. It represents the full command + // used to launch the process as a single string representing the full command. + // On Windows, can be set to the result of `GetCommandLineW`. Do not set this if + // you have to assemble it just for monitoring; use `process.command_args` + // instead. SHOULD NOT be collected by default unless there is sanitization that + // excludes sensitive data. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "C:\cmd\otecol --config="my directory\config.yaml"" + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessContextSwitchTypeKey is the attribute Key conforming to the + // "process.context_switch_type" semantic conventions. It represents the + // specifies whether the context switches for this data point were voluntary or + // involuntary. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type") + + // ProcessCreationTimeKey is the attribute Key conforming to the + // "process.creation.time" semantic conventions. It represents the date and time + // the process was created, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2023-11-21T09:25:34.853Z" + ProcessCreationTimeKey = attribute.Key("process.creation.time") + + // ProcessExecutableBuildIDGNUKey is the attribute Key conforming to the + // "process.executable.build_id.gnu" semantic conventions. 
It represents the GNU + // build ID as found in the `.note.gnu.build-id` ELF section (hex string). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "c89b11207f6479603b0d49bf291c092c2b719293" + ProcessExecutableBuildIDGNUKey = attribute.Key("process.executable.build_id.gnu") + + // ProcessExecutableBuildIDGoKey is the attribute Key conforming to the + // "process.executable.build_id.go" semantic conventions. It represents the Go + // build ID as retrieved by `go tool buildid `. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "foh3mEXu7BLZjsN9pOwG/kATcXlYVCDEFouRMQed_/WwRFB1hPo9LBkekthSPG/x8hMC8emW2cCjXD0_1aY" + ProcessExecutableBuildIDGoKey = attribute.Key("process.executable.build_id.go") + + // ProcessExecutableBuildIDHtlhashKey is the attribute Key conforming to the + // "process.executable.build_id.htlhash" semantic conventions. It represents the + // profiling specific build ID for executables. See the OTel specification for + // Profiles for more information. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "600DCAFE4A110000F2BF38C493F5FB92" + ProcessExecutableBuildIDHtlhashKey = attribute.Key("process.executable.build_id.htlhash") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name of the + // process executable. On Linux based systems, this SHOULD be set to the base + // name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to + // the base name of `GetProcessImageFileNameW`. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otelcol" + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full path + // to the process executable. On Linux based systems, can be set to the target + // of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/usr/bin/cmd/otelcol" + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessExitCodeKey is the attribute Key conforming to the "process.exit.code" + // semantic conventions. It represents the exit code of the process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 127 + ProcessExitCodeKey = attribute.Key("process.exit.code") + + // ProcessExitTimeKey is the attribute Key conforming to the "process.exit.time" + // semantic conventions. It represents the date and time the process exited, in + // ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2023-11-21T09:26:12.315Z" + ProcessExitTimeKey = attribute.Key("process.exit.time") + + // ProcessGroupLeaderPIDKey is the attribute Key conforming to the + // "process.group_leader.pid" semantic conventions. It represents the PID of the + // process's group leader. This is also the process group ID (PGID) of the + // process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 23 + ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid") + + // ProcessInteractiveKey is the attribute Key conforming to the + // "process.interactive" semantic conventions. 
It represents the whether the + // process is connected to an interactive shell. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + ProcessInteractiveKey = attribute.Key("process.interactive") + + // ProcessLinuxCgroupKey is the attribute Key conforming to the + // "process.linux.cgroup" semantic conventions. It represents the control group + // associated with the process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1:name=systemd:/user.slice/user-1000.slice/session-3.scope", + // "0::/user.slice/user-1000.slice/user@1000.service/tmux-spawn-0267755b-4639-4a27-90ed-f19f88e53748.scope" + // Note: Control groups (cgroups) are a kernel feature used to organize and + // manage process resources. This attribute provides the path(s) to the + // cgroup(s) associated with the process, which should match the contents of the + // [/proc/[PID]/cgroup] file. + // + // [/proc/[PID]/cgroup]: https://man7.org/linux/man-pages/man7/cgroups.7.html + ProcessLinuxCgroupKey = attribute.Key("process.linux.cgroup") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns the + // process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "root" + ProcessOwnerKey = attribute.Key("process.owner") + + // ProcessPagingFaultTypeKey is the attribute Key conforming to the + // "process.paging.fault_type" semantic conventions. It represents the type of + // page fault for this data point. Type `major` is for major/hard page faults, + // and `minor` is for minor/soft page faults. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent Process + // identifier (PPID). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessPIDKey is the attribute Key conforming to the "process.pid" semantic + // conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessRealUserIDKey is the attribute Key conforming to the + // "process.real_user.id" semantic conventions. It represents the real user ID + // (RUID) of the process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1000 + ProcessRealUserIDKey = attribute.Key("process.real_user.id") + + // ProcessRealUserNameKey is the attribute Key conforming to the + // "process.real_user.name" semantic conventions. It represents the username of + // the real user of the process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "operator" + ProcessRealUserNameKey = attribute.Key("process.real_user.name") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0 + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") + + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of the + // runtime of this process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "OpenJDK Runtime Environment" + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the version of + // the runtime of this process, as returned by the runtime without modification. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 14.0.2 + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessSavedUserIDKey is the attribute Key conforming to the + // "process.saved_user.id" semantic conventions. It represents the saved user ID + // (SUID) of the process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1002 + ProcessSavedUserIDKey = attribute.Key("process.saved_user.id") + + // ProcessSavedUserNameKey is the attribute Key conforming to the + // "process.saved_user.name" semantic conventions. It represents the username of + // the saved user. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "operator" + ProcessSavedUserNameKey = attribute.Key("process.saved_user.name") + + // ProcessSessionLeaderPIDKey is the attribute Key conforming to the + // "process.session_leader.pid" semantic conventions. It represents the PID of + // the process's session leader. This is also the session ID (SID) of the + // process. 
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 14 + ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid") + + // ProcessTitleKey is the attribute Key conforming to the "process.title" + // semantic conventions. It represents the process title (proctitle). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cat /etc/hostname", "xfce4-session", "bash" + // Note: In many Unix-like systems, process title (proctitle), is the string + // that represents the name or command line of a running process, displayed by + // system monitoring tools like ps, top, and htop. + ProcessTitleKey = attribute.Key("process.title") + + // ProcessUserIDKey is the attribute Key conforming to the "process.user.id" + // semantic conventions. It represents the effective user ID (EUID) of the + // process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1001 + ProcessUserIDKey = attribute.Key("process.user.id") + + // ProcessUserNameKey is the attribute Key conforming to the "process.user.name" + // semantic conventions. It represents the username of the effective user of the + // process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "root" + ProcessUserNameKey = attribute.Key("process.user.name") + + // ProcessVpidKey is the attribute Key conforming to the "process.vpid" semantic + // conventions. It represents the virtual process identifier. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 12 + // Note: The process ID within a PID namespace. This is not necessarily unique + // across all processes on the host but it is unique within the process + // namespace that the process exists within. 
+ ProcessVpidKey = attribute.Key("process.vpid") + + // ProcessWorkingDirectoryKey is the attribute Key conforming to the + // "process.working_directory" semantic conventions. It represents the working + // directory of the process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/root" + ProcessWorkingDirectoryKey = attribute.Key("process.working_directory") +) + +// ProcessArgsCount returns an attribute KeyValue conforming to the +// "process.args_count" semantic conventions. It represents the length of the +// process.command_args array. +func ProcessArgsCount(val int) attribute.KeyValue { + return ProcessArgsCountKey.Int(val) +} + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be set +// to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to the +// first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents the all the command +// arguments (including the command/executable itself) as received by the +// process. On Linux-based systems (and some other Unixoid systems supporting +// procfs), can be set according to the list of null-delimited strings extracted +// from `proc/[pid]/cmdline`. For libc-based executables, this would be the full +// argv vector passed to `main`. SHOULD NOT be collected by default unless there +// is sanitization that excludes sensitive data. 
+func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`. Do not set this if +// you have to assemble it just for monitoring; use `process.command_args` +// instead. SHOULD NOT be collected by default unless there is sanitization that +// excludes sensitive data. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessCreationTime returns an attribute KeyValue conforming to the +// "process.creation.time" semantic conventions. It represents the date and time +// the process was created, in ISO 8601 format. +func ProcessCreationTime(val string) attribute.KeyValue { + return ProcessCreationTimeKey.String(val) +} + +// ProcessEnvironmentVariable returns an attribute KeyValue conforming to the +// "process.environment_variable" semantic conventions. It represents the process +// environment variables, `` being the environment variable name, the value +// being the environment variable value. +func ProcessEnvironmentVariable(key string, val string) attribute.KeyValue { + return attribute.String("process.environment_variable."+key, val) +} + +// ProcessExecutableBuildIDGNU returns an attribute KeyValue conforming to the +// "process.executable.build_id.gnu" semantic conventions. It represents the GNU +// build ID as found in the `.note.gnu.build-id` ELF section (hex string). +func ProcessExecutableBuildIDGNU(val string) attribute.KeyValue { + return ProcessExecutableBuildIDGNUKey.String(val) +} + +// ProcessExecutableBuildIDGo returns an attribute KeyValue conforming to the +// "process.executable.build_id.go" semantic conventions. 
It represents the Go +// build ID as retrieved by `go tool buildid `. +func ProcessExecutableBuildIDGo(val string) attribute.KeyValue { + return ProcessExecutableBuildIDGoKey.String(val) +} + +// ProcessExecutableBuildIDHtlhash returns an attribute KeyValue conforming to +// the "process.executable.build_id.htlhash" semantic conventions. It represents +// the profiling specific build ID for executables. See the OTel specification +// for Profiles for more information. +func ProcessExecutableBuildIDHtlhash(val string) attribute.KeyValue { + return ProcessExecutableBuildIDHtlhashKey.String(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of the +// process executable. On Linux based systems, this SHOULD be set to the base +// name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to the +// base name of `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions. It represents the full path to +// the process executable. On Linux based systems, can be set to the target of +// `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessExitCode returns an attribute KeyValue conforming to the +// "process.exit.code" semantic conventions. It represents the exit code of the +// process. +func ProcessExitCode(val int) attribute.KeyValue { + return ProcessExitCodeKey.Int(val) +} + +// ProcessExitTime returns an attribute KeyValue conforming to the +// "process.exit.time" semantic conventions. It represents the date and time the +// process exited, in ISO 8601 format. 
+func ProcessExitTime(val string) attribute.KeyValue { + return ProcessExitTimeKey.String(val) +} + +// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the +// "process.group_leader.pid" semantic conventions. It represents the PID of the +// process's group leader. This is also the process group ID (PGID) of the +// process. +func ProcessGroupLeaderPID(val int) attribute.KeyValue { + return ProcessGroupLeaderPIDKey.Int(val) +} + +// ProcessInteractive returns an attribute KeyValue conforming to the +// "process.interactive" semantic conventions. It represents the whether the +// process is connected to an interactive shell. +func ProcessInteractive(val bool) attribute.KeyValue { + return ProcessInteractiveKey.Bool(val) +} + +// ProcessLinuxCgroup returns an attribute KeyValue conforming to the +// "process.linux.cgroup" semantic conventions. It represents the control group +// associated with the process. +func ProcessLinuxCgroup(val string) attribute.KeyValue { + return ProcessLinuxCgroupKey.String(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the "process.owner" +// semantic conventions. It represents the username of the user that owns the +// process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent Process +// identifier (PPID). +func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) +} + +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) +} + +// ProcessRealUserID returns an attribute KeyValue conforming to the +// "process.real_user.id" semantic conventions. It represents the real user ID +// (RUID) of the process. 
+func ProcessRealUserID(val int) attribute.KeyValue { + return ProcessRealUserIDKey.Int(val) +} + +// ProcessRealUserName returns an attribute KeyValue conforming to the +// "process.real_user.name" semantic conventions. It represents the username of +// the real user of the process. +func ProcessRealUserName(val string) attribute.KeyValue { + return ProcessRealUserNameKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessSavedUserID returns an attribute KeyValue conforming to the +// "process.saved_user.id" semantic conventions. It represents the saved user ID +// (SUID) of the process. +func ProcessSavedUserID(val int) attribute.KeyValue { + return ProcessSavedUserIDKey.Int(val) +} + +// ProcessSavedUserName returns an attribute KeyValue conforming to the +// "process.saved_user.name" semantic conventions. It represents the username of +// the saved user. 
+func ProcessSavedUserName(val string) attribute.KeyValue { + return ProcessSavedUserNameKey.String(val) +} + +// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the +// "process.session_leader.pid" semantic conventions. It represents the PID of +// the process's session leader. This is also the session ID (SID) of the +// process. +func ProcessSessionLeaderPID(val int) attribute.KeyValue { + return ProcessSessionLeaderPIDKey.Int(val) +} + +// ProcessTitle returns an attribute KeyValue conforming to the "process.title" +// semantic conventions. It represents the process title (proctitle). +func ProcessTitle(val string) attribute.KeyValue { + return ProcessTitleKey.String(val) +} + +// ProcessUserID returns an attribute KeyValue conforming to the +// "process.user.id" semantic conventions. It represents the effective user ID +// (EUID) of the process. +func ProcessUserID(val int) attribute.KeyValue { + return ProcessUserIDKey.Int(val) +} + +// ProcessUserName returns an attribute KeyValue conforming to the +// "process.user.name" semantic conventions. It represents the username of the +// effective user of the process. +func ProcessUserName(val string) attribute.KeyValue { + return ProcessUserNameKey.String(val) +} + +// ProcessVpid returns an attribute KeyValue conforming to the "process.vpid" +// semantic conventions. It represents the virtual process identifier. +func ProcessVpid(val int) attribute.KeyValue { + return ProcessVpidKey.Int(val) +} + +// ProcessWorkingDirectory returns an attribute KeyValue conforming to the +// "process.working_directory" semantic conventions. It represents the working +// directory of the process. 
+func ProcessWorkingDirectory(val string) attribute.KeyValue { + return ProcessWorkingDirectoryKey.String(val) +} + +// Enum values for process.context_switch_type +var ( + // voluntary + // Stability: development + ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary") + // involuntary + // Stability: development + ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary") +) + +// Enum values for process.paging.fault_type +var ( + // major + // Stability: development + ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major") + // minor + // Stability: development + ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor") +) + +// Namespace: profile +const ( + // ProfileFrameTypeKey is the attribute Key conforming to the + // "profile.frame.type" semantic conventions. It represents the describes the + // interpreter or compiler of a single frame. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cpython" + ProfileFrameTypeKey = attribute.Key("profile.frame.type") +) + +// Enum values for profile.frame.type +var ( + // [.NET] + // + // Stability: development + // + // [.NET]: https://wikipedia.org/wiki/.NET + ProfileFrameTypeDotnet = ProfileFrameTypeKey.String("dotnet") + // [JVM] + // + // Stability: development + // + // [JVM]: https://wikipedia.org/wiki/Java_virtual_machine + ProfileFrameTypeJVM = ProfileFrameTypeKey.String("jvm") + // [Kernel] + // + // Stability: development + // + // [Kernel]: https://wikipedia.org/wiki/Kernel_(operating_system) + ProfileFrameTypeKernel = ProfileFrameTypeKey.String("kernel") + // Can be one of but not limited to [C], [C++], [Go] or [Rust]. If possible, a + // more precise value MUST be used. 
+ // + // Stability: development + // + // [C]: https://wikipedia.org/wiki/C_(programming_language) + // [C++]: https://wikipedia.org/wiki/C%2B%2B + // [Go]: https://wikipedia.org/wiki/Go_(programming_language) + // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language) + ProfileFrameTypeNative = ProfileFrameTypeKey.String("native") + // [Perl] + // + // Stability: development + // + // [Perl]: https://wikipedia.org/wiki/Perl + ProfileFrameTypePerl = ProfileFrameTypeKey.String("perl") + // [PHP] + // + // Stability: development + // + // [PHP]: https://wikipedia.org/wiki/PHP + ProfileFrameTypePHP = ProfileFrameTypeKey.String("php") + // [Python] + // + // Stability: development + // + // [Python]: https://wikipedia.org/wiki/Python_(programming_language) + ProfileFrameTypeCpython = ProfileFrameTypeKey.String("cpython") + // [Ruby] + // + // Stability: development + // + // [Ruby]: https://wikipedia.org/wiki/Ruby_(programming_language) + ProfileFrameTypeRuby = ProfileFrameTypeKey.String("ruby") + // [V8JS] + // + // Stability: development + // + // [V8JS]: https://wikipedia.org/wiki/V8_(JavaScript_engine) + ProfileFrameTypeV8JS = ProfileFrameTypeKey.String("v8js") + // [Erlang] + // + // Stability: development + // + // [Erlang]: https://en.wikipedia.org/wiki/BEAM_(Erlang_virtual_machine) + ProfileFrameTypeBeam = ProfileFrameTypeKey.String("beam") + // [Go] + // + // Stability: development + // + // [Go]: https://wikipedia.org/wiki/Go_(programming_language) + ProfileFrameTypeGo = ProfileFrameTypeKey.String("go") + // [Rust] + // + // Stability: development + // + // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language) + ProfileFrameTypeRust = ProfileFrameTypeKey.String("rust") +) + +// Namespace: rpc +const ( + // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.connect_rpc.error_code" semantic conventions. It represents the + // [error codes] of the Connect request. Error codes are always string values. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [error codes]: https://connectrpc.com/docs/protocol/#error-codes + RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") + + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the + // [numeric status code] of the gRPC request. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [numeric status code]: https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") + + // RPCJSONRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code` + // property of response if it is an error response. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: -32700, 100 + RPCJSONRPCErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJSONRPCErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Parse error", "User already exists" + RPCJSONRPCErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") + + // RPCJSONRPCRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, string, + // `null` or missing (for notifications), value is expected to be cast to string + // for simplicity. Use empty string in case of `null` value. Omit entirely if + // this is a notification. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "10", "request-7", "" + RPCJSONRPCRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJSONRPCVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // doesn't specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2.0", "1.0" + RPCJSONRPCVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCMessageCompressedSizeKey is the attribute Key conforming to the + // "rpc.message.compressed_size" semantic conventions. It represents the + // compressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size") + + // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id" + // semantic conventions. It MUST be calculated as two different counters + // starting from `1` one for sent messages and one for received messages. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: This way we guarantee that the values will be consistent between + // different implementations. + RPCMessageIDKey = attribute.Key("rpc.message.id") + + // RPCMessageTypeKey is the attribute Key conforming to the "rpc.message.type" + // semantic conventions. It represents whether this is a received or sent + // message. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + RPCMessageTypeKey = attribute.Key("rpc.message.type") + + // RPCMessageUncompressedSizeKey is the attribute Key conforming to the + // "rpc.message.uncompressed_size" semantic conventions. 
It represents the + // uncompressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" semantic + // conventions. It represents the name of the (logical) method being called, + // must be equal to the $method part in the span name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: exampleMethod + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function.name` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" semantic + // conventions. It represents the full (logical) name of the service being + // called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: myservice.EchoService + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing class. + // The `code.namespace` attribute may be used to store the latter (despite the + // attribute name, it may include a class name; e.g., class with method actually + // executing the call on the server side, RPC client stub class on the client + // side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCSystemKey is the attribute Key conforming to the "rpc.system" semantic + // conventions. It represents a string identifying the remoting system. See + // below for a list of well-known identifiers. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + RPCSystemKey = attribute.Key("rpc.system") +) + +// RPCConnectRPCRequestMetadata returns an attribute KeyValue conforming to the +// "rpc.connect_rpc.request.metadata" semantic conventions. It represents the +// connect request metadata, `` being the normalized Connect Metadata key +// (lowercase), the value being the metadata values. +func RPCConnectRPCRequestMetadata(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("rpc.connect_rpc.request.metadata."+key, val) +} + +// RPCConnectRPCResponseMetadata returns an attribute KeyValue conforming to the +// "rpc.connect_rpc.response.metadata" semantic conventions. It represents the +// connect response metadata, `` being the normalized Connect Metadata key +// (lowercase), the value being the metadata values. +func RPCConnectRPCResponseMetadata(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("rpc.connect_rpc.response.metadata."+key, val) +} + +// RPCGRPCRequestMetadata returns an attribute KeyValue conforming to the +// "rpc.grpc.request.metadata" semantic conventions. It represents the gRPC +// request metadata, `` being the normalized gRPC Metadata key (lowercase), +// the value being the metadata values. +func RPCGRPCRequestMetadata(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("rpc.grpc.request.metadata."+key, val) +} + +// RPCGRPCResponseMetadata returns an attribute KeyValue conforming to the +// "rpc.grpc.response.metadata" semantic conventions. It represents the gRPC +// response metadata, `` being the normalized gRPC Metadata key (lowercase), +// the value being the metadata values. 
+func RPCGRPCResponseMetadata(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("rpc.grpc.response.metadata."+key, val) +} + +// RPCJSONRPCErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code` +// property of response if it is an error response. +func RPCJSONRPCErrorCode(val int) attribute.KeyValue { + return RPCJSONRPCErrorCodeKey.Int(val) +} + +// RPCJSONRPCErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJSONRPCErrorMessage(val string) attribute.KeyValue { + return RPCJSONRPCErrorMessageKey.String(val) +} + +// RPCJSONRPCRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` property +// of request or response. Since protocol allows id to be int, string, `null` or +// missing (for notifications), value is expected to be cast to string for +// simplicity. Use empty string in case of `null` value. Omit entirely if this is +// a notification. +func RPCJSONRPCRequestID(val string) attribute.KeyValue { + return RPCJSONRPCRequestIDKey.String(val) +} + +// RPCJSONRPCVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol version +// as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 doesn't +// specify this, the value can be omitted. +func RPCJSONRPCVersion(val string) attribute.KeyValue { + return RPCJSONRPCVersionKey.String(val) +} + +// RPCMessageCompressedSize returns an attribute KeyValue conforming to the +// "rpc.message.compressed_size" semantic conventions. It represents the +// compressed size of the message in bytes. 
+func RPCMessageCompressedSize(val int) attribute.KeyValue { + return RPCMessageCompressedSizeKey.Int(val) +} + +// RPCMessageID returns an attribute KeyValue conforming to the "rpc.message.id" +// semantic conventions. It MUST be calculated as two different counters starting +// from `1` one for sent messages and one for received messages. +func RPCMessageID(val int) attribute.KeyValue { + return RPCMessageIDKey.Int(val) +} + +// RPCMessageUncompressedSize returns an attribute KeyValue conforming to the +// "rpc.message.uncompressed_size" semantic conventions. It represents the +// uncompressed size of the message in bytes. +func RPCMessageUncompressedSize(val int) attribute.KeyValue { + return RPCMessageUncompressedSizeKey.Int(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. 
+func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// Enum values for rpc.connect_rpc.error_code +var ( + // cancelled + // Stability: development + RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") + // unknown + // Stability: development + RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") + // invalid_argument + // Stability: development + RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") + // deadline_exceeded + // Stability: development + RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") + // not_found + // Stability: development + RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") + // already_exists + // Stability: development + RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") + // permission_denied + // Stability: development + RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") + // resource_exhausted + // Stability: development + RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") + // failed_precondition + // Stability: development + RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") + // aborted + // Stability: development + RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") + // out_of_range + // Stability: development + RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") + // unimplemented + // Stability: development + RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") + // internal + // Stability: development + RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") + // unavailable + // Stability: development + RPCConnectRPCErrorCodeUnavailable = 
RPCConnectRPCErrorCodeKey.String("unavailable") + // data_loss + // Stability: development + RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") + // unauthenticated + // Stability: development + RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") +) + +// Enum values for rpc.grpc.status_code +var ( + // OK + // Stability: development + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + // Stability: development + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + // Stability: development + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + // Stability: development + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + // Stability: development + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + // Stability: development + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + // Stability: development + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + // Stability: development + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + // Stability: development + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + // Stability: development + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + // Stability: development + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + // Stability: development + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + // Stability: development + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + // Stability: development + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + // Stability: development + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + 
// Stability: development + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + // Stability: development + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Enum values for rpc.message.type +var ( + // sent + // Stability: development + RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") + // received + // Stability: development + RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") +) + +// Enum values for rpc.system +var ( + // gRPC + // Stability: development + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + // Stability: development + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + // Stability: development + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + // Stability: development + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") + // Connect RPC + // Stability: development + RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") +) + +// Namespace: security_rule +const ( + // SecurityRuleCategoryKey is the attribute Key conforming to the + // "security_rule.category" semantic conventions. It represents a categorization + // value keyword used by the entity using the rule for detection of this event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Attempted Information Leak" + SecurityRuleCategoryKey = attribute.Key("security_rule.category") + + // SecurityRuleDescriptionKey is the attribute Key conforming to the + // "security_rule.description" semantic conventions. It represents the + // description of the rule generating the event. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Block requests to public DNS over HTTPS / TLS protocols" + SecurityRuleDescriptionKey = attribute.Key("security_rule.description") + + // SecurityRuleLicenseKey is the attribute Key conforming to the + // "security_rule.license" semantic conventions. It represents the name of the + // license under which the rule used to generate this event is made available. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Apache 2.0" + SecurityRuleLicenseKey = attribute.Key("security_rule.license") + + // SecurityRuleNameKey is the attribute Key conforming to the + // "security_rule.name" semantic conventions. It represents the name of the rule + // or signature generating the event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "BLOCK_DNS_over_TLS" + SecurityRuleNameKey = attribute.Key("security_rule.name") + + // SecurityRuleReferenceKey is the attribute Key conforming to the + // "security_rule.reference" semantic conventions. It represents the reference + // URL to additional information about the rule used to generate this event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://en.wikipedia.org/wiki/DNS_over_TLS" + // Note: The URL can point to the vendor’s documentation about the rule. If + // that’s not available, it can also be a link to a more general page + // describing this type of alert. + SecurityRuleReferenceKey = attribute.Key("security_rule.reference") + + // SecurityRuleRulesetNameKey is the attribute Key conforming to the + // "security_rule.ruleset.name" semantic conventions. It represents the name of + // the ruleset, policy, group, or parent category in which the rule used to + // generate this event is a member. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Standard_Protocol_Filters" + SecurityRuleRulesetNameKey = attribute.Key("security_rule.ruleset.name") + + // SecurityRuleUUIDKey is the attribute Key conforming to the + // "security_rule.uuid" semantic conventions. It represents a rule ID that is + // unique within the scope of a set or group of agents, observers, or other + // entities using the rule for detection of this event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "550e8400-e29b-41d4-a716-446655440000", "1100110011" + SecurityRuleUUIDKey = attribute.Key("security_rule.uuid") + + // SecurityRuleVersionKey is the attribute Key conforming to the + // "security_rule.version" semantic conventions. It represents the version / + // revision of the rule being used for analysis. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.0.0" + SecurityRuleVersionKey = attribute.Key("security_rule.version") +) + +// SecurityRuleCategory returns an attribute KeyValue conforming to the +// "security_rule.category" semantic conventions. It represents a categorization +// value keyword used by the entity using the rule for detection of this event. +func SecurityRuleCategory(val string) attribute.KeyValue { + return SecurityRuleCategoryKey.String(val) +} + +// SecurityRuleDescription returns an attribute KeyValue conforming to the +// "security_rule.description" semantic conventions. It represents the +// description of the rule generating the event. +func SecurityRuleDescription(val string) attribute.KeyValue { + return SecurityRuleDescriptionKey.String(val) +} + +// SecurityRuleLicense returns an attribute KeyValue conforming to the +// "security_rule.license" semantic conventions. It represents the name of the +// license under which the rule used to generate this event is made available. 
+func SecurityRuleLicense(val string) attribute.KeyValue { + return SecurityRuleLicenseKey.String(val) +} + +// SecurityRuleName returns an attribute KeyValue conforming to the +// "security_rule.name" semantic conventions. It represents the name of the rule +// or signature generating the event. +func SecurityRuleName(val string) attribute.KeyValue { + return SecurityRuleNameKey.String(val) +} + +// SecurityRuleReference returns an attribute KeyValue conforming to the +// "security_rule.reference" semantic conventions. It represents the reference +// URL to additional information about the rule used to generate this event. +func SecurityRuleReference(val string) attribute.KeyValue { + return SecurityRuleReferenceKey.String(val) +} + +// SecurityRuleRulesetName returns an attribute KeyValue conforming to the +// "security_rule.ruleset.name" semantic conventions. It represents the name of +// the ruleset, policy, group, or parent category in which the rule used to +// generate this event is a member. +func SecurityRuleRulesetName(val string) attribute.KeyValue { + return SecurityRuleRulesetNameKey.String(val) +} + +// SecurityRuleUUID returns an attribute KeyValue conforming to the +// "security_rule.uuid" semantic conventions. It represents a rule ID that is +// unique within the scope of a set or group of agents, observers, or other +// entities using the rule for detection of this event. +func SecurityRuleUUID(val string) attribute.KeyValue { + return SecurityRuleUUIDKey.String(val) +} + +// SecurityRuleVersion returns an attribute KeyValue conforming to the +// "security_rule.version" semantic conventions. It represents the version / +// revision of the rule being used for analysis. +func SecurityRuleVersion(val string) attribute.KeyValue { + return SecurityRuleVersionKey.String(val) +} + +// Namespace: server +const ( + // ServerAddressKey is the attribute Key conforming to the "server.address" + // semantic conventions. 
It represents the server domain name if available + // without reverse DNS lookup; otherwise, IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "example.com", "10.1.2.80", "/tmp/my.sock" + // Note: When observed from the client side, and when communicating through an + // intermediary, `server.address` SHOULD represent the server address behind any + // intermediaries, for example proxies, if it's available. + ServerAddressKey = attribute.Key("server.address") + + // ServerPortKey is the attribute Key conforming to the "server.port" semantic + // conventions. It represents the server port number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 80, 8080, 443 + // Note: When observed from the client side, and when communicating through an + // intermediary, `server.port` SHOULD represent the server port behind any + // intermediaries, for example proxies, if it's available. + ServerPortKey = attribute.Key("server.port") +) + +// ServerAddress returns an attribute KeyValue conforming to the "server.address" +// semantic conventions. It represents the server domain name if available +// without reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func ServerAddress(val string) attribute.KeyValue { + return ServerAddressKey.String(val) +} + +// ServerPort returns an attribute KeyValue conforming to the "server.port" +// semantic conventions. It represents the server port number. +func ServerPort(val int) attribute.KeyValue { + return ServerPortKey.Int(val) +} + +// Namespace: service +const ( + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID of + // the service instance. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "627cc493-f310-47de-96bd-71410b7dec09" + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be globally + // unique). The ID helps to + // distinguish instances of the same service that exist at the same time (e.g. + // instances of a horizontally scaled + // service). + // + // Implementations, such as SDKs, are recommended to generate a random Version 1 + // or Version 4 [RFC + // 4122] UUID, but are free to use an inherent unique ID as + // the source of + // this value if stability is desirable. In that case, the ID SHOULD be used as + // source of a UUID Version 5 and + // SHOULD use the following UUID as the namespace: + // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`. + // + // UUIDs are typically recommended, as only an opaque value for the purposes of + // identifying a service instance is + // needed. Similar to what can be seen in the man page for the + // [`/etc/machine-id`] file, the underlying + // data, such as pod name and namespace should be treated as confidential, being + // the user's choice to expose it + // or not via another resource attribute. + // + // For applications running behind an application server (like unicorn), we do + // not recommend using one identifier + // for all processes participating in the application. Instead, it's recommended + // each division (e.g. a worker + // thread in unicorn) to have its own instance.id. + // + // It's not recommended for a Collector to set `service.instance.id` if it can't + // unambiguously determine the + // service instance that is generating that telemetry. For instance, creating an + // UUID based on `pod.name` will + // likely be wrong, as the Collector might not know from which container within + // that pod the telemetry originated. 
+ // However, Collectors can set the `service.instance.id` if they can + // unambiguously determine the service instance + // for that telemetry. This is typically the case for scraping receivers, as + // they know the target address and + // port. + // + // [RFC + // 4122]: https://www.ietf.org/rfc/rfc4122.txt + // [`/etc/machine-id`]: https://www.freedesktop.org/software/systemd/man/latest/machine-id.html + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceNameKey is the attribute Key conforming to the "service.name" semantic + // conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "shoppingcart" + // Note: MUST be the same for all instances of horizontally scaled services. If + // the value was not specified, SDKs MUST fallback to `unknown_service:` + // concatenated with [`process.executable.name`], e.g. `unknown_service:bash`. + // If `process.executable.name` is not available, the value MUST be set to + // `unknown_service`. + // + // [`process.executable.name`]: process.md + ServiceNameKey = attribute.Key("service.name") + + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Shop" + // Note: A string value having a meaning that helps to distinguish a group of + // services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` is + // expected to be unique for all services that have no explicit namespace + // defined (so the empty/unspecified namespace is simply one more valid + // namespace). 
Zero-length namespace string is assumed equal to unspecified + // namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceVersionKey is the attribute Key conforming to the "service.version" + // semantic conventions. It represents the version string of the service API or + // implementation. The format is not defined by these conventions. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "2.0.0", "a01dbef8a" + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of the +// service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceName returns an attribute KeyValue conforming to the "service.name" +// semantic conventions. It represents the logical name of the service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. The format is not defined by these +// conventions. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// Namespace: session +const ( + // SessionIDKey is the attribute Key conforming to the "session.id" semantic + // conventions. It represents a unique id to identify a session. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 00112233-4455-6677-8899-aabbccddeeff + SessionIDKey = attribute.Key("session.id") + + // SessionPreviousIDKey is the attribute Key conforming to the + // "session.previous_id" semantic conventions. It represents the previous + // `session.id` for this user, when known. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 00112233-4455-6677-8899-aabbccddeeff + SessionPreviousIDKey = attribute.Key("session.previous_id") +) + +// SessionID returns an attribute KeyValue conforming to the "session.id" +// semantic conventions. It represents a unique id to identify a session. +func SessionID(val string) attribute.KeyValue { + return SessionIDKey.String(val) +} + +// SessionPreviousID returns an attribute KeyValue conforming to the +// "session.previous_id" semantic conventions. It represents the previous +// `session.id` for this user, when known. +func SessionPreviousID(val string) attribute.KeyValue { + return SessionPreviousIDKey.String(val) +} + +// Namespace: signalr +const ( + // SignalRConnectionStatusKey is the attribute Key conforming to the + // "signalr.connection.status" semantic conventions. It represents the signalR + // HTTP connection closure status. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "app_shutdown", "timeout" + SignalRConnectionStatusKey = attribute.Key("signalr.connection.status") + + // SignalRTransportKey is the attribute Key conforming to the + // "signalr.transport" semantic conventions. It represents the + // [SignalR transport type]. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "web_sockets", "long_polling" + // + // [SignalR transport type]: https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md + SignalRTransportKey = attribute.Key("signalr.transport") +) + +// Enum values for signalr.connection.status +var ( + // The connection was closed normally. + // Stability: stable + SignalRConnectionStatusNormalClosure = SignalRConnectionStatusKey.String("normal_closure") + // The connection was closed due to a timeout. + // Stability: stable + SignalRConnectionStatusTimeout = SignalRConnectionStatusKey.String("timeout") + // The connection was closed because the app is shutting down. + // Stability: stable + SignalRConnectionStatusAppShutdown = SignalRConnectionStatusKey.String("app_shutdown") +) + +// Enum values for signalr.transport +var ( + // ServerSentEvents protocol + // Stability: stable + SignalRTransportServerSentEvents = SignalRTransportKey.String("server_sent_events") + // LongPolling protocol + // Stability: stable + SignalRTransportLongPolling = SignalRTransportKey.String("long_polling") + // WebSockets protocol + // Stability: stable + SignalRTransportWebSockets = SignalRTransportKey.String("web_sockets") +) + +// Namespace: source +const ( + // SourceAddressKey is the attribute Key conforming to the "source.address" + // semantic conventions. It represents the source address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix domain + // socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "source.example.com", "10.1.2.80", "/tmp/my.sock" + // Note: When observed from the destination side, and when communicating through + // an intermediary, `source.address` SHOULD represent the source address behind + // any intermediaries, for example proxies, if it's available. 
+ SourceAddressKey = attribute.Key("source.address") + + // SourcePortKey is the attribute Key conforming to the "source.port" semantic + // conventions. It represents the source port number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 3389, 2888 + SourcePortKey = attribute.Key("source.port") +) + +// SourceAddress returns an attribute KeyValue conforming to the "source.address" +// semantic conventions. It represents the source address - domain name if +// available without reverse DNS lookup; otherwise, IP address or Unix domain +// socket name. +func SourceAddress(val string) attribute.KeyValue { + return SourceAddressKey.String(val) +} + +// SourcePort returns an attribute KeyValue conforming to the "source.port" +// semantic conventions. It represents the source port number. +func SourcePort(val int) attribute.KeyValue { + return SourcePortKey.Int(val) +} + +// Namespace: system +const ( + // SystemCPULogicalNumberKey is the attribute Key conforming to the + // "system.cpu.logical_number" semantic conventions. It represents the + // deprecated, use `cpu.logical_number` instead. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1 + SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") + + // SystemDeviceKey is the attribute Key conforming to the "system.device" + // semantic conventions. It represents the device identifier. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "(identifier)" + SystemDeviceKey = attribute.Key("system.device") + + // SystemFilesystemModeKey is the attribute Key conforming to the + // "system.filesystem.mode" semantic conventions. It represents the filesystem + // mode. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "rw, ro" + SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") + + // SystemFilesystemMountpointKey is the attribute Key conforming to the + // "system.filesystem.mountpoint" semantic conventions. It represents the + // filesystem mount path. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/mnt/data" + SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") + + // SystemFilesystemStateKey is the attribute Key conforming to the + // "system.filesystem.state" semantic conventions. It represents the filesystem + // state. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "used" + SystemFilesystemStateKey = attribute.Key("system.filesystem.state") + + // SystemFilesystemTypeKey is the attribute Key conforming to the + // "system.filesystem.type" semantic conventions. It represents the filesystem + // type. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ext4" + SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") + + // SystemMemoryStateKey is the attribute Key conforming to the + // "system.memory.state" semantic conventions. It represents the memory state. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "free", "cached" + SystemMemoryStateKey = attribute.Key("system.memory.state") + + // SystemPagingDirectionKey is the attribute Key conforming to the + // "system.paging.direction" semantic conventions. It represents the paging + // access direction. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "in" + SystemPagingDirectionKey = attribute.Key("system.paging.direction") + + // SystemPagingStateKey is the attribute Key conforming to the + // "system.paging.state" semantic conventions. It represents the memory paging + // state. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "free" + SystemPagingStateKey = attribute.Key("system.paging.state") + + // SystemPagingTypeKey is the attribute Key conforming to the + // "system.paging.type" semantic conventions. It represents the memory paging + // type. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "minor" + SystemPagingTypeKey = attribute.Key("system.paging.type") + + // SystemProcessStatusKey is the attribute Key conforming to the + // "system.process.status" semantic conventions. It represents the process + // state, e.g., [Linux Process State Codes]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "running" + // + // [Linux Process State Codes]: https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES + SystemProcessStatusKey = attribute.Key("system.process.status") +) + +// SystemCPULogicalNumber returns an attribute KeyValue conforming to the +// "system.cpu.logical_number" semantic conventions. It represents the +// deprecated, use `cpu.logical_number` instead. +func SystemCPULogicalNumber(val int) attribute.KeyValue { + return SystemCPULogicalNumberKey.Int(val) +} + +// SystemDevice returns an attribute KeyValue conforming to the "system.device" +// semantic conventions. It represents the device identifier. +func SystemDevice(val string) attribute.KeyValue { + return SystemDeviceKey.String(val) +} + +// SystemFilesystemMode returns an attribute KeyValue conforming to the +// "system.filesystem.mode" semantic conventions. 
It represents the filesystem +// mode. +func SystemFilesystemMode(val string) attribute.KeyValue { + return SystemFilesystemModeKey.String(val) +} + +// SystemFilesystemMountpoint returns an attribute KeyValue conforming to the +// "system.filesystem.mountpoint" semantic conventions. It represents the +// filesystem mount path. +func SystemFilesystemMountpoint(val string) attribute.KeyValue { + return SystemFilesystemMountpointKey.String(val) +} + +// Enum values for system.filesystem.state +var ( + // used + // Stability: development + SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") + // free + // Stability: development + SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") + // reserved + // Stability: development + SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") +) + +// Enum values for system.filesystem.type +var ( + // fat32 + // Stability: development + SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") + // exfat + // Stability: development + SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") + // ntfs + // Stability: development + SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") + // refs + // Stability: development + SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") + // hfsplus + // Stability: development + SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") + // ext4 + // Stability: development + SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") +) + +// Enum values for system.memory.state +var ( + // used + // Stability: development + SystemMemoryStateUsed = SystemMemoryStateKey.String("used") + // free + // Stability: development + SystemMemoryStateFree = SystemMemoryStateKey.String("free") + // buffers + // Stability: development + SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") + // cached + // Stability: development + SystemMemoryStateCached = 
SystemMemoryStateKey.String("cached") +) + +// Enum values for system.paging.direction +var ( + // in + // Stability: development + SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") + // out + // Stability: development + SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") +) + +// Enum values for system.paging.state +var ( + // used + // Stability: development + SystemPagingStateUsed = SystemPagingStateKey.String("used") + // free + // Stability: development + SystemPagingStateFree = SystemPagingStateKey.String("free") +) + +// Enum values for system.paging.type +var ( + // major + // Stability: development + SystemPagingTypeMajor = SystemPagingTypeKey.String("major") + // minor + // Stability: development + SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") +) + +// Enum values for system.process.status +var ( + // running + // Stability: development + SystemProcessStatusRunning = SystemProcessStatusKey.String("running") + // sleeping + // Stability: development + SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping") + // stopped + // Stability: development + SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped") + // defunct + // Stability: development + SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct") +) + +// Namespace: telemetry +const ( + // TelemetryDistroNameKey is the attribute Key conforming to the + // "telemetry.distro.name" semantic conventions. It represents the name of the + // auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "parts-unlimited-java" + // Note: Official auto instrumentation agents and distributions SHOULD set the + // `telemetry.distro.name` attribute to + // a string starting with `opentelemetry-`, e.g. + // `opentelemetry-java-instrumentation`. 
+ TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") + + // TelemetryDistroVersionKey is the attribute Key conforming to the + // "telemetry.distro.version" semantic conventions. It represents the version + // string of the auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.2.3" + TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") + + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the language of + // the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "opentelemetry" + // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute to + // `opentelemetry`. + // If another SDK, like a fork or a vendor-provided implementation, is used, + // this SDK MUST set the + // `telemetry.sdk.name` attribute to the fully-qualified class or module name of + // this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier `opentelemetry` is reserved and MUST NOT be used in this case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "1.2.3" + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") +) + +// TelemetryDistroName returns an attribute KeyValue conforming to the +// "telemetry.distro.name" semantic conventions. It represents the name of the +// auto instrumentation agent or distribution, if used. +func TelemetryDistroName(val string) attribute.KeyValue { + return TelemetryDistroNameKey.String(val) +} + +// TelemetryDistroVersion returns an attribute KeyValue conforming to the +// "telemetry.distro.version" semantic conventions. It represents the version +// string of the auto instrumentation agent or distribution, if used. +func TelemetryDistroVersion(val string) attribute.KeyValue { + return TelemetryDistroVersionKey.String(val) +} + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version string +// of the telemetry SDK. 
+func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// Enum values for telemetry.sdk.language +var ( + // cpp + // Stability: stable + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + // Stability: stable + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + // Stability: stable + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + // Stability: stable + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + // Stability: stable + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + // Stability: stable + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + // Stability: stable + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + // Stability: stable + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + // Stability: stable + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // rust + // Stability: stable + TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") + // swift + // Stability: stable + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") + // webjs + // Stability: stable + TelemetrySDKLanguageWebJS = TelemetrySDKLanguageKey.String("webjs") +) + +// Namespace: test +const ( + // TestCaseNameKey is the attribute Key conforming to the "test.case.name" + // semantic conventions. It represents the fully qualified human readable name + // of the [test case]. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "org.example.TestCase1.test1", "example/tests/TestCase1.test1", + // "ExampleTestCase1_test1" + // + // [test case]: https://wikipedia.org/wiki/Test_case + TestCaseNameKey = attribute.Key("test.case.name") + + // TestCaseResultStatusKey is the attribute Key conforming to the + // "test.case.result.status" semantic conventions. It represents the status of + // the actual test case result from test execution. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "pass", "fail" + TestCaseResultStatusKey = attribute.Key("test.case.result.status") + + // TestSuiteNameKey is the attribute Key conforming to the "test.suite.name" + // semantic conventions. It represents the human readable name of a [test suite] + // . + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TestSuite1" + // + // [test suite]: https://wikipedia.org/wiki/Test_suite + TestSuiteNameKey = attribute.Key("test.suite.name") + + // TestSuiteRunStatusKey is the attribute Key conforming to the + // "test.suite.run.status" semantic conventions. It represents the status of the + // test suite run. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "success", "failure", "skipped", "aborted", "timed_out", + // "in_progress" + TestSuiteRunStatusKey = attribute.Key("test.suite.run.status") +) + +// TestCaseName returns an attribute KeyValue conforming to the "test.case.name" +// semantic conventions. It represents the fully qualified human readable name of +// the [test case]. +// +// [test case]: https://wikipedia.org/wiki/Test_case +func TestCaseName(val string) attribute.KeyValue { + return TestCaseNameKey.String(val) +} + +// TestSuiteName returns an attribute KeyValue conforming to the +// "test.suite.name" semantic conventions. 
It represents the human readable name +// of a [test suite]. +// +// [test suite]: https://wikipedia.org/wiki/Test_suite +func TestSuiteName(val string) attribute.KeyValue { + return TestSuiteNameKey.String(val) +} + +// Enum values for test.case.result.status +var ( + // pass + // Stability: development + TestCaseResultStatusPass = TestCaseResultStatusKey.String("pass") + // fail + // Stability: development + TestCaseResultStatusFail = TestCaseResultStatusKey.String("fail") +) + +// Enum values for test.suite.run.status +var ( + // success + // Stability: development + TestSuiteRunStatusSuccess = TestSuiteRunStatusKey.String("success") + // failure + // Stability: development + TestSuiteRunStatusFailure = TestSuiteRunStatusKey.String("failure") + // skipped + // Stability: development + TestSuiteRunStatusSkipped = TestSuiteRunStatusKey.String("skipped") + // aborted + // Stability: development + TestSuiteRunStatusAborted = TestSuiteRunStatusKey.String("aborted") + // timed_out + // Stability: development + TestSuiteRunStatusTimedOut = TestSuiteRunStatusKey.String("timed_out") + // in_progress + // Stability: development + TestSuiteRunStatusInProgress = TestSuiteRunStatusKey.String("in_progress") +) + +// Namespace: thread +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed to OS + // thread ID). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + ThreadIDKey = attribute.Key("thread.id") + + // ThreadNameKey is the attribute Key conforming to the "thread.name" semantic + // conventions. It represents the current thread name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: main + ThreadNameKey = attribute.Key("thread.name") +) + +// ThreadID returns an attribute KeyValue conforming to the "thread.id" semantic +// conventions. 
It represents the current "managed" thread ID (as opposed to OS +// thread ID). +func ThreadID(val int) attribute.KeyValue { + return ThreadIDKey.Int(val) +} + +// ThreadName returns an attribute KeyValue conforming to the "thread.name" +// semantic conventions. It represents the current thread name. +func ThreadName(val string) attribute.KeyValue { + return ThreadNameKey.String(val) +} + +// Namespace: tls +const ( + // TLSCipherKey is the attribute Key conforming to the "tls.cipher" semantic + // conventions. It represents the string indicating the [cipher] used during the + // current connection. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + // "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256" + // Note: The values allowed for `tls.cipher` MUST be one of the `Descriptions` + // of the [registered TLS Cipher Suits]. + // + // [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5 + // [registered TLS Cipher Suits]: https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4 + TLSCipherKey = attribute.Key("tls.cipher") + + // TLSClientCertificateKey is the attribute Key conforming to the + // "tls.client.certificate" semantic conventions. It represents the PEM-encoded + // stand-alone certificate offered by the client. This is usually + // mutually-exclusive of `client.certificate_chain` since this value also exists + // in that list. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MII..." + TLSClientCertificateKey = attribute.Key("tls.client.certificate") + + // TLSClientCertificateChainKey is the attribute Key conforming to the + // "tls.client.certificate_chain" semantic conventions. It represents the array + // of PEM-encoded certificates that make up the certificate chain offered by the + // client. 
This is usually mutually-exclusive of `client.certificate` since that + // value should be the first certificate in the chain. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MII...", "MI..." + TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") + + // TLSClientHashMd5Key is the attribute Key conforming to the + // "tls.client.hash.md5" semantic conventions. It represents the certificate + // fingerprint using the MD5 digest of DER-encoded version of certificate + // offered by the client. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC" + TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") + + // TLSClientHashSha1Key is the attribute Key conforming to the + // "tls.client.hash.sha1" semantic conventions. It represents the certificate + // fingerprint using the SHA1 digest of DER-encoded version of certificate + // offered by the client. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A" + TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") + + // TLSClientHashSha256Key is the attribute Key conforming to the + // "tls.client.hash.sha256" semantic conventions. It represents the certificate + // fingerprint using the SHA256 digest of DER-encoded version of certificate + // offered by the client. For consistency with other hash values, this value + // should be formatted as an uppercase hash. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0" + TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") + + // TLSClientIssuerKey is the attribute Key conforming to the "tls.client.issuer" + // semantic conventions. It represents the distinguished name of [subject] of + // the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com" + // + // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 + TLSClientIssuerKey = attribute.Key("tls.client.issuer") + + // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" + // semantic conventions. It represents a hash that identifies clients based on + // how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "d4e5b18d6b55c71272893221c96ba240" + TLSClientJa3Key = attribute.Key("tls.client.ja3") + + // TLSClientNotAfterKey is the attribute Key conforming to the + // "tls.client.not_after" semantic conventions. It represents the date/Time + // indicating when client certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T00:00:00.000Z" + TLSClientNotAfterKey = attribute.Key("tls.client.not_after") + + // TLSClientNotBeforeKey is the attribute Key conforming to the + // "tls.client.not_before" semantic conventions. It represents the date/Time + // indicating when client certificate is first considered valid. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1970-01-01T00:00:00.000Z" + TLSClientNotBeforeKey = attribute.Key("tls.client.not_before") + + // TLSClientSubjectKey is the attribute Key conforming to the + // "tls.client.subject" semantic conventions. It represents the distinguished + // name of subject of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=myclient, OU=Documentation Team, DC=example, DC=com" + TLSClientSubjectKey = attribute.Key("tls.client.subject") + + // TLSClientSupportedCiphersKey is the attribute Key conforming to the + // "tls.client.supported_ciphers" semantic conventions. It represents the array + // of ciphers offered by the client during the client hello. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" + TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") + + // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic + // conventions. It represents the string indicating the curve used for the given + // cipher, when applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "secp256r1" + TLSCurveKey = attribute.Key("tls.curve") + + // TLSEstablishedKey is the attribute Key conforming to the "tls.established" + // semantic conventions. It represents the boolean flag indicating if the TLS + // negotiation was successful and transitioned to an encrypted tunnel. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: true + TLSEstablishedKey = attribute.Key("tls.established") + + // TLSNextProtocolKey is the attribute Key conforming to the "tls.next_protocol" + // semantic conventions. 
It represents the string indicating the protocol being + // tunneled. Per the values in the [IANA registry], this string should be lower + // case. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "http/1.1" + // + // [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids + TLSNextProtocolKey = attribute.Key("tls.next_protocol") + + // TLSProtocolNameKey is the attribute Key conforming to the "tls.protocol.name" + // semantic conventions. It represents the normalized lowercase protocol name + // parsed from original string of the negotiated [SSL/TLS protocol version]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values + TLSProtocolNameKey = attribute.Key("tls.protocol.name") + + // TLSProtocolVersionKey is the attribute Key conforming to the + // "tls.protocol.version" semantic conventions. It represents the numeric part + // of the version parsed from the original string of the negotiated + // [SSL/TLS protocol version]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.2", "3" + // + // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values + TLSProtocolVersionKey = attribute.Key("tls.protocol.version") + + // TLSResumedKey is the attribute Key conforming to the "tls.resumed" semantic + // conventions. It represents the boolean flag indicating if this TLS connection + // was resumed from an existing TLS negotiation. 
+ // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: true + TLSResumedKey = attribute.Key("tls.resumed") + + // TLSServerCertificateKey is the attribute Key conforming to the + // "tls.server.certificate" semantic conventions. It represents the PEM-encoded + // stand-alone certificate offered by the server. This is usually + // mutually-exclusive of `server.certificate_chain` since this value also exists + // in that list. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MII..." + TLSServerCertificateKey = attribute.Key("tls.server.certificate") + + // TLSServerCertificateChainKey is the attribute Key conforming to the + // "tls.server.certificate_chain" semantic conventions. It represents the array + // of PEM-encoded certificates that make up the certificate chain offered by the + // server. This is usually mutually-exclusive of `server.certificate` since that + // value should be the first certificate in the chain. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MII...", "MI..." + TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain") + + // TLSServerHashMd5Key is the attribute Key conforming to the + // "tls.server.hash.md5" semantic conventions. It represents the certificate + // fingerprint using the MD5 digest of DER-encoded version of certificate + // offered by the server. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC" + TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5") + + // TLSServerHashSha1Key is the attribute Key conforming to the + // "tls.server.hash.sha1" semantic conventions. 
It represents the certificate + // fingerprint using the SHA1 digest of DER-encoded version of certificate + // offered by the server. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A" + TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1") + + // TLSServerHashSha256Key is the attribute Key conforming to the + // "tls.server.hash.sha256" semantic conventions. It represents the certificate + // fingerprint using the SHA256 digest of DER-encoded version of certificate + // offered by the server. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0" + TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256") + + // TLSServerIssuerKey is the attribute Key conforming to the "tls.server.issuer" + // semantic conventions. It represents the distinguished name of [subject] of + // the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com" + // + // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 + TLSServerIssuerKey = attribute.Key("tls.server.issuer") + + // TLSServerJa3sKey is the attribute Key conforming to the "tls.server.ja3s" + // semantic conventions. It represents a hash that identifies servers based on + // how they perform an SSL/TLS handshake. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "d4e5b18d6b55c71272893221c96ba240" + TLSServerJa3sKey = attribute.Key("tls.server.ja3s") + + // TLSServerNotAfterKey is the attribute Key conforming to the + // "tls.server.not_after" semantic conventions. It represents the date/Time + // indicating when server certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T00:00:00.000Z" + TLSServerNotAfterKey = attribute.Key("tls.server.not_after") + + // TLSServerNotBeforeKey is the attribute Key conforming to the + // "tls.server.not_before" semantic conventions. It represents the date/Time + // indicating when server certificate is first considered valid. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1970-01-01T00:00:00.000Z" + TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") + + // TLSServerSubjectKey is the attribute Key conforming to the + // "tls.server.subject" semantic conventions. It represents the distinguished + // name of subject of the x.509 certificate presented by the server. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=myserver, OU=Documentation Team, DC=example, DC=com" + TLSServerSubjectKey = attribute.Key("tls.server.subject") +) + +// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" +// semantic conventions. It represents the string indicating the [cipher] used +// during the current connection. +// +// [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5 +func TLSCipher(val string) attribute.KeyValue { + return TLSCipherKey.String(val) +} + +// TLSClientCertificate returns an attribute KeyValue conforming to the +// "tls.client.certificate" semantic conventions. 
It represents the PEM-encoded +// stand-alone certificate offered by the client. This is usually +// mutually-exclusive of `client.certificate_chain` since this value also exists +// in that list. +func TLSClientCertificate(val string) attribute.KeyValue { + return TLSClientCertificateKey.String(val) +} + +// TLSClientCertificateChain returns an attribute KeyValue conforming to the +// "tls.client.certificate_chain" semantic conventions. It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by the +// client. This is usually mutually-exclusive of `client.certificate` since that +// value should be the first certificate in the chain. +func TLSClientCertificateChain(val ...string) attribute.KeyValue { + return TLSClientCertificateChainKey.StringSlice(val) +} + +// TLSClientHashMd5 returns an attribute KeyValue conforming to the +// "tls.client.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of DER-encoded version of certificate offered +// by the client. For consistency with other hash values, this value should be +// formatted as an uppercase hash. +func TLSClientHashMd5(val string) attribute.KeyValue { + return TLSClientHashMd5Key.String(val) +} + +// TLSClientHashSha1 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha1(val string) attribute.KeyValue { + return TLSClientHashSha1Key.String(val) +} + +// TLSClientHashSha256 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of DER-encoded version of certificate +// offered by the client. 
For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha256(val string) attribute.KeyValue { + return TLSClientHashSha256Key.String(val) +} + +// TLSClientIssuer returns an attribute KeyValue conforming to the +// "tls.client.issuer" semantic conventions. It represents the distinguished name +// of [subject] of the issuer of the x.509 certificate presented by the client. +// +// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 +func TLSClientIssuer(val string) attribute.KeyValue { + return TLSClientIssuerKey.String(val) +} + +// TLSClientJa3 returns an attribute KeyValue conforming to the "tls.client.ja3" +// semantic conventions. It represents a hash that identifies clients based on +// how they perform an SSL/TLS handshake. +func TLSClientJa3(val string) attribute.KeyValue { + return TLSClientJa3Key.String(val) +} + +// TLSClientNotAfter returns an attribute KeyValue conforming to the +// "tls.client.not_after" semantic conventions. It represents the date/Time +// indicating when client certificate is no longer considered valid. +func TLSClientNotAfter(val string) attribute.KeyValue { + return TLSClientNotAfterKey.String(val) +} + +// TLSClientNotBefore returns an attribute KeyValue conforming to the +// "tls.client.not_before" semantic conventions. It represents the date/Time +// indicating when client certificate is first considered valid. +func TLSClientNotBefore(val string) attribute.KeyValue { + return TLSClientNotBeforeKey.String(val) +} + +// TLSClientSubject returns an attribute KeyValue conforming to the +// "tls.client.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the client. 
+func TLSClientSubject(val string) attribute.KeyValue { + return TLSClientSubjectKey.String(val) +} + +// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the +// "tls.client.supported_ciphers" semantic conventions. It represents the array +// of ciphers offered by the client during the client hello. +func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { + return TLSClientSupportedCiphersKey.StringSlice(val) +} + +// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" semantic +// conventions. It represents the string indicating the curve used for the given +// cipher, when applicable. +func TLSCurve(val string) attribute.KeyValue { + return TLSCurveKey.String(val) +} + +// TLSEstablished returns an attribute KeyValue conforming to the +// "tls.established" semantic conventions. It represents the boolean flag +// indicating if the TLS negotiation was successful and transitioned to an +// encrypted tunnel. +func TLSEstablished(val bool) attribute.KeyValue { + return TLSEstablishedKey.Bool(val) +} + +// TLSNextProtocol returns an attribute KeyValue conforming to the +// "tls.next_protocol" semantic conventions. It represents the string indicating +// the protocol being tunneled. Per the values in the [IANA registry], this +// string should be lower case. +// +// [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids +func TLSNextProtocol(val string) attribute.KeyValue { + return TLSNextProtocolKey.String(val) +} + +// TLSProtocolVersion returns an attribute KeyValue conforming to the +// "tls.protocol.version" semantic conventions. It represents the numeric part of +// the version parsed from the original string of the negotiated +// [SSL/TLS protocol version]. 
+// +// [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values +func TLSProtocolVersion(val string) attribute.KeyValue { + return TLSProtocolVersionKey.String(val) +} + +// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed" +// semantic conventions. It represents the boolean flag indicating if this TLS +// connection was resumed from an existing TLS negotiation. +func TLSResumed(val bool) attribute.KeyValue { + return TLSResumedKey.Bool(val) +} + +// TLSServerCertificate returns an attribute KeyValue conforming to the +// "tls.server.certificate" semantic conventions. It represents the PEM-encoded +// stand-alone certificate offered by the server. This is usually +// mutually-exclusive of `server.certificate_chain` since this value also exists +// in that list. +func TLSServerCertificate(val string) attribute.KeyValue { + return TLSServerCertificateKey.String(val) +} + +// TLSServerCertificateChain returns an attribute KeyValue conforming to the +// "tls.server.certificate_chain" semantic conventions. It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by the +// server. This is usually mutually-exclusive of `server.certificate` since that +// value should be the first certificate in the chain. +func TLSServerCertificateChain(val ...string) attribute.KeyValue { + return TLSServerCertificateChainKey.StringSlice(val) +} + +// TLSServerHashMd5 returns an attribute KeyValue conforming to the +// "tls.server.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of DER-encoded version of certificate offered +// by the server. For consistency with other hash values, this value should be +// formatted as an uppercase hash. 
+func TLSServerHashMd5(val string) attribute.KeyValue { + return TLSServerHashMd5Key.String(val) +} + +// TLSServerHashSha1 returns an attribute KeyValue conforming to the +// "tls.server.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashSha1(val string) attribute.KeyValue { + return TLSServerHashSha1Key.String(val) +} + +// TLSServerHashSha256 returns an attribute KeyValue conforming to the +// "tls.server.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of DER-encoded version of certificate +// offered by the server. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSServerHashSha256(val string) attribute.KeyValue { + return TLSServerHashSha256Key.String(val) +} + +// TLSServerIssuer returns an attribute KeyValue conforming to the +// "tls.server.issuer" semantic conventions. It represents the distinguished name +// of [subject] of the issuer of the x.509 certificate presented by the client. +// +// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 +func TLSServerIssuer(val string) attribute.KeyValue { + return TLSServerIssuerKey.String(val) +} + +// TLSServerJa3s returns an attribute KeyValue conforming to the +// "tls.server.ja3s" semantic conventions. It represents a hash that identifies +// servers based on how they perform an SSL/TLS handshake. +func TLSServerJa3s(val string) attribute.KeyValue { + return TLSServerJa3sKey.String(val) +} + +// TLSServerNotAfter returns an attribute KeyValue conforming to the +// "tls.server.not_after" semantic conventions. It represents the date/Time +// indicating when server certificate is no longer considered valid. 
+func TLSServerNotAfter(val string) attribute.KeyValue { + return TLSServerNotAfterKey.String(val) +} + +// TLSServerNotBefore returns an attribute KeyValue conforming to the +// "tls.server.not_before" semantic conventions. It represents the date/Time +// indicating when server certificate is first considered valid. +func TLSServerNotBefore(val string) attribute.KeyValue { + return TLSServerNotBeforeKey.String(val) +} + +// TLSServerSubject returns an attribute KeyValue conforming to the +// "tls.server.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the server. +func TLSServerSubject(val string) attribute.KeyValue { + return TLSServerSubjectKey.String(val) +} + +// Enum values for tls.protocol.name +var ( + // ssl + // Stability: development + TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") + // tls + // Stability: development + TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") +) + +// Namespace: url +const ( + // URLDomainKey is the attribute Key conforming to the "url.domain" semantic + // conventions. It represents the domain extracted from the `url.full`, such as + // "opentelemetry.io". + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "www.foo.bar", "opentelemetry.io", "3.12.167.2", + // "[1080:0:0:0:8:800:200C:417A]" + // Note: In some cases a URL may refer to an IP and/or port directly, without a + // domain name. In this case, the IP address would go to the domain field. If + // the URL contains a [literal IPv6 address] enclosed by `[` and `]`, the `[` + // and `]` characters should also be captured in the domain field. + // + // [literal IPv6 address]: https://www.rfc-editor.org/rfc/rfc2732#section-2 + URLDomainKey = attribute.Key("url.domain") + + // URLExtensionKey is the attribute Key conforming to the "url.extension" + // semantic conventions. 
It represents the file extension extracted from the + // `url.full`, excluding the leading dot. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "png", "gz" + // Note: The file extension is only set if it exists, as not every url has a + // file extension. When the file name has multiple extensions `example.tar.gz`, + // only the last one should be captured `gz`, not `tar.gz`. + URLExtensionKey = attribute.Key("url.extension") + + // URLFragmentKey is the attribute Key conforming to the "url.fragment" semantic + // conventions. It represents the [URI fragment] component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "SemConv" + // + // [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5 + URLFragmentKey = attribute.Key("url.fragment") + + // URLFullKey is the attribute Key conforming to the "url.full" semantic + // conventions. It represents the absolute URL describing a network resource + // according to [RFC3986]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", "//localhost" + // Note: For network calls, URL usually has + // `scheme://host[:port][path][?query][#fragment]` format, where the fragment + // is not transmitted over HTTP, but if it is known, it SHOULD be included + // nevertheless. + // + // `url.full` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. + // In such case username and password SHOULD be redacted and attribute's value + // SHOULD be `https://REDACTED:REDACTED@www.example.com/`. + // + // `url.full` SHOULD capture the absolute URL when it is available (or can be + // reconstructed). + // + // Sensitive content provided in `url.full` SHOULD be scrubbed when + // instrumentations can identify it. 
+ // + // + // Query string values for the following keys SHOULD be redacted by default and + // replaced by the + // value `REDACTED`: + // + // - [`AWSAccessKeyId`] + // - [`Signature`] + // - [`sig`] + // - [`X-Goog-Signature`] + // + // This list is subject to change over time. + // + // When a query string value is redacted, the query string key SHOULD still be + // preserved, e.g. + // `https://www.example.com/path?color=blue&sig=REDACTED`. + // + // [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986 + // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token + // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls + URLFullKey = attribute.Key("url.full") + + // URLOriginalKey is the attribute Key conforming to the "url.original" semantic + // conventions. It represents the unmodified original URL as seen in the event + // source. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", + // "search?q=OpenTelemetry" + // Note: In network monitoring, the observed URL may be a full URL, whereas in + // access logs, the URL is often just represented as a path. This field is meant + // to represent the URL as it was observed, complete or not. + // `url.original` might contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case password and + // username SHOULD NOT be redacted and attribute's value SHOULD remain the same. + URLOriginalKey = attribute.Key("url.original") + + // URLPathKey is the attribute Key conforming to the "url.path" semantic + // conventions. 
It represents the [URI path] component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "/search" + // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when + // instrumentations can identify it. + // + // [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3 + URLPathKey = attribute.Key("url.path") + + // URLPortKey is the attribute Key conforming to the "url.port" semantic + // conventions. It represents the port extracted from the `url.full`. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 443 + URLPortKey = attribute.Key("url.port") + + // URLQueryKey is the attribute Key conforming to the "url.query" semantic + // conventions. It represents the [URI query] component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "q=OpenTelemetry" + // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when + // instrumentations can identify it. + // + // + // Query string values for the following keys SHOULD be redacted by default and + // replaced by the value `REDACTED`: + // + // - [`AWSAccessKeyId`] + // - [`Signature`] + // - [`sig`] + // - [`X-Goog-Signature`] + // + // This list is subject to change over time. + // + // When a query string value is redacted, the query string key SHOULD still be + // preserved, e.g. + // `q=OpenTelemetry&sig=REDACTED`. 
+ // + // [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4 + // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token + // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls + URLQueryKey = attribute.Key("url.query") + + // URLRegisteredDomainKey is the attribute Key conforming to the + // "url.registered_domain" semantic conventions. It represents the highest + // registered url domain, stripped of the subdomain. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "example.com", "foo.co.uk" + // Note: This value can be determined precisely with the [public suffix list]. + // For example, the registered domain for `foo.example.com` is `example.com`. + // Trying to approximate this by simply taking the last two labels will not work + // well for TLDs such as `co.uk`. + // + // [public suffix list]: https://publicsuffix.org/ + URLRegisteredDomainKey = attribute.Key("url.registered_domain") + + // URLSchemeKey is the attribute Key conforming to the "url.scheme" semantic + // conventions. It represents the [URI scheme] component identifying the used + // protocol. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "https", "ftp", "telnet" + // + // [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 + URLSchemeKey = attribute.Key("url.scheme") + + // URLSubdomainKey is the attribute Key conforming to the "url.subdomain" + // semantic conventions. It represents the subdomain portion of a fully + // qualified domain name includes all of the names except the host name under + // the registered_domain. 
In a partially qualified domain, or if the + // qualification level of the full name cannot be determined, subdomain contains + // all of the names below the registered domain. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "east", "sub2.sub1" + // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If the + // domain has multiple levels of subdomain, such as `sub2.sub1.example.com`, the + // subdomain field should contain `sub2.sub1`, with no trailing period. + URLSubdomainKey = attribute.Key("url.subdomain") + + // URLTemplateKey is the attribute Key conforming to the "url.template" semantic + // conventions. It represents the low-cardinality template of an + // [absolute path reference]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/users/{id}", "/users/:id", "/users?id={id}" + // + // [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 + URLTemplateKey = attribute.Key("url.template") + + // URLTopLevelDomainKey is the attribute Key conforming to the + // "url.top_level_domain" semantic conventions. It represents the effective top + // level domain (eTLD), also known as the domain suffix, is the last part of the + // domain name. For example, the top level domain for example.com is `com`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "com", "co.uk" + // Note: This value can be determined precisely with the [public suffix list]. + // + // [public suffix list]: https://publicsuffix.org/ + URLTopLevelDomainKey = attribute.Key("url.top_level_domain") +) + +// URLDomain returns an attribute KeyValue conforming to the "url.domain" +// semantic conventions. It represents the domain extracted from the `url.full`, +// such as "opentelemetry.io". 
+func URLDomain(val string) attribute.KeyValue { + return URLDomainKey.String(val) +} + +// URLExtension returns an attribute KeyValue conforming to the "url.extension" +// semantic conventions. It represents the file extension extracted from the +// `url.full`, excluding the leading dot. +func URLExtension(val string) attribute.KeyValue { + return URLExtensionKey.String(val) +} + +// URLFragment returns an attribute KeyValue conforming to the "url.fragment" +// semantic conventions. It represents the [URI fragment] component. +// +// [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5 +func URLFragment(val string) attribute.KeyValue { + return URLFragmentKey.String(val) +} + +// URLFull returns an attribute KeyValue conforming to the "url.full" semantic +// conventions. It represents the absolute URL describing a network resource +// according to [RFC3986]. +// +// [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986 +func URLFull(val string) attribute.KeyValue { + return URLFullKey.String(val) +} + +// URLOriginal returns an attribute KeyValue conforming to the "url.original" +// semantic conventions. It represents the unmodified original URL as seen in the +// event source. +func URLOriginal(val string) attribute.KeyValue { + return URLOriginalKey.String(val) +} + +// URLPath returns an attribute KeyValue conforming to the "url.path" semantic +// conventions. It represents the [URI path] component. +// +// [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3 +func URLPath(val string) attribute.KeyValue { + return URLPathKey.String(val) +} + +// URLPort returns an attribute KeyValue conforming to the "url.port" semantic +// conventions. It represents the port extracted from the `url.full`. +func URLPort(val int) attribute.KeyValue { + return URLPortKey.Int(val) +} + +// URLQuery returns an attribute KeyValue conforming to the "url.query" semantic +// conventions. It represents the [URI query] component. 
+// +// [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4 +func URLQuery(val string) attribute.KeyValue { + return URLQueryKey.String(val) +} + +// URLRegisteredDomain returns an attribute KeyValue conforming to the +// "url.registered_domain" semantic conventions. It represents the highest +// registered url domain, stripped of the subdomain. +func URLRegisteredDomain(val string) attribute.KeyValue { + return URLRegisteredDomainKey.String(val) +} + +// URLScheme returns an attribute KeyValue conforming to the "url.scheme" +// semantic conventions. It represents the [URI scheme] component identifying the +// used protocol. +// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +func URLScheme(val string) attribute.KeyValue { + return URLSchemeKey.String(val) +} + +// URLSubdomain returns an attribute KeyValue conforming to the "url.subdomain" +// semantic conventions. It represents the subdomain portion of a fully qualified +// domain name includes all of the names except the host name under the +// registered_domain. In a partially qualified domain, or if the qualification +// level of the full name cannot be determined, subdomain contains all of the +// names below the registered domain. +func URLSubdomain(val string) attribute.KeyValue { + return URLSubdomainKey.String(val) +} + +// URLTemplate returns an attribute KeyValue conforming to the "url.template" +// semantic conventions. It represents the low-cardinality template of an +// [absolute path reference]. +// +// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 +func URLTemplate(val string) attribute.KeyValue { + return URLTemplateKey.String(val) +} + +// URLTopLevelDomain returns an attribute KeyValue conforming to the +// "url.top_level_domain" semantic conventions. It represents the effective top +// level domain (eTLD), also known as the domain suffix, is the last part of the +// domain name. 
For example, the top level domain for example.com is `com`. +func URLTopLevelDomain(val string) attribute.KeyValue { + return URLTopLevelDomainKey.String(val) +} + +// Namespace: user +const ( + // UserEmailKey is the attribute Key conforming to the "user.email" semantic + // conventions. It represents the user email address. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "a.einstein@example.com" + UserEmailKey = attribute.Key("user.email") + + // UserFullNameKey is the attribute Key conforming to the "user.full_name" + // semantic conventions. It represents the user's full name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Albert Einstein" + UserFullNameKey = attribute.Key("user.full_name") + + // UserHashKey is the attribute Key conforming to the "user.hash" semantic + // conventions. It represents the unique user hash to correlate information for + // a user in anonymized form. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "364fc68eaf4c8acec74a4e52d7d1feaa" + // Note: Useful if `user.id` or `user.name` contain confidential information and + // cannot be used. + UserHashKey = attribute.Key("user.hash") + + // UserIDKey is the attribute Key conforming to the "user.id" semantic + // conventions. It represents the unique identifier of the user. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "S-1-5-21-202424912787-2692429404-2351956786-1000" + UserIDKey = attribute.Key("user.id") + + // UserNameKey is the attribute Key conforming to the "user.name" semantic + // conventions. It represents the short name or login/username of the user. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "a.einstein" + UserNameKey = attribute.Key("user.name") + + // UserRolesKey is the attribute Key conforming to the "user.roles" semantic + // conventions. It represents the array of user roles at the time of the event. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "admin", "reporting_user" + UserRolesKey = attribute.Key("user.roles") +) + +// UserEmail returns an attribute KeyValue conforming to the "user.email" +// semantic conventions. It represents the user email address. +func UserEmail(val string) attribute.KeyValue { + return UserEmailKey.String(val) +} + +// UserFullName returns an attribute KeyValue conforming to the "user.full_name" +// semantic conventions. It represents the user's full name. +func UserFullName(val string) attribute.KeyValue { + return UserFullNameKey.String(val) +} + +// UserHash returns an attribute KeyValue conforming to the "user.hash" semantic +// conventions. It represents the unique user hash to correlate information for a +// user in anonymized form. +func UserHash(val string) attribute.KeyValue { + return UserHashKey.String(val) +} + +// UserID returns an attribute KeyValue conforming to the "user.id" semantic +// conventions. It represents the unique identifier of the user. +func UserID(val string) attribute.KeyValue { + return UserIDKey.String(val) +} + +// UserName returns an attribute KeyValue conforming to the "user.name" semantic +// conventions. It represents the short name or login/username of the user. +func UserName(val string) attribute.KeyValue { + return UserNameKey.String(val) +} + +// UserRoles returns an attribute KeyValue conforming to the "user.roles" +// semantic conventions. It represents the array of user roles at the time of the +// event. 
+func UserRoles(val ...string) attribute.KeyValue { + return UserRolesKey.StringSlice(val) +} + +// Namespace: user_agent +const ( + // UserAgentNameKey is the attribute Key conforming to the "user_agent.name" + // semantic conventions. It represents the name of the user-agent extracted from + // original. Usually refers to the browser's name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Safari", "YourApp" + // Note: [Example] of extracting browser's name from original string. In the + // case of using a user-agent for non-browser products, such as microservices + // with multiple names/versions inside the `user_agent.original`, the most + // significant name SHOULD be selected. In such a scenario it should align with + // `user_agent.version` + // + // [Example]: https://www.whatsmyua.info + UserAgentNameKey = attribute.Key("user_agent.name") + + // UserAgentOriginalKey is the attribute Key conforming to the + // "user_agent.original" semantic conventions. It represents the value of the + // [HTTP User-Agent] header sent by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "CERN-LineMode/2.15 libwww/2.17b3", "Mozilla/5.0 (iPhone; CPU + // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) + // Version/14.1.2 Mobile/15E148 Safari/604.1", "YourApp/1.0.0 + // grpc-java-okhttp/1.27.2" + // + // [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent + UserAgentOriginalKey = attribute.Key("user_agent.original") + + // UserAgentOSNameKey is the attribute Key conforming to the + // "user_agent.os.name" semantic conventions. It represents the human readable + // operating system name. 
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "iOS", "Android", "Ubuntu"
+	// Note: For mapping user agent strings to OS names, libraries such as
+	// [ua-parser] can be utilized.
+	//
+	// [ua-parser]: https://github.com/ua-parser
+	UserAgentOSNameKey = attribute.Key("user_agent.os.name")
+
+	// UserAgentOSVersionKey is the attribute Key conforming to the
+	// "user_agent.os.version" semantic conventions. It represents the version
+	// string of the operating system as defined in [Version Attributes].
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "14.2.1", "18.04.1"
+	// Note: For mapping user agent strings to OS versions, libraries such as
+	// [ua-parser] can be utilized.
+	//
+	// [Version Attributes]: /docs/resource/README.md#version-attributes
+	// [ua-parser]: https://github.com/ua-parser
+	UserAgentOSVersionKey = attribute.Key("user_agent.os.version")
+
+	// UserAgentSyntheticTypeKey is the attribute Key conforming to the
+	// "user_agent.synthetic.type" semantic conventions. It specifies
+	// the category of synthetic traffic, such as tests or bots.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: This attribute MAY be derived from the contents of the
+	// `user_agent.original` attribute. Components that populate the attribute are
+	// responsible for determining what they consider to be synthetic bot or test
+	// traffic. This attribute can either be set for self-identification purposes,
+	// or on telemetry detected to be generated as a result of a synthetic request.
+	// This attribute is useful for distinguishing between genuine client traffic
+	// and synthetic traffic generated by bots or tests.
+ UserAgentSyntheticTypeKey = attribute.Key("user_agent.synthetic.type") + + // UserAgentVersionKey is the attribute Key conforming to the + // "user_agent.version" semantic conventions. It represents the version of the + // user-agent extracted from original. Usually refers to the browser's version. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "14.1.2", "1.0.0" + // Note: [Example] of extracting browser's version from original string. In the + // case of using a user-agent for non-browser products, such as microservices + // with multiple names/versions inside the `user_agent.original`, the most + // significant version SHOULD be selected. In such a scenario it should align + // with `user_agent.name` + // + // [Example]: https://www.whatsmyua.info + UserAgentVersionKey = attribute.Key("user_agent.version") +) + +// UserAgentName returns an attribute KeyValue conforming to the +// "user_agent.name" semantic conventions. It represents the name of the +// user-agent extracted from original. Usually refers to the browser's name. +func UserAgentName(val string) attribute.KeyValue { + return UserAgentNameKey.String(val) +} + +// UserAgentOriginal returns an attribute KeyValue conforming to the +// "user_agent.original" semantic conventions. It represents the value of the +// [HTTP User-Agent] header sent by the client. +// +// [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent +func UserAgentOriginal(val string) attribute.KeyValue { + return UserAgentOriginalKey.String(val) +} + +// UserAgentOSName returns an attribute KeyValue conforming to the +// "user_agent.os.name" semantic conventions. It represents the human readable +// operating system name. +func UserAgentOSName(val string) attribute.KeyValue { + return UserAgentOSNameKey.String(val) +} + +// UserAgentOSVersion returns an attribute KeyValue conforming to the +// "user_agent.os.version" semantic conventions. 
It represents the version string +// of the operating system as defined in [Version Attributes]. +// +// [Version Attributes]: /docs/resource/README.md#version-attributes +func UserAgentOSVersion(val string) attribute.KeyValue { + return UserAgentOSVersionKey.String(val) +} + +// UserAgentVersion returns an attribute KeyValue conforming to the +// "user_agent.version" semantic conventions. It represents the version of the +// user-agent extracted from original. Usually refers to the browser's version. +func UserAgentVersion(val string) attribute.KeyValue { + return UserAgentVersionKey.String(val) +} + +// Enum values for user_agent.synthetic.type +var ( + // Bot source. + // Stability: development + UserAgentSyntheticTypeBot = UserAgentSyntheticTypeKey.String("bot") + // Synthetic test source. + // Stability: development + UserAgentSyntheticTypeTest = UserAgentSyntheticTypeKey.String("test") +) + +// Namespace: vcs +const ( + // VCSChangeIDKey is the attribute Key conforming to the "vcs.change.id" + // semantic conventions. It represents the ID of the change (pull request/merge + // request/changelist) if applicable. This is usually a unique (within + // repository) identifier generated by the VCS system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123" + VCSChangeIDKey = attribute.Key("vcs.change.id") + + // VCSChangeStateKey is the attribute Key conforming to the "vcs.change.state" + // semantic conventions. It represents the state of the change (pull + // request/merge request/changelist). + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "open", "closed", "merged" + VCSChangeStateKey = attribute.Key("vcs.change.state") + + // VCSChangeTitleKey is the attribute Key conforming to the "vcs.change.title" + // semantic conventions. It represents the human readable title of the change + // (pull request/merge request/changelist). 
This title is often a brief summary + // of the change and may get merged in to a ref as the commit summary. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Fixes broken thing", "feat: add my new feature", "[chore] update + // dependency" + VCSChangeTitleKey = attribute.Key("vcs.change.title") + + // VCSLineChangeTypeKey is the attribute Key conforming to the + // "vcs.line_change.type" semantic conventions. It represents the type of line + // change being measured on a branch or change. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "added", "removed" + VCSLineChangeTypeKey = attribute.Key("vcs.line_change.type") + + // VCSOwnerNameKey is the attribute Key conforming to the "vcs.owner.name" + // semantic conventions. It represents the group owner within the version + // control system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-org", "myteam", "business-unit" + VCSOwnerNameKey = attribute.Key("vcs.owner.name") + + // VCSProviderNameKey is the attribute Key conforming to the "vcs.provider.name" + // semantic conventions. It represents the name of the version control system + // provider. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "github", "gitlab", "gitea", "bitbucket" + VCSProviderNameKey = attribute.Key("vcs.provider.name") + + // VCSRefBaseNameKey is the attribute Key conforming to the "vcs.ref.base.name" + // semantic conventions. It represents the name of the [reference] such as + // **branch** or **tag** in the repository. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-feature-branch", "tag-1-test" + // Note: `base` refers to the starting point of a change. 
For example, `main` + // would be the base reference of type branch if you've created a new + // reference of type branch from it and created new commits. + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefBaseNameKey = attribute.Key("vcs.ref.base.name") + + // VCSRefBaseRevisionKey is the attribute Key conforming to the + // "vcs.ref.base.revision" semantic conventions. It represents the revision, + // literally [revised version], The revision most often refers to a commit + // object in Git, or a revision number in SVN. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc", + // "main", "123", "HEAD" + // Note: `base` refers to the starting point of a change. For example, `main` + // would be the base reference of type branch if you've created a new + // reference of type branch from it and created new commits. The + // revision can be a full [hash value (see + // glossary)], + // of the recorded change to a ref within a repository pointing to a + // commit [commit] object. It does + // not necessarily have to be a hash; it can simply define a [revision + // number] + // which is an integer that is monotonically increasing. In cases where + // it is identical to the `ref.base.name`, it SHOULD still be included. + // It is up to the implementer to decide which value to set as the + // revision based on the VCS system and situational context. 
+ // + // [revised version]: https://www.merriam-webster.com/dictionary/revision + // [hash value (see + // glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + // [commit]: https://git-scm.com/docs/git-commit + // [revision + // number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html + VCSRefBaseRevisionKey = attribute.Key("vcs.ref.base.revision") + + // VCSRefBaseTypeKey is the attribute Key conforming to the "vcs.ref.base.type" + // semantic conventions. It represents the type of the [reference] in the + // repository. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "branch", "tag" + // Note: `base` refers to the starting point of a change. For example, `main` + // would be the base reference of type branch if you've created a new + // reference of type branch from it and created new commits. + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefBaseTypeKey = attribute.Key("vcs.ref.base.type") + + // VCSRefHeadNameKey is the attribute Key conforming to the "vcs.ref.head.name" + // semantic conventions. It represents the name of the [reference] such as + // **branch** or **tag** in the repository. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-feature-branch", "tag-1-test" + // Note: `head` refers to where you are right now; the current reference at a + // given time. + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefHeadNameKey = attribute.Key("vcs.ref.head.name") + + // VCSRefHeadRevisionKey is the attribute Key conforming to the + // "vcs.ref.head.revision" semantic conventions. It represents the revision, + // literally [revised version], The revision most often refers to a commit + // object in Git, or a revision number in SVN. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc", + // "main", "123", "HEAD" + // Note: `head` refers to where you are right now; the current reference at a + // given time.The revision can be a full [hash value (see + // glossary)], + // of the recorded change to a ref within a repository pointing to a + // commit [commit] object. It does + // not necessarily have to be a hash; it can simply define a [revision + // number] + // which is an integer that is monotonically increasing. In cases where + // it is identical to the `ref.head.name`, it SHOULD still be included. + // It is up to the implementer to decide which value to set as the + // revision based on the VCS system and situational context. + // + // [revised version]: https://www.merriam-webster.com/dictionary/revision + // [hash value (see + // glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + // [commit]: https://git-scm.com/docs/git-commit + // [revision + // number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html + VCSRefHeadRevisionKey = attribute.Key("vcs.ref.head.revision") + + // VCSRefHeadTypeKey is the attribute Key conforming to the "vcs.ref.head.type" + // semantic conventions. It represents the type of the [reference] in the + // repository. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "branch", "tag" + // Note: `head` refers to where you are right now; the current reference at a + // given time. + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefHeadTypeKey = attribute.Key("vcs.ref.head.type") + + // VCSRefTypeKey is the attribute Key conforming to the "vcs.ref.type" semantic + // conventions. It represents the type of the [reference] in the repository. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "branch", "tag" + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefTypeKey = attribute.Key("vcs.ref.type") + + // VCSRepositoryNameKey is the attribute Key conforming to the + // "vcs.repository.name" semantic conventions. It represents the human readable + // name of the repository. It SHOULD NOT include any additional identifier like + // Group/SubGroup in GitLab or organization in GitHub. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "semantic-conventions", "my-cool-repo" + // Note: Due to it only being the name, it can clash with forks of the same + // repository if collecting telemetry across multiple orgs or groups in + // the same backends. + VCSRepositoryNameKey = attribute.Key("vcs.repository.name") + + // VCSRepositoryURLFullKey is the attribute Key conforming to the + // "vcs.repository.url.full" semantic conventions. It represents the + // [canonical URL] of the repository providing the complete HTTP(S) address in + // order to locate and identify the repository through a browser. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "https://github.com/opentelemetry/open-telemetry-collector-contrib", + // "https://gitlab.com/my-org/my-project/my-projects-project/repo" + // Note: In Git Version Control Systems, the canonical URL SHOULD NOT include + // the `.git` extension. + // + // [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. + VCSRepositoryURLFullKey = attribute.Key("vcs.repository.url.full") + + // VCSRevisionDeltaDirectionKey is the attribute Key conforming to the + // "vcs.revision_delta.direction" semantic conventions. It represents the type + // of revision comparison. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ahead", "behind" + VCSRevisionDeltaDirectionKey = attribute.Key("vcs.revision_delta.direction") +) + +// VCSChangeID returns an attribute KeyValue conforming to the "vcs.change.id" +// semantic conventions. It represents the ID of the change (pull request/merge +// request/changelist) if applicable. This is usually a unique (within +// repository) identifier generated by the VCS system. +func VCSChangeID(val string) attribute.KeyValue { + return VCSChangeIDKey.String(val) +} + +// VCSChangeTitle returns an attribute KeyValue conforming to the +// "vcs.change.title" semantic conventions. It represents the human readable +// title of the change (pull request/merge request/changelist). This title is +// often a brief summary of the change and may get merged in to a ref as the +// commit summary. +func VCSChangeTitle(val string) attribute.KeyValue { + return VCSChangeTitleKey.String(val) +} + +// VCSOwnerName returns an attribute KeyValue conforming to the "vcs.owner.name" +// semantic conventions. It represents the group owner within the version control +// system. +func VCSOwnerName(val string) attribute.KeyValue { + return VCSOwnerNameKey.String(val) +} + +// VCSRefBaseName returns an attribute KeyValue conforming to the +// "vcs.ref.base.name" semantic conventions. It represents the name of the +// [reference] such as **branch** or **tag** in the repository. +// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +func VCSRefBaseName(val string) attribute.KeyValue { + return VCSRefBaseNameKey.String(val) +} + +// VCSRefBaseRevision returns an attribute KeyValue conforming to the +// "vcs.ref.base.revision" semantic conventions. It represents the revision, +// literally [revised version], The revision most often refers to a commit object +// in Git, or a revision number in SVN. 
+// +// [revised version]: https://www.merriam-webster.com/dictionary/revision +func VCSRefBaseRevision(val string) attribute.KeyValue { + return VCSRefBaseRevisionKey.String(val) +} + +// VCSRefHeadName returns an attribute KeyValue conforming to the +// "vcs.ref.head.name" semantic conventions. It represents the name of the +// [reference] such as **branch** or **tag** in the repository. +// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +func VCSRefHeadName(val string) attribute.KeyValue { + return VCSRefHeadNameKey.String(val) +} + +// VCSRefHeadRevision returns an attribute KeyValue conforming to the +// "vcs.ref.head.revision" semantic conventions. It represents the revision, +// literally [revised version], The revision most often refers to a commit object +// in Git, or a revision number in SVN. +// +// [revised version]: https://www.merriam-webster.com/dictionary/revision +func VCSRefHeadRevision(val string) attribute.KeyValue { + return VCSRefHeadRevisionKey.String(val) +} + +// VCSRepositoryName returns an attribute KeyValue conforming to the +// "vcs.repository.name" semantic conventions. It represents the human readable +// name of the repository. It SHOULD NOT include any additional identifier like +// Group/SubGroup in GitLab or organization in GitHub. +func VCSRepositoryName(val string) attribute.KeyValue { + return VCSRepositoryNameKey.String(val) +} + +// VCSRepositoryURLFull returns an attribute KeyValue conforming to the +// "vcs.repository.url.full" semantic conventions. It represents the +// [canonical URL] of the repository providing the complete HTTP(S) address in +// order to locate and identify the repository through a browser. +// +// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. 
+func VCSRepositoryURLFull(val string) attribute.KeyValue { + return VCSRepositoryURLFullKey.String(val) +} + +// Enum values for vcs.change.state +var ( + // Open means the change is currently active and under review. It hasn't been + // merged into the target branch yet, and it's still possible to make changes or + // add comments. + // Stability: development + VCSChangeStateOpen = VCSChangeStateKey.String("open") + // WIP (work-in-progress, draft) means the change is still in progress and not + // yet ready for a full review. It might still undergo significant changes. + // Stability: development + VCSChangeStateWip = VCSChangeStateKey.String("wip") + // Closed means the merge request has been closed without merging. This can + // happen for various reasons, such as the changes being deemed unnecessary, the + // issue being resolved in another way, or the author deciding to withdraw the + // request. + // Stability: development + VCSChangeStateClosed = VCSChangeStateKey.String("closed") + // Merged indicates that the change has been successfully integrated into the + // target codebase. + // Stability: development + VCSChangeStateMerged = VCSChangeStateKey.String("merged") +) + +// Enum values for vcs.line_change.type +var ( + // How many lines were added. + // Stability: development + VCSLineChangeTypeAdded = VCSLineChangeTypeKey.String("added") + // How many lines were removed. 
+ // Stability: development + VCSLineChangeTypeRemoved = VCSLineChangeTypeKey.String("removed") +) + +// Enum values for vcs.provider.name +var ( + // [GitHub] + // Stability: development + // + // [GitHub]: https://github.com + VCSProviderNameGithub = VCSProviderNameKey.String("github") + // [GitLab] + // Stability: development + // + // [GitLab]: https://gitlab.com + VCSProviderNameGitlab = VCSProviderNameKey.String("gitlab") + // [Gitea] + // Stability: development + // + // [Gitea]: https://gitea.io + VCSProviderNameGitea = VCSProviderNameKey.String("gitea") + // [Bitbucket] + // Stability: development + // + // [Bitbucket]: https://bitbucket.org + VCSProviderNameBitbucket = VCSProviderNameKey.String("bitbucket") +) + +// Enum values for vcs.ref.base.type +var ( + // [branch] + // Stability: development + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + VCSRefBaseTypeBranch = VCSRefBaseTypeKey.String("branch") + // [tag] + // Stability: development + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + VCSRefBaseTypeTag = VCSRefBaseTypeKey.String("tag") +) + +// Enum values for vcs.ref.head.type +var ( + // [branch] + // Stability: development + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + VCSRefHeadTypeBranch = VCSRefHeadTypeKey.String("branch") + // [tag] + // Stability: development + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + VCSRefHeadTypeTag = VCSRefHeadTypeKey.String("tag") +) + +// Enum values for vcs.ref.type +var ( + // [branch] + // Stability: development + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + VCSRefTypeBranch = VCSRefTypeKey.String("branch") + // [tag] + // Stability: development + // + // [tag]: 
https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + VCSRefTypeTag = VCSRefTypeKey.String("tag") +) + +// Enum values for vcs.revision_delta.direction +var ( + // How many revisions the change is behind the target ref. + // Stability: development + VCSRevisionDeltaDirectionBehind = VCSRevisionDeltaDirectionKey.String("behind") + // How many revisions the change is ahead of the target ref. + // Stability: development + VCSRevisionDeltaDirectionAhead = VCSRevisionDeltaDirectionKey.String("ahead") +) + +// Namespace: webengine +const ( + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the additional + // description of the web engine (e.g. detailed version and edition + // information). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final" + WebEngineDescriptionKey = attribute.Key("webengine.description") + + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "WildFly" + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of the + // web engine. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "21.0.0" + WebEngineVersionKey = attribute.Key("webengine.version") +) + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition information). 
+func WebEngineDescription(val string) attribute.KeyValue {
+	return WebEngineDescriptionKey.String(val)
+}
+
+// WebEngineName returns an attribute KeyValue conforming to the "webengine.name"
+// semantic conventions. It represents the name of the web engine.
+func WebEngineName(val string) attribute.KeyValue {
+	return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the web
+// engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+	return WebEngineVersionKey.String(val)
+}
+
+// Namespace: zos
+const (
+	// ZOSSmfIDKey is the attribute Key conforming to the "zos.smf.id" semantic
+	// conventions. It represents the System Management Facility (SMF) Identifier
+	// uniquely identifies a z/OS system within a SYSPLEX or mainframe environment
+	// and is used for system and performance analysis.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "SYS1"
+	ZOSSmfIDKey = attribute.Key("zos.smf.id")
+
+	// ZOSSysplexNameKey is the attribute Key conforming to the "zos.sysplex.name"
+	// semantic conventions. It represents the name of the SYSPLEX to which the z/OS
+	// system belongs to.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "SYSPLEX1"
+	ZOSSysplexNameKey = attribute.Key("zos.sysplex.name")
+)
+
+// ZOSSmfID returns an attribute KeyValue conforming to the "zos.smf.id" semantic
+// conventions. It represents the System Management Facility (SMF) Identifier
+// uniquely identifies a z/OS system within a SYSPLEX or mainframe environment
+// and is used for system and performance analysis.
+func ZOSSmfID(val string) attribute.KeyValue {
+	return ZOSSmfIDKey.String(val)
+}
+
+// ZOSSysplexName returns an attribute KeyValue conforming to the
+// "zos.sysplex.name" semantic conventions. 
It represents the name of the SYSPLEX
+// to which the z/OS system belongs to.
+func ZOSSysplexName(val string) attribute.KeyValue {
+	return ZOSSysplexNameKey.String(val)
+}
\ No newline at end of file
diff --git a/semconv/v1.36.0/azureconv/metric.go b/semconv/v1.36.0/azureconv/metric.go
new file mode 100644
index 00000000000..9d08c2ca72d
--- /dev/null
+++ b/semconv/v1.36.0/azureconv/metric.go
@@ -0,0 +1,338 @@
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package azureconv provides types and functionality for OpenTelemetry semantic
+// conventions in the "azure" namespace.
+package azureconv
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/noop"
+)
+
+var (
+	addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }}
+	recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }}
+)
+
+// CosmosDBConsistencyLevelAttr is an attribute conforming to the
+// azure.cosmosdb.consistency.level semantic conventions. It represents the
+// account or request [consistency level].
+//
+// [consistency level]: https://learn.microsoft.com/azure/cosmos-db/consistency-levels
+type CosmosDBConsistencyLevelAttr string
+
+var (
+	// CosmosDBConsistencyLevelStrong is the standardized value "Strong" of
+	// CosmosDBConsistencyLevelAttr.
+	CosmosDBConsistencyLevelStrong CosmosDBConsistencyLevelAttr = "Strong"
+	// CosmosDBConsistencyLevelBoundedStaleness is the standardized value
+	// "BoundedStaleness" of CosmosDBConsistencyLevelAttr.
+	CosmosDBConsistencyLevelBoundedStaleness CosmosDBConsistencyLevelAttr = "BoundedStaleness"
+	// CosmosDBConsistencyLevelSession is the standardized value "Session" of
+	// CosmosDBConsistencyLevelAttr.
+ CosmosDBConsistencyLevelSession CosmosDBConsistencyLevelAttr = "Session" + // CosmosDBConsistencyLevelEventual is the standardized value "Eventual" of + // CosmosDBConsistencyLevelAttr. + CosmosDBConsistencyLevelEventual CosmosDBConsistencyLevelAttr = "Eventual" + // CosmosDBConsistencyLevelConsistentPrefix is the standardized value + // "ConsistentPrefix" of CosmosDBConsistencyLevelAttr. + CosmosDBConsistencyLevelConsistentPrefix CosmosDBConsistencyLevelAttr = "ConsistentPrefix" +) + +// ErrorTypeAttr is an attribute conforming to the error.type semantic +// conventions. It represents the describes a class of error the operation ended +// with. +type ErrorTypeAttr string + +var ( + // ErrorTypeOther is a fallback error value to be used when the instrumentation + // doesn't define a custom value. + ErrorTypeOther ErrorTypeAttr = "_OTHER" +) + +// CosmosDBClientActiveInstanceCount is an instrument used to record metric +// values conforming to the "azure.cosmosdb.client.active_instance.count" +// semantic conventions. It represents the number of active client instances. +type CosmosDBClientActiveInstanceCount struct { + metric.Int64UpDownCounter +} + +// NewCosmosDBClientActiveInstanceCount returns a new +// CosmosDBClientActiveInstanceCount instrument. +func NewCosmosDBClientActiveInstanceCount( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (CosmosDBClientActiveInstanceCount, error) { + // Check if the meter is nil. 
+ if m == nil { + return CosmosDBClientActiveInstanceCount{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "azure.cosmosdb.client.active_instance.count", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of active client instances"), + metric.WithUnit("{instance}"), + }, opt...)..., + ) + if err != nil { + return CosmosDBClientActiveInstanceCount{noop.Int64UpDownCounter{}}, err + } + return CosmosDBClientActiveInstanceCount{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m CosmosDBClientActiveInstanceCount) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (CosmosDBClientActiveInstanceCount) Name() string { + return "azure.cosmosdb.client.active_instance.count" +} + +// Unit returns the semantic convention unit of the instrument +func (CosmosDBClientActiveInstanceCount) Unit() string { + return "{instance}" +} + +// Description returns the semantic convention description of the instrument +func (CosmosDBClientActiveInstanceCount) Description() string { + return "Number of active client instances" +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m CosmosDBClientActiveInstanceCount) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+func (m CosmosDBClientActiveInstanceCount) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (CosmosDBClientActiveInstanceCount) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the name of the database host. +func (CosmosDBClientActiveInstanceCount) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// CosmosDBClientOperationRequestCharge is an instrument used to record metric +// values conforming to the "azure.cosmosdb.client.operation.request_charge" +// semantic conventions. It represents the [Request units] consumed by the +// operation. +// +// [Request units]: https://learn.microsoft.com/azure/cosmos-db/request-units +type CosmosDBClientOperationRequestCharge struct { + metric.Int64Histogram +} + +// NewCosmosDBClientOperationRequestCharge returns a new +// CosmosDBClientOperationRequestCharge instrument. +func NewCosmosDBClientOperationRequestCharge( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (CosmosDBClientOperationRequestCharge, error) { + // Check if the meter is nil. 
+ if m == nil { + return CosmosDBClientOperationRequestCharge{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "azure.cosmosdb.client.operation.request_charge", + append([]metric.Int64HistogramOption{ + metric.WithDescription("[Request units](https://learn.microsoft.com/azure/cosmos-db/request-units) consumed by the operation"), + metric.WithUnit("{request_unit}"), + }, opt...)..., + ) + if err != nil { + return CosmosDBClientOperationRequestCharge{noop.Int64Histogram{}}, err + } + return CosmosDBClientOperationRequestCharge{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m CosmosDBClientOperationRequestCharge) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (CosmosDBClientOperationRequestCharge) Name() string { + return "azure.cosmosdb.client.operation.request_charge" +} + +// Unit returns the semantic convention unit of the instrument +func (CosmosDBClientOperationRequestCharge) Unit() string { + return "{request_unit}" +} + +// Description returns the semantic convention description of the instrument +func (CosmosDBClientOperationRequestCharge) Description() string { + return "[Request units](https://learn.microsoft.com/azure/cosmos-db/request-units) consumed by the operation" +} + +// Record records val to the current distribution for attrs. +// +// The dbOperationName is the the name of the operation or command being +// executed. +// +// All additional attrs passed are included in the recorded value. 
+func (m CosmosDBClientOperationRequestCharge) Record(
+	ctx context.Context,
+	val int64,
+	dbOperationName string,
+	attrs ...attribute.KeyValue,
+) {
+	// db.operation.name is a required attribute of this instrument, so it must
+	// be recorded even when the caller passes no additional attrs. There is
+	// deliberately no attribute-free fast path here.
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("db.operation.name", dbOperationName),
+			)...,
+		),
+	)
+
+	m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m CosmosDBClientOperationRequestCharge) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Histogram.Record(ctx, val)
+		// Return here so the value is not recorded a second time below.
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrCosmosDBConsistencyLevel returns an optional attribute for the
+// "azure.cosmosdb.consistency.level" semantic convention. It represents the
+// account or request [consistency level].
+//
+// [consistency level]: https://learn.microsoft.com/azure/cosmos-db/consistency-levels
+func (CosmosDBClientOperationRequestCharge) AttrCosmosDBConsistencyLevel(val CosmosDBConsistencyLevelAttr) attribute.KeyValue {
+	return attribute.String("azure.cosmosdb.consistency.level", string(val))
+}
+
+// AttrCosmosDBResponseSubStatusCode returns an optional attribute for the
+// "azure.cosmosdb.response.sub_status_code" semantic convention. It represents
+// the cosmos DB sub status code.
+func (CosmosDBClientOperationRequestCharge) AttrCosmosDBResponseSubStatusCode(val int) attribute.KeyValue {
+	return attribute.Int("azure.cosmosdb.response.sub_status_code", val)
+}
+
+// AttrDBCollectionName returns an optional attribute for the
+// "db.collection.name" semantic convention. It represents the cosmos DB
+// container name.
+func (CosmosDBClientOperationRequestCharge) AttrDBCollectionName(val string) attribute.KeyValue { + return attribute.String("db.collection.name", val) +} + +// AttrDBNamespace returns an optional attribute for the "db.namespace" semantic +// convention. It represents the name of the database, fully qualified within the +// server address and port. +func (CosmosDBClientOperationRequestCharge) AttrDBNamespace(val string) attribute.KeyValue { + return attribute.String("db.namespace", val) +} + +// AttrDBResponseStatusCode returns an optional attribute for the +// "db.response.status_code" semantic convention. It represents the database +// response status code. +func (CosmosDBClientOperationRequestCharge) AttrDBResponseStatusCode(val string) attribute.KeyValue { + return attribute.String("db.response.status_code", val) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (CosmosDBClientOperationRequestCharge) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (CosmosDBClientOperationRequestCharge) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// AttrCosmosDBOperationContactedRegions returns an optional attribute for the +// "azure.cosmosdb.operation.contacted_regions" semantic convention. It +// represents the list of regions contacted during operation in the order that +// they were contacted. If there is more than one region listed, it indicates +// that the operation was performed on multiple regions i.e. cross-regional call. 
+func (CosmosDBClientOperationRequestCharge) AttrCosmosDBOperationContactedRegions(val ...string) attribute.KeyValue {
+	return attribute.StringSlice("azure.cosmosdb.operation.contacted_regions", val)
+}
+
+// AttrServerAddress returns an optional attribute for the "server.address"
+// semantic convention. It represents the name of the database host.
+func (CosmosDBClientOperationRequestCharge) AttrServerAddress(val string) attribute.KeyValue {
+	return attribute.String("server.address", val)
+}
\ No newline at end of file
diff --git a/semconv/v1.36.0/cicdconv/metric.go b/semconv/v1.36.0/cicdconv/metric.go
new file mode 100644
index 00000000000..2e156d394c1
--- /dev/null
+++ b/semconv/v1.36.0/cicdconv/metric.go
@@ -0,0 +1,643 @@
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package cicdconv provides types and functionality for OpenTelemetry
+// semantic conventions in the "cicd" namespace.
+package cicdconv
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/noop"
+)
+
+var (
+	addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }}
+	recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }}
+)
+
+// PipelineResultAttr is an attribute conforming to the cicd.pipeline.result
+// semantic conventions. It represents the result of a pipeline run.
+type PipelineResultAttr string
+
+var (
+	// PipelineResultSuccess is the pipeline run finished successfully.
+	PipelineResultSuccess PipelineResultAttr = "success"
+	// PipelineResultFailure is the pipeline run did not finish successfully, eg.
+	// due to a compile error or a failing test. Such failures are usually detected
+	// by non-zero exit codes of the tools executed in the pipeline run.
+ PipelineResultFailure PipelineResultAttr = "failure" + // PipelineResultError is the pipeline run failed due to an error in the CICD + // system, eg. due to the worker being killed. + PipelineResultError PipelineResultAttr = "error" + // PipelineResultTimeout is a timeout caused the pipeline run to be interrupted. + PipelineResultTimeout PipelineResultAttr = "timeout" + // PipelineResultCancellation is the pipeline run was cancelled, eg. by a user + // manually cancelling the pipeline run. + PipelineResultCancellation PipelineResultAttr = "cancellation" + // PipelineResultSkip is the pipeline run was skipped, eg. due to a precondition + // not being met. + PipelineResultSkip PipelineResultAttr = "skip" +) + +// PipelineRunStateAttr is an attribute conforming to the cicd.pipeline.run.state +// semantic conventions. It represents the pipeline run goes through these states +// during its lifecycle. +type PipelineRunStateAttr string + +var ( + // PipelineRunStatePending is the run pending state spans from the event + // triggering the pipeline run until the execution of the run starts (eg. time + // spent in a queue, provisioning agents, creating run resources). + PipelineRunStatePending PipelineRunStateAttr = "pending" + // PipelineRunStateExecuting is the executing state spans the execution of any + // run tasks (eg. build, test). + PipelineRunStateExecuting PipelineRunStateAttr = "executing" + // PipelineRunStateFinalizing is the finalizing state spans from when the run + // has finished executing (eg. cleanup of run resources). + PipelineRunStateFinalizing PipelineRunStateAttr = "finalizing" +) + +// WorkerStateAttr is an attribute conforming to the cicd.worker.state semantic +// conventions. It represents the state of a CICD worker / agent. +type WorkerStateAttr string + +var ( + // WorkerStateAvailable is the worker is not performing work for the CICD + // system. It is available to the CICD system to perform work on (online / + // idle). 
+ WorkerStateAvailable WorkerStateAttr = "available" + // WorkerStateBusy is the worker is performing work for the CICD system. + WorkerStateBusy WorkerStateAttr = "busy" + // WorkerStateOffline is the worker is not available to the CICD system + // (disconnected / down). + WorkerStateOffline WorkerStateAttr = "offline" +) + +// ErrorTypeAttr is an attribute conforming to the error.type semantic +// conventions. It represents the describes a class of error the operation ended +// with. +type ErrorTypeAttr string + +var ( + // ErrorTypeOther is a fallback error value to be used when the instrumentation + // doesn't define a custom value. + ErrorTypeOther ErrorTypeAttr = "_OTHER" +) + +// PipelineRunActive is an instrument used to record metric values conforming to +// the "cicd.pipeline.run.active" semantic conventions. It represents the number +// of pipeline runs currently active in the system by state. +type PipelineRunActive struct { + metric.Int64UpDownCounter +} + +// NewPipelineRunActive returns a new PipelineRunActive instrument. +func NewPipelineRunActive( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (PipelineRunActive, error) { + // Check if the meter is nil. + if m == nil { + return PipelineRunActive{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "cicd.pipeline.run.active", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of pipeline runs currently active in the system by state."), + metric.WithUnit("{run}"), + }, opt...)..., + ) + if err != nil { + return PipelineRunActive{noop.Int64UpDownCounter{}}, err + } + return PipelineRunActive{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m PipelineRunActive) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (PipelineRunActive) Name() string {
+	return "cicd.pipeline.run.active"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (PipelineRunActive) Unit() string {
+	return "{run}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (PipelineRunActive) Description() string {
+	return "The number of pipeline runs currently active in the system by state."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// The pipelineName is the human readable name of the pipeline within a CI/CD
+// system.
+//
+// The pipelineRunState is the state of the pipeline run within its lifecycle.
+func (m PipelineRunActive) Add(
+	ctx context.Context,
+	incr int64,
+	pipelineName string,
+	pipelineRunState PipelineRunStateAttr,
+	attrs ...attribute.KeyValue,
+) {
+	// cicd.pipeline.name and cicd.pipeline.run.state are required attributes,
+	// so they must be recorded even when the caller passes no additional
+	// attrs. There is deliberately no attribute-free fast path here.
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("cicd.pipeline.name", pipelineName),
+				attribute.String("cicd.pipeline.run.state", string(pipelineRunState)),
+			)...,
+		),
+	)
+
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+func (m PipelineRunActive) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64UpDownCounter.Add(ctx, incr)
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// PipelineRunDuration is an instrument used to record metric values conforming
+// to the "cicd.pipeline.run.duration" semantic conventions. It represents the
+// duration of a pipeline run grouped by pipeline, state and result.
+type PipelineRunDuration struct { + metric.Float64Histogram +} + +// NewPipelineRunDuration returns a new PipelineRunDuration instrument. +func NewPipelineRunDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (PipelineRunDuration, error) { + // Check if the meter is nil. + if m == nil { + return PipelineRunDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "cicd.pipeline.run.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("Duration of a pipeline run grouped by pipeline, state and result."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return PipelineRunDuration{noop.Float64Histogram{}}, err + } + return PipelineRunDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m PipelineRunDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (PipelineRunDuration) Name() string { + return "cicd.pipeline.run.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (PipelineRunDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (PipelineRunDuration) Description() string { + return "Duration of a pipeline run grouped by pipeline, state and result." +} + +// Record records val to the current distribution for attrs. +// +// The pipelineName is the the human readable name of the pipeline within a CI/CD +// system. +// +// The pipelineRunState is the the pipeline run goes through these states during +// its lifecycle. +// +// All additional attrs passed are included in the recorded value. 
+func (m PipelineRunDuration) Record(
+	ctx context.Context,
+	val float64,
+	pipelineName string,
+	pipelineRunState PipelineRunStateAttr,
+	attrs ...attribute.KeyValue,
+) {
+	// cicd.pipeline.name and cicd.pipeline.run.state are required attributes,
+	// so they must be recorded even when the caller passes no additional
+	// attrs. There is deliberately no attribute-free fast path here.
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("cicd.pipeline.name", pipelineName),
+				attribute.String("cicd.pipeline.run.state", string(pipelineRunState)),
+			)...,
+		),
+	)
+
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m PipelineRunDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		// Return here so the value is not recorded a second time below.
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrPipelineResult returns an optional attribute for the
+// "cicd.pipeline.result" semantic convention. It represents the result of a
+// pipeline run.
+func (PipelineRunDuration) AttrPipelineResult(val PipelineResultAttr) attribute.KeyValue {
+	return attribute.String("cicd.pipeline.result", string(val))
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It describes a class of error the operation ended with.
+func (PipelineRunDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+	return attribute.String("error.type", string(val))
+}
+
+// PipelineRunErrors is an instrument used to record metric values conforming to
+// the "cicd.pipeline.run.errors" semantic conventions. It represents the number
+// of errors encountered in pipeline runs (eg. compile, test failures).
+type PipelineRunErrors struct { + metric.Int64Counter +} + +// NewPipelineRunErrors returns a new PipelineRunErrors instrument. +func NewPipelineRunErrors( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (PipelineRunErrors, error) { + // Check if the meter is nil. + if m == nil { + return PipelineRunErrors{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "cicd.pipeline.run.errors", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of errors encountered in pipeline runs (eg. compile, test failures)."), + metric.WithUnit("{error}"), + }, opt...)..., + ) + if err != nil { + return PipelineRunErrors{noop.Int64Counter{}}, err + } + return PipelineRunErrors{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m PipelineRunErrors) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (PipelineRunErrors) Name() string { + return "cicd.pipeline.run.errors" +} + +// Unit returns the semantic convention unit of the instrument +func (PipelineRunErrors) Unit() string { + return "{error}" +} + +// Description returns the semantic convention description of the instrument +func (PipelineRunErrors) Description() string { + return "The number of errors encountered in pipeline runs (eg. compile, test failures)." +} + +// Add adds incr to the existing count for attrs. +// +// The pipelineName is the the human readable name of the pipeline within a CI/CD +// system. +// +// The errorType is the describes a class of error the operation ended with. +// +// There might be errors in a pipeline run that are non fatal (eg. they are +// suppressed) or in a parallel stage multiple stages could have a fatal error. +// This means that this error count might not be the same as the count of metric +// `cicd.pipeline.run.duration` with run result `failure`. 
+func (m PipelineRunErrors) Add( + ctx context.Context, + incr int64, + pipelineName string, + errorType ErrorTypeAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("cicd.pipeline.name", pipelineName), + attribute.String("error.type", string(errorType)), + )..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// There might be errors in a pipeline run that are non fatal (eg. they are +// suppressed) or in a parallel stage multiple stages could have a fatal error. +// This means that this error count might not be the same as the count of metric +// `cicd.pipeline.run.duration` with run result `failure`. +func (m PipelineRunErrors) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// SystemErrors is an instrument used to record metric values conforming to the +// "cicd.system.errors" semantic conventions. It represents the number of errors +// in a component of the CICD system (eg. controller, scheduler, agent). +type SystemErrors struct { + metric.Int64Counter +} + +// NewSystemErrors returns a new SystemErrors instrument. +func NewSystemErrors( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SystemErrors, error) { + // Check if the meter is nil. 
+ if m == nil { + return SystemErrors{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "cicd.system.errors", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of errors in a component of the CICD system (eg. controller, scheduler, agent)."), + metric.WithUnit("{error}"), + }, opt...)..., + ) + if err != nil { + return SystemErrors{noop.Int64Counter{}}, err + } + return SystemErrors{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SystemErrors) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SystemErrors) Name() string { + return "cicd.system.errors" +} + +// Unit returns the semantic convention unit of the instrument +func (SystemErrors) Unit() string { + return "{error}" +} + +// Description returns the semantic convention description of the instrument +func (SystemErrors) Description() string { + return "The number of errors in a component of the CICD system (eg. controller, scheduler, agent)." +} + +// Add adds incr to the existing count for attrs. +// +// The systemComponent is the the name of a component of the CICD system. +// +// The errorType is the describes a class of error the operation ended with. +// +// Errors in pipeline run execution are explicitly excluded. Ie a test failure is +// not counted in this metric. +func (m SystemErrors) Add( + ctx context.Context, + incr int64, + systemComponent string, + errorType ErrorTypeAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("cicd.system.component", systemComponent), + attribute.String("error.type", string(errorType)), + )..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) 
+} + +// AddSet adds incr to the existing count for set. +// +// Errors in pipeline run execution are explicitly excluded. Ie a test failure is +// not counted in this metric. +func (m SystemErrors) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// WorkerCount is an instrument used to record metric values conforming to the +// "cicd.worker.count" semantic conventions. It represents the number of workers +// on the CICD system by state. +type WorkerCount struct { + metric.Int64UpDownCounter +} + +// NewWorkerCount returns a new WorkerCount instrument. +func NewWorkerCount( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (WorkerCount, error) { + // Check if the meter is nil. + if m == nil { + return WorkerCount{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "cicd.worker.count", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of workers on the CICD system by state."), + metric.WithUnit("{count}"), + }, opt...)..., + ) + if err != nil { + return WorkerCount{noop.Int64UpDownCounter{}}, err + } + return WorkerCount{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m WorkerCount) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (WorkerCount) Name() string { + return "cicd.worker.count" +} + +// Unit returns the semantic convention unit of the instrument +func (WorkerCount) Unit() string { + return "{count}" +} + +// Description returns the semantic convention description of the instrument +func (WorkerCount) Description() string { + return "The number of workers on the CICD system by state." 
+} + +// Add adds incr to the existing count for attrs. +// +// The workerState is the the state of a CICD worker / agent. +func (m WorkerCount) Add( + ctx context.Context, + incr int64, + workerState WorkerStateAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("cicd.worker.state", string(workerState)), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m WorkerCount) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} \ No newline at end of file diff --git a/semconv/v1.36.0/containerconv/metric.go b/semconv/v1.36.0/containerconv/metric.go new file mode 100644 index 00000000000..8bcacf5f458 --- /dev/null +++ b/semconv/v1.36.0/containerconv/metric.go @@ -0,0 +1,696 @@ +// Code generated from semantic convention specification. DO NOT EDIT. + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package httpconv provides types and functionality for OpenTelemetry semantic +// conventions in the "container" namespace. 
+package containerconv + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" +) + +var ( + addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }} + recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }} +) + +// CPUModeAttr is an attribute conforming to the cpu.mode semantic conventions. +// It represents the CPU mode for this data point. A container's CPU metric +// SHOULD be characterized *either* by data points with no `mode` labels, +// *or only* data points with `mode` labels. +type CPUModeAttr string + +var ( + // CPUModeUser is the standardized value "user" of CPUModeAttr. + CPUModeUser CPUModeAttr = "user" + // CPUModeSystem is the standardized value "system" of CPUModeAttr. + CPUModeSystem CPUModeAttr = "system" + // CPUModeNice is the standardized value "nice" of CPUModeAttr. + CPUModeNice CPUModeAttr = "nice" + // CPUModeIdle is the standardized value "idle" of CPUModeAttr. + CPUModeIdle CPUModeAttr = "idle" + // CPUModeIOWait is the standardized value "iowait" of CPUModeAttr. + CPUModeIOWait CPUModeAttr = "iowait" + // CPUModeInterrupt is the standardized value "interrupt" of CPUModeAttr. + CPUModeInterrupt CPUModeAttr = "interrupt" + // CPUModeSteal is the standardized value "steal" of CPUModeAttr. + CPUModeSteal CPUModeAttr = "steal" + // CPUModeKernel is the standardized value "kernel" of CPUModeAttr. + CPUModeKernel CPUModeAttr = "kernel" +) + +// DiskIODirectionAttr is an attribute conforming to the disk.io.direction +// semantic conventions. It represents the disk IO operation direction. +type DiskIODirectionAttr string + +var ( + // DiskIODirectionRead is the standardized value "read" of DiskIODirectionAttr. + DiskIODirectionRead DiskIODirectionAttr = "read" + // DiskIODirectionWrite is the standardized value "write" of + // DiskIODirectionAttr. 
+ DiskIODirectionWrite DiskIODirectionAttr = "write" +) + +// NetworkIODirectionAttr is an attribute conforming to the network.io.direction +// semantic conventions. It represents the network IO operation direction. +type NetworkIODirectionAttr string + +var ( + // NetworkIODirectionTransmit is the standardized value "transmit" of + // NetworkIODirectionAttr. + NetworkIODirectionTransmit NetworkIODirectionAttr = "transmit" + // NetworkIODirectionReceive is the standardized value "receive" of + // NetworkIODirectionAttr. + NetworkIODirectionReceive NetworkIODirectionAttr = "receive" +) + +// CPUTime is an instrument used to record metric values conforming to the +// "container.cpu.time" semantic conventions. It represents the total CPU time +// consumed. +type CPUTime struct { + metric.Float64Counter +} + +// NewCPUTime returns a new CPUTime instrument. +func NewCPUTime( + m metric.Meter, + opt ...metric.Float64CounterOption, +) (CPUTime, error) { + // Check if the meter is nil. + if m == nil { + return CPUTime{noop.Float64Counter{}}, nil + } + + i, err := m.Float64Counter( + "container.cpu.time", + append([]metric.Float64CounterOption{ + metric.WithDescription("Total CPU time consumed"), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return CPUTime{noop.Float64Counter{}}, err + } + return CPUTime{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m CPUTime) Inst() metric.Float64Counter { + return m.Float64Counter +} + +// Name returns the semantic convention name of the instrument. +func (CPUTime) Name() string { + return "container.cpu.time" +} + +// Unit returns the semantic convention unit of the instrument +func (CPUTime) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (CPUTime) Description() string { + return "Total CPU time consumed" +} + +// Add adds incr to the existing count for attrs. 
+// +// All additional attrs passed are included in the recorded value. +// +// Total CPU time consumed by the specific container on all available CPU cores +func (m CPUTime) Add( + ctx context.Context, + incr float64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Float64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// Total CPU time consumed by the specific container on all available CPU cores +func (m CPUTime) AddSet(ctx context.Context, incr float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Counter.Add(ctx, incr, *o...) +} + +// AttrCPUMode returns an optional attribute for the "cpu.mode" semantic +// convention. It represents the CPU mode for this data point. A container's CPU +// metric SHOULD be characterized *either* by data points with no `mode` labels, +// *or only* data points with `mode` labels. +func (CPUTime) AttrCPUMode(val CPUModeAttr) attribute.KeyValue { + return attribute.String("cpu.mode", string(val)) +} + +// CPUUsage is an instrument used to record metric values conforming to the +// "container.cpu.usage" semantic conventions. It represents the container's CPU +// usage, measured in cpus. Range from 0 to the number of allocatable CPUs. +type CPUUsage struct { + metric.Int64Gauge +} + +// NewCPUUsage returns a new CPUUsage instrument. +func NewCPUUsage( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (CPUUsage, error) { + // Check if the meter is nil. 
+ if m == nil { + return CPUUsage{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "container.cpu.usage", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Container's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs"), + metric.WithUnit("{cpu}"), + }, opt...)..., + ) + if err != nil { + return CPUUsage{noop.Int64Gauge{}}, err + } + return CPUUsage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m CPUUsage) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (CPUUsage) Name() string { + return "container.cpu.usage" +} + +// Unit returns the semantic convention unit of the instrument +func (CPUUsage) Unit() string { + return "{cpu}" +} + +// Description returns the semantic convention description of the instrument +func (CPUUsage) Description() string { + return "Container's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs" +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// CPU usage of the specific container on all available CPU cores, averaged over +// the sample window +func (m CPUUsage) Record( + ctx context.Context, + val int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+// +// CPU usage of the specific container on all available CPU cores, averaged over +// the sample window +func (m CPUUsage) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// AttrCPUMode returns an optional attribute for the "cpu.mode" semantic +// convention. It represents the CPU mode for this data point. A container's CPU +// metric SHOULD be characterized *either* by data points with no `mode` labels, +// *or only* data points with `mode` labels. +func (CPUUsage) AttrCPUMode(val CPUModeAttr) attribute.KeyValue { + return attribute.String("cpu.mode", string(val)) +} + +// DiskIO is an instrument used to record metric values conforming to the +// "container.disk.io" semantic conventions. It represents the disk bytes for the +// container. +type DiskIO struct { + metric.Int64Counter +} + +// NewDiskIO returns a new DiskIO instrument. +func NewDiskIO( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (DiskIO, error) { + // Check if the meter is nil. + if m == nil { + return DiskIO{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "container.disk.io", + append([]metric.Int64CounterOption{ + metric.WithDescription("Disk bytes for the container."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return DiskIO{noop.Int64Counter{}}, err + } + return DiskIO{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m DiskIO) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. 
+func (DiskIO) Name() string { + return "container.disk.io" +} + +// Unit returns the semantic convention unit of the instrument +func (DiskIO) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (DiskIO) Description() string { + return "Disk bytes for the container." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// The total number of bytes read/written successfully (aggregated from all +// disks). +func (m DiskIO) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// The total number of bytes read/written successfully (aggregated from all +// disks). +func (m DiskIO) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrDiskIODirection returns an optional attribute for the "disk.io.direction" +// semantic convention. It represents the disk IO operation direction. +func (DiskIO) AttrDiskIODirection(val DiskIODirectionAttr) attribute.KeyValue { + return attribute.String("disk.io.direction", string(val)) +} + +// AttrSystemDevice returns an optional attribute for the "system.device" +// semantic convention. It represents the device identifier. 
+func (DiskIO) AttrSystemDevice(val string) attribute.KeyValue { + return attribute.String("system.device", val) +} + +// MemoryUsage is an instrument used to record metric values conforming to the +// "container.memory.usage" semantic conventions. It represents the memory usage +// of the container. +type MemoryUsage struct { + metric.Int64Counter +} + +// NewMemoryUsage returns a new MemoryUsage instrument. +func NewMemoryUsage( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (MemoryUsage, error) { + // Check if the meter is nil. + if m == nil { + return MemoryUsage{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "container.memory.usage", + append([]metric.Int64CounterOption{ + metric.WithDescription("Memory usage of the container."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return MemoryUsage{noop.Int64Counter{}}, err + } + return MemoryUsage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m MemoryUsage) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (MemoryUsage) Name() string { + return "container.memory.usage" +} + +// Unit returns the semantic convention unit of the instrument +func (MemoryUsage) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (MemoryUsage) Description() string { + return "Memory usage of the container." +} + +// Add adds incr to the existing count for attrs. +// +// Memory usage of the container. +func (m MemoryUsage) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+// +// Memory usage of the container. +func (m MemoryUsage) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// NetworkIO is an instrument used to record metric values conforming to the +// "container.network.io" semantic conventions. It represents the network bytes +// for the container. +type NetworkIO struct { + metric.Int64Counter +} + +// NewNetworkIO returns a new NetworkIO instrument. +func NewNetworkIO( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (NetworkIO, error) { + // Check if the meter is nil. + if m == nil { + return NetworkIO{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "container.network.io", + append([]metric.Int64CounterOption{ + metric.WithDescription("Network bytes for the container."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return NetworkIO{noop.Int64Counter{}}, err + } + return NetworkIO{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NetworkIO) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (NetworkIO) Name() string { + return "container.network.io" +} + +// Unit returns the semantic convention unit of the instrument +func (NetworkIO) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (NetworkIO) Description() string { + return "Network bytes for the container." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// The number of bytes sent/received on all network interfaces by the container. 
+func (m NetworkIO) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// The number of bytes sent/received on all network interfaces by the container. +func (m NetworkIO) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrNetworkInterfaceName returns an optional attribute for the +// "network.interface.name" semantic convention. It represents the network +// interface name. +func (NetworkIO) AttrNetworkInterfaceName(val string) attribute.KeyValue { + return attribute.String("network.interface.name", val) +} + +// AttrNetworkIODirection returns an optional attribute for the +// "network.io.direction" semantic convention. It represents the network IO +// operation direction. +func (NetworkIO) AttrNetworkIODirection(val NetworkIODirectionAttr) attribute.KeyValue { + return attribute.String("network.io.direction", string(val)) +} + +// Uptime is an instrument used to record metric values conforming to the +// "container.uptime" semantic conventions. It represents the time the container +// has been running. +type Uptime struct { + metric.Float64Gauge +} + +// NewUptime returns a new Uptime instrument. +func NewUptime( + m metric.Meter, + opt ...metric.Float64GaugeOption, +) (Uptime, error) { + // Check if the meter is nil. 
+ if m == nil { + return Uptime{noop.Float64Gauge{}}, nil + } + + i, err := m.Float64Gauge( + "container.uptime", + append([]metric.Float64GaugeOption{ + metric.WithDescription("The time the container has been running"), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return Uptime{noop.Float64Gauge{}}, err + } + return Uptime{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m Uptime) Inst() metric.Float64Gauge { + return m.Float64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (Uptime) Name() string { + return "container.uptime" +} + +// Unit returns the semantic convention unit of the instrument +func (Uptime) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (Uptime) Description() string { + return "The time the container has been running" +} + +// Record records val to the current distribution for attrs. +// +// Instrumentations SHOULD use a gauge with type `double` and measure uptime in +// seconds as a floating point number with the highest precision available. +// The actual accuracy would depend on the instrumentation and operating system. +func (m Uptime) Record(ctx context.Context, val float64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Float64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Float64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// Instrumentations SHOULD use a gauge with type `double` and measure uptime in +// seconds as a floating point number with the highest precision available. +// The actual accuracy would depend on the instrumentation and operating system. 
+func (m Uptime) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Gauge.Record(ctx, val)
+		// Fast path done: without this return the value was recorded a
+		// second time below, doubling every empty-set measurement.
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Gauge.Record(ctx, val, *o...)
+}
\ No newline at end of file
diff --git a/semconv/v1.36.0/dbconv/metric.go b/semconv/v1.36.0/dbconv/metric.go
new file mode 100644
index 00000000000..1db72b5935a
--- /dev/null
+++ b/semconv/v1.36.0/dbconv/metric.go
@@ -0,0 +1,1542 @@
+// Code generated from semantic convention specification. DO NOT EDIT.

+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package dbconv provides types and functionality for OpenTelemetry semantic
+// conventions in the "db" namespace.
+package dbconv
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/noop"
+)
+
+var (
+	addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }}
+	recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }}
+)
+
+// ClientConnectionStateAttr is an attribute conforming to the
+// db.client.connection.state semantic conventions. It represents the state of a
+// connection in the pool.
+type ClientConnectionStateAttr string
+
+var (
+	// ClientConnectionStateIdle is the standardized value "idle" of
+	// ClientConnectionStateAttr.
+	ClientConnectionStateIdle ClientConnectionStateAttr = "idle"
+	// ClientConnectionStateUsed is the standardized value "used" of
+	// ClientConnectionStateAttr.
+	ClientConnectionStateUsed ClientConnectionStateAttr = "used"
+)
+
+// SystemNameAttr is an attribute conforming to the db.system.name semantic
+// conventions. It represents the database management system (DBMS) product as
+// identified by the client instrumentation.
+type SystemNameAttr string + +var ( + // SystemNameOtherSQL is the some other SQL database. Fallback only. + SystemNameOtherSQL SystemNameAttr = "other_sql" + // SystemNameSoftwareagAdabas is the [Adabas (Adaptable Database System)]. + // + // [Adabas (Adaptable Database System)]: https://documentation.softwareag.com/?pf=adabas + SystemNameSoftwareagAdabas SystemNameAttr = "softwareag.adabas" + // SystemNameActianIngres is the [Actian Ingres]. + // + // [Actian Ingres]: https://www.actian.com/databases/ingres/ + SystemNameActianIngres SystemNameAttr = "actian.ingres" + // SystemNameAWSDynamoDB is the [Amazon DynamoDB]. + // + // [Amazon DynamoDB]: https://aws.amazon.com/pm/dynamodb/ + SystemNameAWSDynamoDB SystemNameAttr = "aws.dynamodb" + // SystemNameAWSRedshift is the [Amazon Redshift]. + // + // [Amazon Redshift]: https://aws.amazon.com/redshift/ + SystemNameAWSRedshift SystemNameAttr = "aws.redshift" + // SystemNameAzureCosmosDB is the [Azure Cosmos DB]. + // + // [Azure Cosmos DB]: https://learn.microsoft.com/azure/cosmos-db + SystemNameAzureCosmosDB SystemNameAttr = "azure.cosmosdb" + // SystemNameIntersystemsCache is the [InterSystems Caché]. + // + // [InterSystems Caché]: https://www.intersystems.com/products/cache/ + SystemNameIntersystemsCache SystemNameAttr = "intersystems.cache" + // SystemNameCassandra is the [Apache Cassandra]. + // + // [Apache Cassandra]: https://cassandra.apache.org/ + SystemNameCassandra SystemNameAttr = "cassandra" + // SystemNameClickHouse is the [ClickHouse]. + // + // [ClickHouse]: https://clickhouse.com/ + SystemNameClickHouse SystemNameAttr = "clickhouse" + // SystemNameCockroachDB is the [CockroachDB]. + // + // [CockroachDB]: https://www.cockroachlabs.com/ + SystemNameCockroachDB SystemNameAttr = "cockroachdb" + // SystemNameCouchbase is the [Couchbase]. + // + // [Couchbase]: https://www.couchbase.com/ + SystemNameCouchbase SystemNameAttr = "couchbase" + // SystemNameCouchDB is the [Apache CouchDB]. 
+ // + // [Apache CouchDB]: https://couchdb.apache.org/ + SystemNameCouchDB SystemNameAttr = "couchdb" + // SystemNameDerby is the [Apache Derby]. + // + // [Apache Derby]: https://db.apache.org/derby/ + SystemNameDerby SystemNameAttr = "derby" + // SystemNameElasticsearch is the [Elasticsearch]. + // + // [Elasticsearch]: https://www.elastic.co/elasticsearch + SystemNameElasticsearch SystemNameAttr = "elasticsearch" + // SystemNameFirebirdSQL is the [Firebird]. + // + // [Firebird]: https://www.firebirdsql.org/ + SystemNameFirebirdSQL SystemNameAttr = "firebirdsql" + // SystemNameGCPSpanner is the [Google Cloud Spanner]. + // + // [Google Cloud Spanner]: https://cloud.google.com/spanner + SystemNameGCPSpanner SystemNameAttr = "gcp.spanner" + // SystemNameGeode is the [Apache Geode]. + // + // [Apache Geode]: https://geode.apache.org/ + SystemNameGeode SystemNameAttr = "geode" + // SystemNameH2database is the [H2 Database]. + // + // [H2 Database]: https://h2database.com/ + SystemNameH2database SystemNameAttr = "h2database" + // SystemNameHBase is the [Apache HBase]. + // + // [Apache HBase]: https://hbase.apache.org/ + SystemNameHBase SystemNameAttr = "hbase" + // SystemNameHive is the [Apache Hive]. + // + // [Apache Hive]: https://hive.apache.org/ + SystemNameHive SystemNameAttr = "hive" + // SystemNameHSQLDB is the [HyperSQL Database]. + // + // [HyperSQL Database]: https://hsqldb.org/ + SystemNameHSQLDB SystemNameAttr = "hsqldb" + // SystemNameIBMDB2 is the [IBM Db2]. + // + // [IBM Db2]: https://www.ibm.com/db2 + SystemNameIBMDB2 SystemNameAttr = "ibm.db2" + // SystemNameIBMInformix is the [IBM Informix]. + // + // [IBM Informix]: https://www.ibm.com/products/informix + SystemNameIBMInformix SystemNameAttr = "ibm.informix" + // SystemNameIBMNetezza is the [IBM Netezza]. + // + // [IBM Netezza]: https://www.ibm.com/products/netezza + SystemNameIBMNetezza SystemNameAttr = "ibm.netezza" + // SystemNameInfluxDB is the [InfluxDB]. 
+ // + // [InfluxDB]: https://www.influxdata.com/ + SystemNameInfluxDB SystemNameAttr = "influxdb" + // SystemNameInstantDB is the [Instant]. + // + // [Instant]: https://www.instantdb.com/ + SystemNameInstantDB SystemNameAttr = "instantdb" + // SystemNameMariaDB is the [MariaDB]. + // + // [MariaDB]: https://mariadb.org/ + SystemNameMariaDB SystemNameAttr = "mariadb" + // SystemNameMemcached is the [Memcached]. + // + // [Memcached]: https://memcached.org/ + SystemNameMemcached SystemNameAttr = "memcached" + // SystemNameMongoDB is the [MongoDB]. + // + // [MongoDB]: https://www.mongodb.com/ + SystemNameMongoDB SystemNameAttr = "mongodb" + // SystemNameMicrosoftSQLServer is the [Microsoft SQL Server]. + // + // [Microsoft SQL Server]: https://www.microsoft.com/sql-server + SystemNameMicrosoftSQLServer SystemNameAttr = "microsoft.sql_server" + // SystemNameMySQL is the [MySQL]. + // + // [MySQL]: https://www.mysql.com/ + SystemNameMySQL SystemNameAttr = "mysql" + // SystemNameNeo4j is the [Neo4j]. + // + // [Neo4j]: https://neo4j.com/ + SystemNameNeo4j SystemNameAttr = "neo4j" + // SystemNameOpenSearch is the [OpenSearch]. + // + // [OpenSearch]: https://opensearch.org/ + SystemNameOpenSearch SystemNameAttr = "opensearch" + // SystemNameOracleDB is the [Oracle Database]. + // + // [Oracle Database]: https://www.oracle.com/database/ + SystemNameOracleDB SystemNameAttr = "oracle.db" + // SystemNamePostgreSQL is the [PostgreSQL]. + // + // [PostgreSQL]: https://www.postgresql.org/ + SystemNamePostgreSQL SystemNameAttr = "postgresql" + // SystemNameRedis is the [Redis]. + // + // [Redis]: https://redis.io/ + SystemNameRedis SystemNameAttr = "redis" + // SystemNameSAPHANA is the [SAP HANA]. + // + // [SAP HANA]: https://www.sap.com/products/technology-platform/hana/what-is-sap-hana.html + SystemNameSAPHANA SystemNameAttr = "sap.hana" + // SystemNameSAPMaxDB is the [SAP MaxDB]. 
+ // + // [SAP MaxDB]: https://maxdb.sap.com/ + SystemNameSAPMaxDB SystemNameAttr = "sap.maxdb" + // SystemNameSQLite is the [SQLite]. + // + // [SQLite]: https://www.sqlite.org/ + SystemNameSQLite SystemNameAttr = "sqlite" + // SystemNameTeradata is the [Teradata]. + // + // [Teradata]: https://www.teradata.com/ + SystemNameTeradata SystemNameAttr = "teradata" + // SystemNameTrino is the [Trino]. + // + // [Trino]: https://trino.io/ + SystemNameTrino SystemNameAttr = "trino" +) + +// ErrorTypeAttr is an attribute conforming to the error.type semantic +// conventions. It represents the describes a class of error the operation ended +// with. +type ErrorTypeAttr string + +var ( + // ErrorTypeOther is a fallback error value to be used when the instrumentation + // doesn't define a custom value. + ErrorTypeOther ErrorTypeAttr = "_OTHER" +) + +// ClientConnectionCount is an instrument used to record metric values conforming +// to the "db.client.connection.count" semantic conventions. It represents the +// number of connections that are currently in state described by the `state` +// attribute. +type ClientConnectionCount struct { + metric.Int64UpDownCounter +} + +// NewClientConnectionCount returns a new ClientConnectionCount instrument. +func NewClientConnectionCount( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ClientConnectionCount, error) { + // Check if the meter is nil. + if m == nil { + return ClientConnectionCount{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "db.client.connection.count", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of connections that are currently in state described by the `state` attribute"), + metric.WithUnit("{connection}"), + }, opt...)..., + ) + if err != nil { + return ClientConnectionCount{noop.Int64UpDownCounter{}}, err + } + return ClientConnectionCount{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m ClientConnectionCount) Inst() metric.Int64UpDownCounter {
+	return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ClientConnectionCount) Name() string {
+	return "db.client.connection.count"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ClientConnectionCount) Unit() string {
+	return "{connection}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ClientConnectionCount) Description() string {
+	return "The number of connections that are currently in state described by the `state` attribute"
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// The clientConnectionPoolName is the name of the connection pool; unique
+// within the instrumented application. In case the connection pool
+// implementation doesn't provide a name, instrumentation SHOULD use a
+// combination of parameters that would make the name unique, for example,
+// combining attributes `server.address`, `server.port`, and `db.namespace`,
+// formatted as `server.address:server.port/db.namespace`. Instrumentations that
+// generate connection pool name following different patterns SHOULD document it.
+//
+// The clientConnectionState is the state of a connection in the pool
+func (m ClientConnectionCount) Add(
+	ctx context.Context,
+	incr int64,
+	clientConnectionPoolName string,
+	clientConnectionState ClientConnectionStateAttr,
+	attrs ...attribute.KeyValue,
+) {
+	// The pool name and state are required attributes; they must be attached
+	// on every measurement, so there is no attribute-free fast path here
+	// (the previous `len(attrs) == 0` shortcut silently dropped them).
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("db.client.connection.pool.name", clientConnectionPoolName),
+				attribute.String("db.client.connection.state", string(clientConnectionState)),
+			)...,
+		),
+	)
+
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+} + +// AddSet adds incr to the existing count for set. +func (m ClientConnectionCount) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ClientConnectionCreateTime is an instrument used to record metric values +// conforming to the "db.client.connection.create_time" semantic conventions. It +// represents the time it took to create a new connection. +type ClientConnectionCreateTime struct { + metric.Float64Histogram +} + +// NewClientConnectionCreateTime returns a new ClientConnectionCreateTime +// instrument. +func NewClientConnectionCreateTime( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ClientConnectionCreateTime, error) { + // Check if the meter is nil. + if m == nil { + return ClientConnectionCreateTime{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "db.client.connection.create_time", + append([]metric.Float64HistogramOption{ + metric.WithDescription("The time it took to create a new connection"), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ClientConnectionCreateTime{noop.Float64Histogram{}}, err + } + return ClientConnectionCreateTime{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientConnectionCreateTime) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. 
+func (ClientConnectionCreateTime) Name() string {
+	return "db.client.connection.create_time"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ClientConnectionCreateTime) Unit() string {
+	return "s"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ClientConnectionCreateTime) Description() string {
+	return "The time it took to create a new connection"
+}
+
+// Record records val to the current distribution for attrs.
+//
+// The clientConnectionPoolName is the name of the connection pool; unique
+// within the instrumented application. In case the connection pool
+// implementation doesn't provide a name, instrumentation SHOULD use a
+// combination of parameters that would make the name unique, for example,
+// combining attributes `server.address`, `server.port`, and `db.namespace`,
+// formatted as `server.address:server.port/db.namespace`. Instrumentations that
+// generate connection pool name following different patterns SHOULD document it.
+func (m ClientConnectionCreateTime) Record(
+	ctx context.Context,
+	val float64,
+	clientConnectionPoolName string,
+	attrs ...attribute.KeyValue,
+) {
+	// The pool name is a required attribute; it must be attached even when no
+	// additional attrs are passed, so there is no attribute-free fast path
+	// (the previous `len(attrs) == 0` shortcut silently dropped it).
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("db.client.connection.pool.name", clientConnectionPoolName),
+			)...,
+		),
+	)
+
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m ClientConnectionCreateTime) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// ClientConnectionIdleMax is an instrument used to record metric values +// conforming to the "db.client.connection.idle.max" semantic conventions. It +// represents the maximum number of idle open connections allowed. +type ClientConnectionIdleMax struct { + metric.Int64UpDownCounter +} + +// NewClientConnectionIdleMax returns a new ClientConnectionIdleMax instrument. +func NewClientConnectionIdleMax( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ClientConnectionIdleMax, error) { + // Check if the meter is nil. + if m == nil { + return ClientConnectionIdleMax{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "db.client.connection.idle.max", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The maximum number of idle open connections allowed"), + metric.WithUnit("{connection}"), + }, opt...)..., + ) + if err != nil { + return ClientConnectionIdleMax{noop.Int64UpDownCounter{}}, err + } + return ClientConnectionIdleMax{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientConnectionIdleMax) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (ClientConnectionIdleMax) Name() string {
+	return "db.client.connection.idle.max"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ClientConnectionIdleMax) Unit() string {
+	return "{connection}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ClientConnectionIdleMax) Description() string {
+	return "The maximum number of idle open connections allowed"
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// The clientConnectionPoolName is the name of the connection pool; unique
+// within the instrumented application. In case the connection pool
+// implementation doesn't provide a name, instrumentation SHOULD use a
+// combination of parameters that would make the name unique, for example,
+// combining attributes `server.address`, `server.port`, and `db.namespace`,
+// formatted as `server.address:server.port/db.namespace`. Instrumentations that
+// generate connection pool name following different patterns SHOULD document it.
+func (m ClientConnectionIdleMax) Add(
+	ctx context.Context,
+	incr int64,
+	clientConnectionPoolName string,
+	attrs ...attribute.KeyValue,
+) {
+	// The pool name is a required attribute; it must be attached even when no
+	// additional attrs are passed, so there is no attribute-free fast path
+	// (the previous `len(attrs) == 0` shortcut silently dropped it).
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("db.client.connection.pool.name", clientConnectionPoolName),
+			)...,
+		),
+	)
+
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+func (m ClientConnectionIdleMax) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64UpDownCounter.Add(ctx, incr)
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+} + +// ClientConnectionIdleMin is an instrument used to record metric values +// conforming to the "db.client.connection.idle.min" semantic conventions. It +// represents the minimum number of idle open connections allowed. +type ClientConnectionIdleMin struct { + metric.Int64UpDownCounter +} + +// NewClientConnectionIdleMin returns a new ClientConnectionIdleMin instrument. +func NewClientConnectionIdleMin( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ClientConnectionIdleMin, error) { + // Check if the meter is nil. + if m == nil { + return ClientConnectionIdleMin{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "db.client.connection.idle.min", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The minimum number of idle open connections allowed"), + metric.WithUnit("{connection}"), + }, opt...)..., + ) + if err != nil { + return ClientConnectionIdleMin{noop.Int64UpDownCounter{}}, err + } + return ClientConnectionIdleMin{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientConnectionIdleMin) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ClientConnectionIdleMin) Name() string { + return "db.client.connection.idle.min" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientConnectionIdleMin) Unit() string { + return "{connection}" +} + +// Description returns the semantic convention description of the instrument +func (ClientConnectionIdleMin) Description() string { + return "The minimum number of idle open connections allowed" +} + +// Add adds incr to the existing count for attrs. +// +// The clientConnectionPoolName is the the name of the connection pool; unique +// within the instrumented application. 
In case the connection pool +// implementation doesn't provide a name, instrumentation SHOULD use a +// combination of parameters that would make the name unique, for example, +// combining attributes `server.address`, `server.port`, and `db.namespace`, +// formatted as `server.address:server.port/db.namespace`. Instrumentations that +// generate connection pool name following different patterns SHOULD document it. +func (m ClientConnectionIdleMin) Add( + ctx context.Context, + incr int64, + clientConnectionPoolName string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("db.client.connection.pool.name", clientConnectionPoolName), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m ClientConnectionIdleMin) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ClientConnectionMax is an instrument used to record metric values conforming +// to the "db.client.connection.max" semantic conventions. It represents the +// maximum number of open connections allowed. +type ClientConnectionMax struct { + metric.Int64UpDownCounter +} + +// NewClientConnectionMax returns a new ClientConnectionMax instrument. +func NewClientConnectionMax( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ClientConnectionMax, error) { + // Check if the meter is nil. 
+ if m == nil { + return ClientConnectionMax{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "db.client.connection.max", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The maximum number of open connections allowed"), + metric.WithUnit("{connection}"), + }, opt...)..., + ) + if err != nil { + return ClientConnectionMax{noop.Int64UpDownCounter{}}, err + } + return ClientConnectionMax{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientConnectionMax) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ClientConnectionMax) Name() string { + return "db.client.connection.max" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientConnectionMax) Unit() string { + return "{connection}" +} + +// Description returns the semantic convention description of the instrument +func (ClientConnectionMax) Description() string { + return "The maximum number of open connections allowed" +} + +// Add adds incr to the existing count for attrs. +// +// The clientConnectionPoolName is the the name of the connection pool; unique +// within the instrumented application. In case the connection pool +// implementation doesn't provide a name, instrumentation SHOULD use a +// combination of parameters that would make the name unique, for example, +// combining attributes `server.address`, `server.port`, and `db.namespace`, +// formatted as `server.address:server.port/db.namespace`. Instrumentations that +// generate connection pool name following different patterns SHOULD document it. 
+func (m ClientConnectionMax) Add( + ctx context.Context, + incr int64, + clientConnectionPoolName string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("db.client.connection.pool.name", clientConnectionPoolName), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m ClientConnectionMax) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ClientConnectionPendingRequests is an instrument used to record metric values +// conforming to the "db.client.connection.pending_requests" semantic +// conventions. It represents the number of current pending requests for an open +// connection. +type ClientConnectionPendingRequests struct { + metric.Int64UpDownCounter +} + +// NewClientConnectionPendingRequests returns a new +// ClientConnectionPendingRequests instrument. +func NewClientConnectionPendingRequests( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ClientConnectionPendingRequests, error) { + // Check if the meter is nil. 
+ if m == nil { + return ClientConnectionPendingRequests{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "db.client.connection.pending_requests", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of current pending requests for an open connection"), + metric.WithUnit("{request}"), + }, opt...)..., + ) + if err != nil { + return ClientConnectionPendingRequests{noop.Int64UpDownCounter{}}, err + } + return ClientConnectionPendingRequests{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientConnectionPendingRequests) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ClientConnectionPendingRequests) Name() string { + return "db.client.connection.pending_requests" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientConnectionPendingRequests) Unit() string { + return "{request}" +} + +// Description returns the semantic convention description of the instrument +func (ClientConnectionPendingRequests) Description() string { + return "The number of current pending requests for an open connection" +} + +// Add adds incr to the existing count for attrs. +// +// The clientConnectionPoolName is the the name of the connection pool; unique +// within the instrumented application. In case the connection pool +// implementation doesn't provide a name, instrumentation SHOULD use a +// combination of parameters that would make the name unique, for example, +// combining attributes `server.address`, `server.port`, and `db.namespace`, +// formatted as `server.address:server.port/db.namespace`. Instrumentations that +// generate connection pool name following different patterns SHOULD document it. 
+func (m ClientConnectionPendingRequests) Add( + ctx context.Context, + incr int64, + clientConnectionPoolName string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("db.client.connection.pool.name", clientConnectionPoolName), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m ClientConnectionPendingRequests) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ClientConnectionTimeouts is an instrument used to record metric values +// conforming to the "db.client.connection.timeouts" semantic conventions. It +// represents the number of connection timeouts that have occurred trying to +// obtain a connection from the pool. +type ClientConnectionTimeouts struct { + metric.Int64Counter +} + +// NewClientConnectionTimeouts returns a new ClientConnectionTimeouts instrument. +func NewClientConnectionTimeouts( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (ClientConnectionTimeouts, error) { + // Check if the meter is nil. 
+ if m == nil { + return ClientConnectionTimeouts{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "db.client.connection.timeouts", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of connection timeouts that have occurred trying to obtain a connection from the pool"), + metric.WithUnit("{timeout}"), + }, opt...)..., + ) + if err != nil { + return ClientConnectionTimeouts{noop.Int64Counter{}}, err + } + return ClientConnectionTimeouts{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientConnectionTimeouts) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (ClientConnectionTimeouts) Name() string { + return "db.client.connection.timeouts" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientConnectionTimeouts) Unit() string { + return "{timeout}" +} + +// Description returns the semantic convention description of the instrument +func (ClientConnectionTimeouts) Description() string { + return "The number of connection timeouts that have occurred trying to obtain a connection from the pool" +} + +// Add adds incr to the existing count for attrs. +// +// The clientConnectionPoolName is the the name of the connection pool; unique +// within the instrumented application. In case the connection pool +// implementation doesn't provide a name, instrumentation SHOULD use a +// combination of parameters that would make the name unique, for example, +// combining attributes `server.address`, `server.port`, and `db.namespace`, +// formatted as `server.address:server.port/db.namespace`. Instrumentations that +// generate connection pool name following different patterns SHOULD document it. 
+func (m ClientConnectionTimeouts) Add( + ctx context.Context, + incr int64, + clientConnectionPoolName string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("db.client.connection.pool.name", clientConnectionPoolName), + )..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m ClientConnectionTimeouts) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// ClientConnectionUseTime is an instrument used to record metric values +// conforming to the "db.client.connection.use_time" semantic conventions. It +// represents the time between borrowing a connection and returning it to the +// pool. +type ClientConnectionUseTime struct { + metric.Float64Histogram +} + +// NewClientConnectionUseTime returns a new ClientConnectionUseTime instrument. +func NewClientConnectionUseTime( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ClientConnectionUseTime, error) { + // Check if the meter is nil. 
+ if m == nil { + return ClientConnectionUseTime{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "db.client.connection.use_time", + append([]metric.Float64HistogramOption{ + metric.WithDescription("The time between borrowing a connection and returning it to the pool"), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ClientConnectionUseTime{noop.Float64Histogram{}}, err + } + return ClientConnectionUseTime{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientConnectionUseTime) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientConnectionUseTime) Name() string { + return "db.client.connection.use_time" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientConnectionUseTime) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (ClientConnectionUseTime) Description() string { + return "The time between borrowing a connection and returning it to the pool" +} + +// Record records val to the current distribution for attrs. +// +// The clientConnectionPoolName is the the name of the connection pool; unique +// within the instrumented application. In case the connection pool +// implementation doesn't provide a name, instrumentation SHOULD use a +// combination of parameters that would make the name unique, for example, +// combining attributes `server.address`, `server.port`, and `db.namespace`, +// formatted as `server.address:server.port/db.namespace`. Instrumentations that +// generate connection pool name following different patterns SHOULD document it. 
+func (m ClientConnectionUseTime) Record( + ctx context.Context, + val float64, + clientConnectionPoolName string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("db.client.connection.pool.name", clientConnectionPoolName), + )..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +func (m ClientConnectionUseTime) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// ClientConnectionWaitTime is an instrument used to record metric values +// conforming to the "db.client.connection.wait_time" semantic conventions. It +// represents the time it took to obtain an open connection from the pool. +type ClientConnectionWaitTime struct { + metric.Float64Histogram +} + +// NewClientConnectionWaitTime returns a new ClientConnectionWaitTime instrument. +func NewClientConnectionWaitTime( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ClientConnectionWaitTime, error) { + // Check if the meter is nil. 
+ if m == nil { + return ClientConnectionWaitTime{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "db.client.connection.wait_time", + append([]metric.Float64HistogramOption{ + metric.WithDescription("The time it took to obtain an open connection from the pool"), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ClientConnectionWaitTime{noop.Float64Histogram{}}, err + } + return ClientConnectionWaitTime{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientConnectionWaitTime) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientConnectionWaitTime) Name() string { + return "db.client.connection.wait_time" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientConnectionWaitTime) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (ClientConnectionWaitTime) Description() string { + return "The time it took to obtain an open connection from the pool" +} + +// Record records val to the current distribution for attrs. +// +// The clientConnectionPoolName is the the name of the connection pool; unique +// within the instrumented application. In case the connection pool +// implementation doesn't provide a name, instrumentation SHOULD use a +// combination of parameters that would make the name unique, for example, +// combining attributes `server.address`, `server.port`, and `db.namespace`, +// formatted as `server.address:server.port/db.namespace`. Instrumentations that +// generate connection pool name following different patterns SHOULD document it. 
+func (m ClientConnectionWaitTime) Record( + ctx context.Context, + val float64, + clientConnectionPoolName string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("db.client.connection.pool.name", clientConnectionPoolName), + )..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +func (m ClientConnectionWaitTime) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// ClientOperationDuration is an instrument used to record metric values +// conforming to the "db.client.operation.duration" semantic conventions. It +// represents the duration of database client operations. +type ClientOperationDuration struct { + metric.Float64Histogram +} + +// NewClientOperationDuration returns a new ClientOperationDuration instrument. +func NewClientOperationDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ClientOperationDuration, error) { + // Check if the meter is nil. 
+ if m == nil { + return ClientOperationDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "db.client.operation.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("Duration of database client operations."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ClientOperationDuration{noop.Float64Histogram{}}, err + } + return ClientOperationDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientOperationDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientOperationDuration) Name() string { + return "db.client.operation.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientOperationDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (ClientOperationDuration) Description() string { + return "Duration of database client operations." +} + +// Record records val to the current distribution for attrs. +// +// The systemName is the the database management system (DBMS) product as +// identified by the client instrumentation. +// +// All additional attrs passed are included in the recorded value. +// +// Batch operations SHOULD be recorded as a single operation. +func (m ClientOperationDuration) Record( + ctx context.Context, + val float64, + systemName SystemNameAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("db.system.name", string(systemName)), + )..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+//
+// Batch operations SHOULD be recorded as a single operation.
+func (m ClientOperationDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		// Return early: without this the value falls through and is
+		// recorded a second time below (matches the AddSet pattern).
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrCollectionName returns an optional attribute for the "db.collection.name"
+// semantic convention. It represents the name of a collection (table, container)
+// within the database.
+func (ClientOperationDuration) AttrCollectionName(val string) attribute.KeyValue {
+	return attribute.String("db.collection.name", val)
+}
+
+// AttrNamespace returns an optional attribute for the "db.namespace" semantic
+// convention. It represents the name of the database, fully qualified within the
+// server address and port.
+func (ClientOperationDuration) AttrNamespace(val string) attribute.KeyValue {
+	return attribute.String("db.namespace", val)
+}
+
+// AttrOperationName returns an optional attribute for the "db.operation.name"
+// semantic convention. It represents the name of the operation or command being
+// executed.
+func (ClientOperationDuration) AttrOperationName(val string) attribute.KeyValue {
+	return attribute.String("db.operation.name", val)
+}
+
+// AttrResponseStatusCode returns an optional attribute for the
+// "db.response.status_code" semantic convention. It represents the database
+// response status code.
+func (ClientOperationDuration) AttrResponseStatusCode(val string) attribute.KeyValue {
+	return attribute.String("db.response.status_code", val)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents the describes a class of error the operation ended
+// with.
+func (ClientOperationDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (ClientOperationDuration) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// AttrQuerySummary returns an optional attribute for the "db.query.summary" +// semantic convention. It represents the low cardinality summary of a database +// query. +func (ClientOperationDuration) AttrQuerySummary(val string) attribute.KeyValue { + return attribute.String("db.query.summary", val) +} + +// AttrStoredProcedureName returns an optional attribute for the +// "db.stored_procedure.name" semantic convention. It represents the name of a +// stored procedure within the database. +func (ClientOperationDuration) AttrStoredProcedureName(val string) attribute.KeyValue { + return attribute.String("db.stored_procedure.name", val) +} + +// AttrNetworkPeerAddress returns an optional attribute for the +// "network.peer.address" semantic convention. It represents the peer address of +// the database node where the operation was performed. +func (ClientOperationDuration) AttrNetworkPeerAddress(val string) attribute.KeyValue { + return attribute.String("network.peer.address", val) +} + +// AttrNetworkPeerPort returns an optional attribute for the "network.peer.port" +// semantic convention. It represents the peer port number of the network +// connection. +func (ClientOperationDuration) AttrNetworkPeerPort(val int) attribute.KeyValue { + return attribute.Int("network.peer.port", val) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the name of the database host. 
+func (ClientOperationDuration) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrQueryText returns an optional attribute for the "db.query.text" semantic +// convention. It represents the database query being executed. +func (ClientOperationDuration) AttrQueryText(val string) attribute.KeyValue { + return attribute.String("db.query.text", val) +} + +// ClientResponseReturnedRows is an instrument used to record metric values +// conforming to the "db.client.response.returned_rows" semantic conventions. It +// represents the actual number of records returned by the database operation. +type ClientResponseReturnedRows struct { + metric.Int64Histogram +} + +// NewClientResponseReturnedRows returns a new ClientResponseReturnedRows +// instrument. +func NewClientResponseReturnedRows( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ClientResponseReturnedRows, error) { + // Check if the meter is nil. + if m == nil { + return ClientResponseReturnedRows{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "db.client.response.returned_rows", + append([]metric.Int64HistogramOption{ + metric.WithDescription("The actual number of records returned by the database operation."), + metric.WithUnit("{row}"), + }, opt...)..., + ) + if err != nil { + return ClientResponseReturnedRows{noop.Int64Histogram{}}, err + } + return ClientResponseReturnedRows{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientResponseReturnedRows) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. 
+func (ClientResponseReturnedRows) Name() string {
+	return "db.client.response.returned_rows"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ClientResponseReturnedRows) Unit() string {
+	return "{row}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ClientResponseReturnedRows) Description() string {
+	return "The actual number of records returned by the database operation."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// The systemName is the database management system (DBMS) product as
+// identified by the client instrumentation.
+//
+// All additional attrs passed are included in the recorded value.
+func (m ClientResponseReturnedRows) Record(
+	ctx context.Context,
+	val int64,
+	systemName SystemNameAttr,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("db.system.name", string(systemName)),
+			)...,
+		),
+	)
+
+	m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m ClientResponseReturnedRows) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Histogram.Record(ctx, val)
+		// Return early: without this the value falls through and is
+		// recorded a second time below (matches the AddSet pattern).
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrCollectionName returns an optional attribute for the "db.collection.name"
+// semantic convention. It represents the name of a collection (table, container)
+// within the database.
+func (ClientResponseReturnedRows) AttrCollectionName(val string) attribute.KeyValue { + return attribute.String("db.collection.name", val) +} + +// AttrNamespace returns an optional attribute for the "db.namespace" semantic +// convention. It represents the name of the database, fully qualified within the +// server address and port. +func (ClientResponseReturnedRows) AttrNamespace(val string) attribute.KeyValue { + return attribute.String("db.namespace", val) +} + +// AttrOperationName returns an optional attribute for the "db.operation.name" +// semantic convention. It represents the name of the operation or command being +// executed. +func (ClientResponseReturnedRows) AttrOperationName(val string) attribute.KeyValue { + return attribute.String("db.operation.name", val) +} + +// AttrResponseStatusCode returns an optional attribute for the +// "db.response.status_code" semantic convention. It represents the database +// response status code. +func (ClientResponseReturnedRows) AttrResponseStatusCode(val string) attribute.KeyValue { + return attribute.String("db.response.status_code", val) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (ClientResponseReturnedRows) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (ClientResponseReturnedRows) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// AttrQuerySummary returns an optional attribute for the "db.query.summary" +// semantic convention. It represents the low cardinality summary of a database +// query. 
+func (ClientResponseReturnedRows) AttrQuerySummary(val string) attribute.KeyValue {
+	return attribute.String("db.query.summary", val)
+}
+
+// AttrNetworkPeerAddress returns an optional attribute for the
+// "network.peer.address" semantic convention. It represents the peer address of
+// the database node where the operation was performed.
+func (ClientResponseReturnedRows) AttrNetworkPeerAddress(val string) attribute.KeyValue {
+	return attribute.String("network.peer.address", val)
+}
+
+// AttrNetworkPeerPort returns an optional attribute for the "network.peer.port"
+// semantic convention. It represents the peer port number of the network
+// connection.
+func (ClientResponseReturnedRows) AttrNetworkPeerPort(val int) attribute.KeyValue {
+	return attribute.Int("network.peer.port", val)
+}
+
+// AttrServerAddress returns an optional attribute for the "server.address"
+// semantic convention. It represents the name of the database host.
+func (ClientResponseReturnedRows) AttrServerAddress(val string) attribute.KeyValue {
+	return attribute.String("server.address", val)
+}
+
+// AttrQueryText returns an optional attribute for the "db.query.text" semantic
+// convention. It represents the database query being executed.
+func (ClientResponseReturnedRows) AttrQueryText(val string) attribute.KeyValue {
+	return attribute.String("db.query.text", val)
+}
\ No newline at end of file
diff --git a/semconv/v1.36.0/dnsconv/metric.go b/semconv/v1.36.0/dnsconv/metric.go
new file mode 100644
index 00000000000..b5348a23ffd
--- /dev/null
+++ b/semconv/v1.36.0/dnsconv/metric.go
@@ -0,0 +1,139 @@
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package dnsconv provides types and functionality for OpenTelemetry semantic
+// conventions in the "dns" namespace.
+package dnsconv + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" +) + +var ( + addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }} + recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }} +) + +// ErrorTypeAttr is an attribute conforming to the error.type semantic +// conventions. It represents the describes the error the DNS lookup failed with. +type ErrorTypeAttr string + +var ( + // ErrorTypeOther is a fallback error value to be used when the instrumentation + // doesn't define a custom value. + ErrorTypeOther ErrorTypeAttr = "_OTHER" +) + +// LookupDuration is an instrument used to record metric values conforming to the +// "dns.lookup.duration" semantic conventions. It represents the measures the +// time taken to perform a DNS lookup. +type LookupDuration struct { + metric.Float64Histogram +} + +// NewLookupDuration returns a new LookupDuration instrument. +func NewLookupDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (LookupDuration, error) { + // Check if the meter is nil. + if m == nil { + return LookupDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "dns.lookup.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("Measures the time taken to perform a DNS lookup."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return LookupDuration{noop.Float64Histogram{}}, err + } + return LookupDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m LookupDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. 
+func (LookupDuration) Name() string { + return "dns.lookup.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (LookupDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (LookupDuration) Description() string { + return "Measures the time taken to perform a DNS lookup." +} + +// Record records val to the current distribution for attrs. +// +// The questionName is the name being queried. +// +// The required dns.question.name attribute is always included; all additional +// attrs passed are included in the recorded value. +func (m LookupDuration) Record( + ctx context.Context, + val float64, + questionName string, + attrs ...attribute.KeyValue, +) { + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("dns.question.name", questionName), + )..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +func (m LookupDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes the error the DNS lookup failed with.
+func (LookupDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} \ No newline at end of file diff --git a/semconv/v1.36.0/doc.go b/semconv/v1.36.0/doc.go new file mode 100644 index 00000000000..964a9239e21 --- /dev/null +++ b/semconv/v1.36.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the v1.36.0 +// version of the OpenTelemetry semantic conventions. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.36.0" diff --git a/semconv/v1.36.0/error_type.go b/semconv/v1.36.0/error_type.go new file mode 100644 index 00000000000..c1fa7afc6c6 --- /dev/null +++ b/semconv/v1.36.0/error_type.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.36.0" + +import ( + "fmt" + "reflect" + + "go.opentelemetry.io/otel/attribute" +) + +// ErrorType returns an [attribute.KeyValue] identifying the error type of err. +func ErrorType(err error) attribute.KeyValue { + if err == nil { + return ErrorTypeOther + } + t := reflect.TypeOf(err) + var value string + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. 
+ value = t.String() + } else { + value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) + } + + if value == "" { + return ErrorTypeOther + } + return ErrorTypeKey.String(value) +} diff --git a/semconv/v1.36.0/error_type_test.go b/semconv/v1.36.0/error_type_test.go new file mode 100644 index 00000000000..71c039e7458 --- /dev/null +++ b/semconv/v1.36.0/error_type_test.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.36.0" + +import ( + "errors" + "fmt" + "reflect" + "testing" + + "go.opentelemetry.io/otel/attribute" +) + +type CustomError struct{} + +func (CustomError) Error() string { + return "custom error" +} + +func TestErrorType(t *testing.T) { + customErr := CustomError{} + builtinErr := errors.New("something went wrong") + var nilErr error + + wantCustomType := reflect.TypeOf(customErr) + wantCustomStr := fmt.Sprintf("%s.%s", wantCustomType.PkgPath(), wantCustomType.Name()) + + tests := []struct { + name string + err error + want attribute.KeyValue + }{ + { + name: "BuiltinError", + err: builtinErr, + want: attribute.String("error.type", "*errors.errorString"), + }, + { + name: "CustomError", + err: customErr, + want: attribute.String("error.type", wantCustomStr), + }, + { + name: "NilError", + err: nilErr, + want: ErrorTypeOther, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := ErrorType(tt.err) + if got != tt.want { + t.Errorf("ErrorType(%v) = %v, want %v", tt.err, got, tt.want) + } + }) + } +} diff --git a/semconv/v1.36.0/exception.go b/semconv/v1.36.0/exception.go new file mode 100644 index 00000000000..65dedebfced --- /dev/null +++ b/semconv/v1.36.0/exception.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.36.0" + +const ( + // ExceptionEventName is the name of the Span event representing 
an exception. + ExceptionEventName = "exception" +) diff --git a/semconv/v1.36.0/faasconv/metric.go b/semconv/v1.36.0/faasconv/metric.go new file mode 100644 index 00000000000..c952a507ef3 --- /dev/null +++ b/semconv/v1.36.0/faasconv/metric.go @@ -0,0 +1,964 @@ +// Code generated from semantic convention specification. DO NOT EDIT. + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package faasconv provides types and functionality for OpenTelemetry semantic +// conventions in the "faas" namespace. +package faasconv + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" +) + +var ( + addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }} + recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }} +) + +// TriggerAttr is an attribute conforming to the faas.trigger semantic +// conventions. It represents the type of the trigger which caused this function +// invocation. +type TriggerAttr string + +var ( + // TriggerDatasource is a response to some data source operation such as a + // database or filesystem read/write. + TriggerDatasource TriggerAttr = "datasource" + // TriggerHTTP is the to provide an answer to an inbound HTTP request. + TriggerHTTP TriggerAttr = "http" + // TriggerPubSub is a function is set to be executed when messages are sent to a + // messaging system. + TriggerPubSub TriggerAttr = "pubsub" + // TriggerTimer is a function is scheduled to be executed regularly. + TriggerTimer TriggerAttr = "timer" + // TriggerOther is the if none of the others apply. + TriggerOther TriggerAttr = "other" +) + +// Coldstarts is an instrument used to record metric values conforming to the +// "faas.coldstarts" semantic conventions. It represents the number of invocation +// cold starts. +type Coldstarts struct { + metric.Int64Counter +} + +// NewColdstarts returns a new Coldstarts instrument.
+func NewColdstarts( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (Coldstarts, error) { + // Check if the meter is nil. + if m == nil { + return Coldstarts{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "faas.coldstarts", + append([]metric.Int64CounterOption{ + metric.WithDescription("Number of invocation cold starts"), + metric.WithUnit("{coldstart}"), + }, opt...)..., + ) + if err != nil { + return Coldstarts{noop.Int64Counter{}}, err + } + return Coldstarts{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m Coldstarts) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (Coldstarts) Name() string { + return "faas.coldstarts" +} + +// Unit returns the semantic convention unit of the instrument +func (Coldstarts) Unit() string { + return "{coldstart}" +} + +// Description returns the semantic convention description of the instrument +func (Coldstarts) Description() string { + return "Number of invocation cold starts" +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m Coldstarts) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m Coldstarts) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) 
+} + +// AttrTrigger returns an optional attribute for the "faas.trigger" semantic +// convention. It represents the type of the trigger which caused this function +// invocation. +func (Coldstarts) AttrTrigger(val TriggerAttr) attribute.KeyValue { + return attribute.String("faas.trigger", string(val)) +} + +// CPUUsage is an instrument used to record metric values conforming to the +// "faas.cpu_usage" semantic conventions. It represents the distribution of CPU +// usage per invocation. +type CPUUsage struct { + metric.Float64Histogram +} + +// NewCPUUsage returns a new CPUUsage instrument. +func NewCPUUsage( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (CPUUsage, error) { + // Check if the meter is nil. + if m == nil { + return CPUUsage{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "faas.cpu_usage", + append([]metric.Float64HistogramOption{ + metric.WithDescription("Distribution of CPU usage per invocation"), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return CPUUsage{noop.Float64Histogram{}}, err + } + return CPUUsage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m CPUUsage) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (CPUUsage) Name() string { + return "faas.cpu_usage" +} + +// Unit returns the semantic convention unit of the instrument +func (CPUUsage) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (CPUUsage) Description() string { + return "Distribution of CPU usage per invocation" +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. 
+func (m CPUUsage) Record( + ctx context.Context, + val float64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +func (m CPUUsage) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// AttrTrigger returns an optional attribute for the "faas.trigger" semantic +// convention. It represents the type of the trigger which caused this function +// invocation. +func (CPUUsage) AttrTrigger(val TriggerAttr) attribute.KeyValue { + return attribute.String("faas.trigger", string(val)) +} + +// Errors is an instrument used to record metric values conforming to the +// "faas.errors" semantic conventions. It represents the number of invocation +// errors. +type Errors struct { + metric.Int64Counter +} + +// NewErrors returns a new Errors instrument. +func NewErrors( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (Errors, error) { + // Check if the meter is nil. + if m == nil { + return Errors{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "faas.errors", + append([]metric.Int64CounterOption{ + metric.WithDescription("Number of invocation errors"), + metric.WithUnit("{error}"), + }, opt...)..., + ) + if err != nil { + return Errors{noop.Int64Counter{}}, err + } + return Errors{i}, nil +} + +// Inst returns the underlying metric instrument.
+func (m Errors) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (Errors) Name() string { + return "faas.errors" +} + +// Unit returns the semantic convention unit of the instrument +func (Errors) Unit() string { + return "{error}" +} + +// Description returns the semantic convention description of the instrument +func (Errors) Description() string { + return "Number of invocation errors" +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m Errors) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m Errors) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrTrigger returns an optional attribute for the "faas.trigger" semantic +// convention. It represents the type of the trigger which caused this function +// invocation. +func (Errors) AttrTrigger(val TriggerAttr) attribute.KeyValue { + return attribute.String("faas.trigger", string(val)) +} + +// InitDuration is an instrument used to record metric values conforming to the +// "faas.init_duration" semantic conventions. It represents the measures the +// duration of the function's initialization, such as a cold start. 
+type InitDuration struct { + metric.Float64Histogram +} + +// NewInitDuration returns a new InitDuration instrument. +func NewInitDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (InitDuration, error) { + // Check if the meter is nil. + if m == nil { + return InitDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "faas.init_duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("Measures the duration of the function's initialization, such as a cold start"), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return InitDuration{noop.Float64Histogram{}}, err + } + return InitDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m InitDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (InitDuration) Name() string { + return "faas.init_duration" +} + +// Unit returns the semantic convention unit of the instrument +func (InitDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (InitDuration) Description() string { + return "Measures the duration of the function's initialization, such as a cold start" +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m InitDuration) Record( + ctx context.Context, + val float64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+func (m InitDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// AttrTrigger returns an optional attribute for the "faas.trigger" semantic +// convention. It represents the type of the trigger which caused this function +// invocation. +func (InitDuration) AttrTrigger(val TriggerAttr) attribute.KeyValue { + return attribute.String("faas.trigger", string(val)) +} + +// Invocations is an instrument used to record metric values conforming to the +// "faas.invocations" semantic conventions. It represents the number of +// successful invocations. +type Invocations struct { + metric.Int64Counter +} + +// NewInvocations returns a new Invocations instrument. +func NewInvocations( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (Invocations, error) { + // Check if the meter is nil. + if m == nil { + return Invocations{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "faas.invocations", + append([]metric.Int64CounterOption{ + metric.WithDescription("Number of successful invocations"), + metric.WithUnit("{invocation}"), + }, opt...)..., + ) + if err != nil { + return Invocations{noop.Int64Counter{}}, err + } + return Invocations{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m Invocations) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument.
+func (Invocations) Name() string { + return "faas.invocations" +} + +// Unit returns the semantic convention unit of the instrument +func (Invocations) Unit() string { + return "{invocation}" +} + +// Description returns the semantic convention description of the instrument +func (Invocations) Description() string { + return "Number of successful invocations" +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m Invocations) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m Invocations) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrTrigger returns an optional attribute for the "faas.trigger" semantic +// convention. It represents the type of the trigger which caused this function +// invocation. +func (Invocations) AttrTrigger(val TriggerAttr) attribute.KeyValue { + return attribute.String("faas.trigger", string(val)) +} + +// InvokeDuration is an instrument used to record metric values conforming to the +// "faas.invoke_duration" semantic conventions. It represents the measures the +// duration of the function's logic execution. +type InvokeDuration struct { + metric.Float64Histogram +} + +// NewInvokeDuration returns a new InvokeDuration instrument. 
+func NewInvokeDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (InvokeDuration, error) { + // Check if the meter is nil. + if m == nil { + return InvokeDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "faas.invoke_duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("Measures the duration of the function's logic execution"), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return InvokeDuration{noop.Float64Histogram{}}, err + } + return InvokeDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m InvokeDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (InvokeDuration) Name() string { + return "faas.invoke_duration" +} + +// Unit returns the semantic convention unit of the instrument +func (InvokeDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (InvokeDuration) Description() string { + return "Measures the duration of the function's logic execution" +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m InvokeDuration) Record( + ctx context.Context, + val float64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+func (m InvokeDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// AttrTrigger returns an optional attribute for the "faas.trigger" semantic +// convention. It represents the type of the trigger which caused this function +// invocation. +func (InvokeDuration) AttrTrigger(val TriggerAttr) attribute.KeyValue { + return attribute.String("faas.trigger", string(val)) +} + +// MemUsage is an instrument used to record metric values conforming to the +// "faas.mem_usage" semantic conventions. It represents the distribution of max +// memory usage per invocation. +type MemUsage struct { + metric.Int64Histogram +} + +// NewMemUsage returns a new MemUsage instrument. +func NewMemUsage( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (MemUsage, error) { + // Check if the meter is nil. + if m == nil { + return MemUsage{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "faas.mem_usage", + append([]metric.Int64HistogramOption{ + metric.WithDescription("Distribution of max memory usage per invocation"), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return MemUsage{noop.Int64Histogram{}}, err + } + return MemUsage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m MemUsage) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument.
+func (MemUsage) Name() string { + return "faas.mem_usage" +} + +// Unit returns the semantic convention unit of the instrument +func (MemUsage) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (MemUsage) Description() string { + return "Distribution of max memory usage per invocation" +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m MemUsage) Record( + ctx context.Context, + val int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +func (m MemUsage) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// AttrTrigger returns an optional attribute for the "faas.trigger" semantic +// convention. It represents the type of the trigger which caused this function +// invocation. +func (MemUsage) AttrTrigger(val TriggerAttr) attribute.KeyValue { + return attribute.String("faas.trigger", string(val)) +} + +// NetIO is an instrument used to record metric values conforming to the +// "faas.net_io" semantic conventions. It represents the distribution of net I/O +// usage per invocation. +type NetIO struct { + metric.Int64Histogram +} + +// NewNetIO returns a new NetIO instrument.
+func NewNetIO( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (NetIO, error) { + // Check if the meter is nil. + if m == nil { + return NetIO{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "faas.net_io", + append([]metric.Int64HistogramOption{ + metric.WithDescription("Distribution of net I/O usage per invocation"), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return NetIO{noop.Int64Histogram{}}, err + } + return NetIO{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NetIO) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (NetIO) Name() string { + return "faas.net_io" +} + +// Unit returns the semantic convention unit of the instrument +func (NetIO) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (NetIO) Description() string { + return "Distribution of net I/O usage per invocation" +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m NetIO) Record( + ctx context.Context, + val int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +func (m NetIO) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...)
+} + +// AttrTrigger returns an optional attribute for the "faas.trigger" semantic +// convention. It represents the type of the trigger which caused this function +// invocation. +func (NetIO) AttrTrigger(val TriggerAttr) attribute.KeyValue { + return attribute.String("faas.trigger", string(val)) +} + +// Timeouts is an instrument used to record metric values conforming to the +// "faas.timeouts" semantic conventions. It represents the number of invocation +// timeouts. +type Timeouts struct { + metric.Int64Counter +} + +// NewTimeouts returns a new Timeouts instrument. +func NewTimeouts( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (Timeouts, error) { + // Check if the meter is nil. + if m == nil { + return Timeouts{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "faas.timeouts", + append([]metric.Int64CounterOption{ + metric.WithDescription("Number of invocation timeouts"), + metric.WithUnit("{timeout}"), + }, opt...)..., + ) + if err != nil { + return Timeouts{noop.Int64Counter{}}, err + } + return Timeouts{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m Timeouts) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (Timeouts) Name() string { + return "faas.timeouts" +} + +// Unit returns the semantic convention unit of the instrument +func (Timeouts) Unit() string { + return "{timeout}" +} + +// Description returns the semantic convention description of the instrument +func (Timeouts) Description() string { + return "Number of invocation timeouts" +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. 
+func (m Timeouts) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m Timeouts) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrTrigger returns an optional attribute for the "faas.trigger" semantic +// convention. It represents the type of the trigger which caused this function +// invocation. +func (Timeouts) AttrTrigger(val TriggerAttr) attribute.KeyValue { + return attribute.String("faas.trigger", string(val)) +} \ No newline at end of file diff --git a/semconv/v1.36.0/genaiconv/metric.go b/semconv/v1.36.0/genaiconv/metric.go new file mode 100644 index 00000000000..7b049c1ca11 --- /dev/null +++ b/semconv/v1.36.0/genaiconv/metric.go @@ -0,0 +1,795 @@ +// Code generated from semantic convention specification. DO NOT EDIT. + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package genaiconv provides types and functionality for OpenTelemetry semantic +// conventions in the "gen_ai" namespace.
+package genaiconv + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" +) + +var ( + addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }} + recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }} +) + +// ErrorTypeAttr is an attribute conforming to the error.type semantic +// conventions. It represents the describes a class of error the operation ended +// with. +type ErrorTypeAttr string + +var ( + // ErrorTypeOther is a fallback error value to be used when the instrumentation + // doesn't define a custom value. + ErrorTypeOther ErrorTypeAttr = "_OTHER" +) + +// OperationNameAttr is an attribute conforming to the gen_ai.operation.name +// semantic conventions. It represents the name of the operation being performed. +type OperationNameAttr string + +var ( + // OperationNameChat is the chat completion operation such as [OpenAI Chat API] + // . + // + // [OpenAI Chat API]: https://platform.openai.com/docs/api-reference/chat + OperationNameChat OperationNameAttr = "chat" + // OperationNameGenerateContent is the multimodal content generation operation + // such as [Gemini Generate Content]. + // + // [Gemini Generate Content]: https://ai.google.dev/api/generate-content + OperationNameGenerateContent OperationNameAttr = "generate_content" + // OperationNameTextCompletion is the text completions operation such as + // [OpenAI Completions API (Legacy)]. + // + // [OpenAI Completions API (Legacy)]: https://platform.openai.com/docs/api-reference/completions + OperationNameTextCompletion OperationNameAttr = "text_completion" + // OperationNameEmbeddings is the embeddings operation such as + // [OpenAI Create embeddings API]. 
+ // + // [OpenAI Create embeddings API]: https://platform.openai.com/docs/api-reference/embeddings/create + OperationNameEmbeddings OperationNameAttr = "embeddings" + // OperationNameCreateAgent is the create GenAI agent. + OperationNameCreateAgent OperationNameAttr = "create_agent" + // OperationNameInvokeAgent is the invoke GenAI agent. + OperationNameInvokeAgent OperationNameAttr = "invoke_agent" + // OperationNameExecuteTool is the execute a tool. + OperationNameExecuteTool OperationNameAttr = "execute_tool" +) + +// SystemAttr is an attribute conforming to the gen_ai.system semantic +// conventions. It represents the Generative AI product as identified by the +// client or server instrumentation. +type SystemAttr string + +var ( + // SystemOpenAI is the openAI. + SystemOpenAI SystemAttr = "openai" + // SystemGCPGenAI is the any Google generative AI endpoint. + SystemGCPGenAI SystemAttr = "gcp.gen_ai" + // SystemGCPVertexAI is the vertex AI. + SystemGCPVertexAI SystemAttr = "gcp.vertex_ai" + // SystemGCPGemini is the gemini. + SystemGCPGemini SystemAttr = "gcp.gemini" + // SystemAnthropic is the anthropic. + SystemAnthropic SystemAttr = "anthropic" + // SystemCohere is the cohere. + SystemCohere SystemAttr = "cohere" + // SystemAzureAIInference is the azure AI Inference. + SystemAzureAIInference SystemAttr = "azure.ai.inference" + // SystemAzureAIOpenAI is the azure OpenAI. + SystemAzureAIOpenAI SystemAttr = "azure.ai.openai" + // SystemIBMWatsonxAI is the IBM Watsonx AI. + SystemIBMWatsonxAI SystemAttr = "ibm.watsonx.ai" + // SystemAWSBedrock is the AWS Bedrock. + SystemAWSBedrock SystemAttr = "aws.bedrock" + // SystemPerplexity is the perplexity. + SystemPerplexity SystemAttr = "perplexity" + // SystemXai is the xAI. + SystemXai SystemAttr = "xai" + // SystemDeepseek is the deepSeek. + SystemDeepseek SystemAttr = "deepseek" + // SystemGroq is the groq. + SystemGroq SystemAttr = "groq" + // SystemMistralAI is the mistral AI. 
+ SystemMistralAI SystemAttr = "mistral_ai" +) + +// TokenTypeAttr is an attribute conforming to the gen_ai.token.type semantic +// conventions. It represents the type of token being counted. +type TokenTypeAttr string + +var ( + // TokenTypeInput is the input tokens (prompt, input, etc.). + TokenTypeInput TokenTypeAttr = "input" + // TokenTypeOutput is the output tokens (completion, response, etc.). + TokenTypeOutput TokenTypeAttr = "output" +) + +// ClientOperationDuration is an instrument used to record metric values +// conforming to the "gen_ai.client.operation.duration" semantic conventions. It +// represents the genAI operation duration. +type ClientOperationDuration struct { + metric.Float64Histogram +} + +// NewClientOperationDuration returns a new ClientOperationDuration instrument. +func NewClientOperationDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ClientOperationDuration, error) { + // Check if the meter is nil. + if m == nil { + return ClientOperationDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "gen_ai.client.operation.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("GenAI operation duration"), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ClientOperationDuration{noop.Float64Histogram{}}, err + } + return ClientOperationDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientOperationDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. 
+func (ClientOperationDuration) Name() string {
+	return "gen_ai.client.operation.duration"
+}
+
+// Unit returns the semantic convention unit of the instrument.
+func (ClientOperationDuration) Unit() string {
+	return "s"
+}
+
+// Description returns the semantic convention description of the instrument.
+func (ClientOperationDuration) Description() string {
+	return "GenAI operation duration"
+}
+
+// Record records val to the current distribution for attrs.
+//
+// The operationName is the name of the operation being performed.
+//
+// The system is the Generative AI product as identified by the client or
+// server instrumentation.
+//
+// All additional attrs passed are included in the recorded value.
+func (m ClientOperationDuration) Record(
+	ctx context.Context,
+	val float64,
+	operationName OperationNameAttr,
+	system SystemAttr,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("gen_ai.operation.name", string(operationName)),
+				attribute.String("gen_ai.system", string(system)),
+			)...,
+		),
+	)
+
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m ClientOperationDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		// Return early so the value is not recorded a second time below.
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents the class of error the operation ended with.
+func (ClientOperationDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrRequestModel returns an optional attribute for the "gen_ai.request.model" +// semantic convention. It represents the name of the GenAI model a request is +// being made to. +func (ClientOperationDuration) AttrRequestModel(val string) attribute.KeyValue { + return attribute.String("gen_ai.request.model", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the genAI server port. +func (ClientOperationDuration) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// AttrResponseModel returns an optional attribute for the +// "gen_ai.response.model" semantic convention. It represents the name of the +// model that generated the response. +func (ClientOperationDuration) AttrResponseModel(val string) attribute.KeyValue { + return attribute.String("gen_ai.response.model", val) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the genAI server address. +func (ClientOperationDuration) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// ClientTokenUsage is an instrument used to record metric values conforming to +// the "gen_ai.client.token.usage" semantic conventions. It represents the +// measures number of input and output tokens used. +type ClientTokenUsage struct { + metric.Int64Histogram +} + +// NewClientTokenUsage returns a new ClientTokenUsage instrument. +func NewClientTokenUsage( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ClientTokenUsage, error) { + // Check if the meter is nil. 
+ if m == nil { + return ClientTokenUsage{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "gen_ai.client.token.usage", + append([]metric.Int64HistogramOption{ + metric.WithDescription("Measures number of input and output tokens used"), + metric.WithUnit("{token}"), + }, opt...)..., + ) + if err != nil { + return ClientTokenUsage{noop.Int64Histogram{}}, err + } + return ClientTokenUsage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientTokenUsage) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientTokenUsage) Name() string { + return "gen_ai.client.token.usage" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientTokenUsage) Unit() string { + return "{token}" +} + +// Description returns the semantic convention description of the instrument +func (ClientTokenUsage) Description() string { + return "Measures number of input and output tokens used" +} + +// Record records val to the current distribution for attrs. +// +// The operationName is the the name of the operation being performed. +// +// The system is the the Generative AI product as identified by the client or +// server instrumentation. +// +// The tokenType is the the type of token being counted. +// +// All additional attrs passed are included in the recorded value. 
+func (m ClientTokenUsage) Record(
+	ctx context.Context,
+	val int64,
+	operationName OperationNameAttr,
+	system SystemAttr,
+	tokenType TokenTypeAttr,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("gen_ai.operation.name", string(operationName)),
+				attribute.String("gen_ai.system", string(system)),
+				attribute.String("gen_ai.token.type", string(tokenType)),
+			)...,
+		),
+	)
+
+	m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m ClientTokenUsage) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Histogram.Record(ctx, val)
+		// Return early so the value is not recorded a second time below.
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrRequestModel returns an optional attribute for the "gen_ai.request.model"
+// semantic convention. It represents the name of the GenAI model a request is
+// being made to.
+func (ClientTokenUsage) AttrRequestModel(val string) attribute.KeyValue {
+	return attribute.String("gen_ai.request.model", val)
+}
+
+// AttrServerPort returns an optional attribute for the "server.port" semantic
+// convention. It represents the genAI server port.
+func (ClientTokenUsage) AttrServerPort(val int) attribute.KeyValue {
+	return attribute.Int("server.port", val)
+}
+
+// AttrResponseModel returns an optional attribute for the
+// "gen_ai.response.model" semantic convention. It represents the name of the
+// model that generated the response.
+func (ClientTokenUsage) AttrResponseModel(val string) attribute.KeyValue { + return attribute.String("gen_ai.response.model", val) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the genAI server address. +func (ClientTokenUsage) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// ServerRequestDuration is an instrument used to record metric values conforming +// to the "gen_ai.server.request.duration" semantic conventions. It represents +// the generative AI server request duration such as time-to-last byte or last +// output token. +type ServerRequestDuration struct { + metric.Float64Histogram +} + +// NewServerRequestDuration returns a new ServerRequestDuration instrument. +func NewServerRequestDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ServerRequestDuration, error) { + // Check if the meter is nil. + if m == nil { + return ServerRequestDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "gen_ai.server.request.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("Generative AI server request duration such as time-to-last byte or last output token"), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ServerRequestDuration{noop.Float64Histogram{}}, err + } + return ServerRequestDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerRequestDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. 
+func (ServerRequestDuration) Name() string {
+	return "gen_ai.server.request.duration"
+}
+
+// Unit returns the semantic convention unit of the instrument.
+func (ServerRequestDuration) Unit() string {
+	return "s"
+}
+
+// Description returns the semantic convention description of the instrument.
+func (ServerRequestDuration) Description() string {
+	return "Generative AI server request duration such as time-to-last byte or last output token"
+}
+
+// Record records val to the current distribution for attrs.
+//
+// The operationName is the name of the operation being performed.
+//
+// The system is the Generative AI product as identified by the client or
+// server instrumentation.
+//
+// All additional attrs passed are included in the recorded value.
+func (m ServerRequestDuration) Record(
+	ctx context.Context,
+	val float64,
+	operationName OperationNameAttr,
+	system SystemAttr,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("gen_ai.operation.name", string(operationName)),
+				attribute.String("gen_ai.system", string(system)),
+			)...,
+		),
+	)
+
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m ServerRequestDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		// Return early so the value is not recorded a second time below.
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents the class of error the operation ended with.
+func (ServerRequestDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrRequestModel returns an optional attribute for the "gen_ai.request.model" +// semantic convention. It represents the name of the GenAI model a request is +// being made to. +func (ServerRequestDuration) AttrRequestModel(val string) attribute.KeyValue { + return attribute.String("gen_ai.request.model", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the genAI server port. +func (ServerRequestDuration) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// AttrResponseModel returns an optional attribute for the +// "gen_ai.response.model" semantic convention. It represents the name of the +// model that generated the response. +func (ServerRequestDuration) AttrResponseModel(val string) attribute.KeyValue { + return attribute.String("gen_ai.response.model", val) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the genAI server address. +func (ServerRequestDuration) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// ServerTimePerOutputToken is an instrument used to record metric values +// conforming to the "gen_ai.server.time_per_output_token" semantic conventions. +// It represents the time per output token generated after the first token for +// successful responses. +type ServerTimePerOutputToken struct { + metric.Float64Histogram +} + +// NewServerTimePerOutputToken returns a new ServerTimePerOutputToken instrument. +func NewServerTimePerOutputToken( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ServerTimePerOutputToken, error) { + // Check if the meter is nil. 
+ if m == nil { + return ServerTimePerOutputToken{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "gen_ai.server.time_per_output_token", + append([]metric.Float64HistogramOption{ + metric.WithDescription("Time per output token generated after the first token for successful responses"), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ServerTimePerOutputToken{noop.Float64Histogram{}}, err + } + return ServerTimePerOutputToken{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerTimePerOutputToken) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ServerTimePerOutputToken) Name() string { + return "gen_ai.server.time_per_output_token" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerTimePerOutputToken) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (ServerTimePerOutputToken) Description() string { + return "Time per output token generated after the first token for successful responses" +} + +// Record records val to the current distribution for attrs. +// +// The operationName is the the name of the operation being performed. +// +// The system is the the Generative AI product as identified by the client or +// server instrumentation. +// +// All additional attrs passed are included in the recorded value. 
+func (m ServerTimePerOutputToken) Record(
+	ctx context.Context,
+	val float64,
+	operationName OperationNameAttr,
+	system SystemAttr,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("gen_ai.operation.name", string(operationName)),
+				attribute.String("gen_ai.system", string(system)),
+			)...,
+		),
+	)
+
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m ServerTimePerOutputToken) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		// Return early so the value is not recorded a second time below.
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrRequestModel returns an optional attribute for the "gen_ai.request.model"
+// semantic convention. It represents the name of the GenAI model a request is
+// being made to.
+func (ServerTimePerOutputToken) AttrRequestModel(val string) attribute.KeyValue {
+	return attribute.String("gen_ai.request.model", val)
+}
+
+// AttrServerPort returns an optional attribute for the "server.port" semantic
+// convention. It represents the genAI server port.
+func (ServerTimePerOutputToken) AttrServerPort(val int) attribute.KeyValue {
+	return attribute.Int("server.port", val)
+}
+
+// AttrResponseModel returns an optional attribute for the
+// "gen_ai.response.model" semantic convention. It represents the name of the
+// model that generated the response.
+func (ServerTimePerOutputToken) AttrResponseModel(val string) attribute.KeyValue { + return attribute.String("gen_ai.response.model", val) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the genAI server address. +func (ServerTimePerOutputToken) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// ServerTimeToFirstToken is an instrument used to record metric values +// conforming to the "gen_ai.server.time_to_first_token" semantic conventions. It +// represents the time to generate first token for successful responses. +type ServerTimeToFirstToken struct { + metric.Float64Histogram +} + +// NewServerTimeToFirstToken returns a new ServerTimeToFirstToken instrument. +func NewServerTimeToFirstToken( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ServerTimeToFirstToken, error) { + // Check if the meter is nil. + if m == nil { + return ServerTimeToFirstToken{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "gen_ai.server.time_to_first_token", + append([]metric.Float64HistogramOption{ + metric.WithDescription("Time to generate first token for successful responses"), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ServerTimeToFirstToken{noop.Float64Histogram{}}, err + } + return ServerTimeToFirstToken{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerTimeToFirstToken) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. 
+func (ServerTimeToFirstToken) Name() string {
+	return "gen_ai.server.time_to_first_token"
+}
+
+// Unit returns the semantic convention unit of the instrument.
+func (ServerTimeToFirstToken) Unit() string {
+	return "s"
+}
+
+// Description returns the semantic convention description of the instrument.
+func (ServerTimeToFirstToken) Description() string {
+	return "Time to generate first token for successful responses"
+}
+
+// Record records val to the current distribution for attrs.
+//
+// The operationName is the name of the operation being performed.
+//
+// The system is the Generative AI product as identified by the client or
+// server instrumentation.
+//
+// All additional attrs passed are included in the recorded value.
+func (m ServerTimeToFirstToken) Record(
+	ctx context.Context,
+	val float64,
+	operationName OperationNameAttr,
+	system SystemAttr,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("gen_ai.operation.name", string(operationName)),
+				attribute.String("gen_ai.system", string(system)),
+			)...,
+		),
+	)
+
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m ServerTimeToFirstToken) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		// Return early so the value is not recorded a second time below.
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrRequestModel returns an optional attribute for the "gen_ai.request.model"
+// semantic convention. It represents the name of the GenAI model a request is
+// being made to.
+func (ServerTimeToFirstToken) AttrRequestModel(val string) attribute.KeyValue { + return attribute.String("gen_ai.request.model", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the genAI server port. +func (ServerTimeToFirstToken) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// AttrResponseModel returns an optional attribute for the +// "gen_ai.response.model" semantic convention. It represents the name of the +// model that generated the response. +func (ServerTimeToFirstToken) AttrResponseModel(val string) attribute.KeyValue { + return attribute.String("gen_ai.response.model", val) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the genAI server address. +func (ServerTimeToFirstToken) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} \ No newline at end of file diff --git a/semconv/v1.36.0/goconv/metric.go b/semconv/v1.36.0/goconv/metric.go new file mode 100644 index 00000000000..fe9e2933e17 --- /dev/null +++ b/semconv/v1.36.0/goconv/metric.go @@ -0,0 +1,531 @@ +// Code generated from semantic convention specification. DO NOT EDIT. + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package httpconv provides types and functionality for OpenTelemetry semantic +// conventions in the "go" namespace. +package goconv + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" +) + +var ( + addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }} + recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }} +) + +// MemoryTypeAttr is an attribute conforming to the go.memory.type semantic +// conventions. It represents the type of memory. 
+type MemoryTypeAttr string + +var ( + // MemoryTypeStack is the memory allocated from the heap that is reserved for + // stack space, whether or not it is currently in-use. + MemoryTypeStack MemoryTypeAttr = "stack" + // MemoryTypeOther is the memory used by the Go runtime, excluding other + // categories of memory usage described in this enumeration. + MemoryTypeOther MemoryTypeAttr = "other" +) + +// ConfigGogc is an instrument used to record metric values conforming to the +// "go.config.gogc" semantic conventions. It represents the heap size target +// percentage configured by the user, otherwise 100. +type ConfigGogc struct { + metric.Int64ObservableUpDownCounter +} + +// NewConfigGogc returns a new ConfigGogc instrument. +func NewConfigGogc( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (ConfigGogc, error) { + // Check if the meter is nil. + if m == nil { + return ConfigGogc{noop.Int64ObservableUpDownCounter{}}, nil + } + + i, err := m.Int64ObservableUpDownCounter( + "go.config.gogc", + append([]metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("Heap size target percentage configured by the user, otherwise 100."), + metric.WithUnit("%"), + }, opt...)..., + ) + if err != nil { + return ConfigGogc{noop.Int64ObservableUpDownCounter{}}, err + } + return ConfigGogc{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ConfigGogc) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ConfigGogc) Name() string { + return "go.config.gogc" +} + +// Unit returns the semantic convention unit of the instrument +func (ConfigGogc) Unit() string { + return "%" +} + +// Description returns the semantic convention description of the instrument +func (ConfigGogc) Description() string { + return "Heap size target percentage configured by the user, otherwise 100." 
+} + +// GoroutineCount is an instrument used to record metric values conforming to the +// "go.goroutine.count" semantic conventions. It represents the count of live +// goroutines. +type GoroutineCount struct { + metric.Int64ObservableUpDownCounter +} + +// NewGoroutineCount returns a new GoroutineCount instrument. +func NewGoroutineCount( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (GoroutineCount, error) { + // Check if the meter is nil. + if m == nil { + return GoroutineCount{noop.Int64ObservableUpDownCounter{}}, nil + } + + i, err := m.Int64ObservableUpDownCounter( + "go.goroutine.count", + append([]metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("Count of live goroutines."), + metric.WithUnit("{goroutine}"), + }, opt...)..., + ) + if err != nil { + return GoroutineCount{noop.Int64ObservableUpDownCounter{}}, err + } + return GoroutineCount{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m GoroutineCount) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (GoroutineCount) Name() string { + return "go.goroutine.count" +} + +// Unit returns the semantic convention unit of the instrument +func (GoroutineCount) Unit() string { + return "{goroutine}" +} + +// Description returns the semantic convention description of the instrument +func (GoroutineCount) Description() string { + return "Count of live goroutines." +} + +// MemoryAllocated is an instrument used to record metric values conforming to +// the "go.memory.allocated" semantic conventions. It represents the memory +// allocated to the heap by the application. +type MemoryAllocated struct { + metric.Int64ObservableCounter +} + +// NewMemoryAllocated returns a new MemoryAllocated instrument. 
+func NewMemoryAllocated( + m metric.Meter, + opt ...metric.Int64ObservableCounterOption, +) (MemoryAllocated, error) { + // Check if the meter is nil. + if m == nil { + return MemoryAllocated{noop.Int64ObservableCounter{}}, nil + } + + i, err := m.Int64ObservableCounter( + "go.memory.allocated", + append([]metric.Int64ObservableCounterOption{ + metric.WithDescription("Memory allocated to the heap by the application."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return MemoryAllocated{noop.Int64ObservableCounter{}}, err + } + return MemoryAllocated{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m MemoryAllocated) Inst() metric.Int64ObservableCounter { + return m.Int64ObservableCounter +} + +// Name returns the semantic convention name of the instrument. +func (MemoryAllocated) Name() string { + return "go.memory.allocated" +} + +// Unit returns the semantic convention unit of the instrument +func (MemoryAllocated) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (MemoryAllocated) Description() string { + return "Memory allocated to the heap by the application." +} + +// MemoryAllocations is an instrument used to record metric values conforming to +// the "go.memory.allocations" semantic conventions. It represents the count of +// allocations to the heap by the application. +type MemoryAllocations struct { + metric.Int64ObservableCounter +} + +// NewMemoryAllocations returns a new MemoryAllocations instrument. +func NewMemoryAllocations( + m metric.Meter, + opt ...metric.Int64ObservableCounterOption, +) (MemoryAllocations, error) { + // Check if the meter is nil. 
+ if m == nil { + return MemoryAllocations{noop.Int64ObservableCounter{}}, nil + } + + i, err := m.Int64ObservableCounter( + "go.memory.allocations", + append([]metric.Int64ObservableCounterOption{ + metric.WithDescription("Count of allocations to the heap by the application."), + metric.WithUnit("{allocation}"), + }, opt...)..., + ) + if err != nil { + return MemoryAllocations{noop.Int64ObservableCounter{}}, err + } + return MemoryAllocations{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m MemoryAllocations) Inst() metric.Int64ObservableCounter { + return m.Int64ObservableCounter +} + +// Name returns the semantic convention name of the instrument. +func (MemoryAllocations) Name() string { + return "go.memory.allocations" +} + +// Unit returns the semantic convention unit of the instrument +func (MemoryAllocations) Unit() string { + return "{allocation}" +} + +// Description returns the semantic convention description of the instrument +func (MemoryAllocations) Description() string { + return "Count of allocations to the heap by the application." +} + +// MemoryGCGoal is an instrument used to record metric values conforming to the +// "go.memory.gc.goal" semantic conventions. It represents the heap size target +// for the end of the GC cycle. +type MemoryGCGoal struct { + metric.Int64ObservableUpDownCounter +} + +// NewMemoryGCGoal returns a new MemoryGCGoal instrument. +func NewMemoryGCGoal( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (MemoryGCGoal, error) { + // Check if the meter is nil. 
+ if m == nil { + return MemoryGCGoal{noop.Int64ObservableUpDownCounter{}}, nil + } + + i, err := m.Int64ObservableUpDownCounter( + "go.memory.gc.goal", + append([]metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("Heap size target for the end of the GC cycle."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return MemoryGCGoal{noop.Int64ObservableUpDownCounter{}}, err + } + return MemoryGCGoal{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m MemoryGCGoal) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (MemoryGCGoal) Name() string { + return "go.memory.gc.goal" +} + +// Unit returns the semantic convention unit of the instrument +func (MemoryGCGoal) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (MemoryGCGoal) Description() string { + return "Heap size target for the end of the GC cycle." +} + +// MemoryLimit is an instrument used to record metric values conforming to the +// "go.memory.limit" semantic conventions. It represents the go runtime memory +// limit configured by the user, if a limit exists. +type MemoryLimit struct { + metric.Int64ObservableUpDownCounter +} + +// NewMemoryLimit returns a new MemoryLimit instrument. +func NewMemoryLimit( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (MemoryLimit, error) { + // Check if the meter is nil. 
+ if m == nil { + return MemoryLimit{noop.Int64ObservableUpDownCounter{}}, nil + } + + i, err := m.Int64ObservableUpDownCounter( + "go.memory.limit", + append([]metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("Go runtime memory limit configured by the user, if a limit exists."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return MemoryLimit{noop.Int64ObservableUpDownCounter{}}, err + } + return MemoryLimit{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m MemoryLimit) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (MemoryLimit) Name() string { + return "go.memory.limit" +} + +// Unit returns the semantic convention unit of the instrument +func (MemoryLimit) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (MemoryLimit) Description() string { + return "Go runtime memory limit configured by the user, if a limit exists." +} + +// MemoryUsed is an instrument used to record metric values conforming to the +// "go.memory.used" semantic conventions. It represents the memory used by the Go +// runtime. +type MemoryUsed struct { + metric.Int64ObservableUpDownCounter +} + +// NewMemoryUsed returns a new MemoryUsed instrument. +func NewMemoryUsed( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (MemoryUsed, error) { + // Check if the meter is nil. 
+ if m == nil { + return MemoryUsed{noop.Int64ObservableUpDownCounter{}}, nil + } + + i, err := m.Int64ObservableUpDownCounter( + "go.memory.used", + append([]metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("Memory used by the Go runtime."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return MemoryUsed{noop.Int64ObservableUpDownCounter{}}, err + } + return MemoryUsed{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m MemoryUsed) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (MemoryUsed) Name() string { + return "go.memory.used" +} + +// Unit returns the semantic convention unit of the instrument +func (MemoryUsed) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (MemoryUsed) Description() string { + return "Memory used by the Go runtime." +} + +// AttrMemoryType returns an optional attribute for the "go.memory.type" semantic +// convention. It represents the type of memory. +func (MemoryUsed) AttrMemoryType(val MemoryTypeAttr) attribute.KeyValue { + return attribute.String("go.memory.type", string(val)) +} + +// ProcessorLimit is an instrument used to record metric values conforming to the +// "go.processor.limit" semantic conventions. It represents the number of OS +// threads that can execute user-level Go code simultaneously. +type ProcessorLimit struct { + metric.Int64ObservableUpDownCounter +} + +// NewProcessorLimit returns a new ProcessorLimit instrument. +func NewProcessorLimit( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (ProcessorLimit, error) { + // Check if the meter is nil. 
+ if m == nil { + return ProcessorLimit{noop.Int64ObservableUpDownCounter{}}, nil + } + + i, err := m.Int64ObservableUpDownCounter( + "go.processor.limit", + append([]metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The number of OS threads that can execute user-level Go code simultaneously."), + metric.WithUnit("{thread}"), + }, opt...)..., + ) + if err != nil { + return ProcessorLimit{noop.Int64ObservableUpDownCounter{}}, err + } + return ProcessorLimit{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ProcessorLimit) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ProcessorLimit) Name() string { + return "go.processor.limit" +} + +// Unit returns the semantic convention unit of the instrument +func (ProcessorLimit) Unit() string { + return "{thread}" +} + +// Description returns the semantic convention description of the instrument +func (ProcessorLimit) Description() string { + return "The number of OS threads that can execute user-level Go code simultaneously." +} + +// ScheduleDuration is an instrument used to record metric values conforming to +// the "go.schedule.duration" semantic conventions. It represents the time +// goroutines have spent in the scheduler in a runnable state before actually +// running. +type ScheduleDuration struct { + metric.Float64Histogram +} + +// NewScheduleDuration returns a new ScheduleDuration instrument. +func NewScheduleDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ScheduleDuration, error) { + // Check if the meter is nil. 
+ if m == nil { + return ScheduleDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "go.schedule.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("The time goroutines have spent in the scheduler in a runnable state before actually running."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ScheduleDuration{noop.Float64Histogram{}}, err + } + return ScheduleDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ScheduleDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ScheduleDuration) Name() string { + return "go.schedule.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (ScheduleDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (ScheduleDuration) Description() string { + return "The time goroutines have spent in the scheduler in a runnable state before actually running." +} + +// Record records val to the current distribution for attrs. +// +// Computed from `/sched/latencies:seconds`. Bucket boundaries are provided by +// the runtime, and are subject to change. +func (m ScheduleDuration) Record(ctx context.Context, val float64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// Computed from `/sched/latencies:seconds`. Bucket boundaries are provided by +// the runtime, and are subject to change. 
+func (m ScheduleDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		// Return early: without this the value would be recorded a second
+		// time below with an empty attribute set (double-count bug).
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
\ No newline at end of file
diff --git a/semconv/v1.36.0/httpconv/metric.go b/semconv/v1.36.0/httpconv/metric.go
new file mode 100644
index 00000000000..552763e31ae
--- /dev/null
+++ b/semconv/v1.36.0/httpconv/metric.go
@@ -0,0 +1,1662 @@
+// Code generated from semantic convention specification. DO NOT EDIT.

+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0

+// Package httpconv provides types and functionality for OpenTelemetry semantic
+// conventions in the "http" namespace.
+package httpconv
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/noop"
+)
+
+var (
+	addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }}
+	recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }}
+)
+
+// ErrorTypeAttr is an attribute conforming to the error.type semantic
+// conventions. It represents the describes a class of error the operation ended
+// with.
+type ErrorTypeAttr string
+
+var (
+	// ErrorTypeOther is a fallback error value to be used when the instrumentation
+	// doesn't define a custom value.
+	ErrorTypeOther ErrorTypeAttr = "_OTHER"
+)
+
+// ConnectionStateAttr is an attribute conforming to the http.connection.state
+// semantic conventions. It represents the state of the HTTP connection in the
+// HTTP connection pool.
+type ConnectionStateAttr string
+
+var (
+	// ConnectionStateActive is the active state.
+	ConnectionStateActive ConnectionStateAttr = "active"
+	// ConnectionStateIdle is the idle state.
+ ConnectionStateIdle ConnectionStateAttr = "idle" +) + +// RequestMethodAttr is an attribute conforming to the http.request.method +// semantic conventions. It represents the HTTP request method. +type RequestMethodAttr string + +var ( + // RequestMethodConnect is the CONNECT method. + RequestMethodConnect RequestMethodAttr = "CONNECT" + // RequestMethodDelete is the DELETE method. + RequestMethodDelete RequestMethodAttr = "DELETE" + // RequestMethodGet is the GET method. + RequestMethodGet RequestMethodAttr = "GET" + // RequestMethodHead is the HEAD method. + RequestMethodHead RequestMethodAttr = "HEAD" + // RequestMethodOptions is the OPTIONS method. + RequestMethodOptions RequestMethodAttr = "OPTIONS" + // RequestMethodPatch is the PATCH method. + RequestMethodPatch RequestMethodAttr = "PATCH" + // RequestMethodPost is the POST method. + RequestMethodPost RequestMethodAttr = "POST" + // RequestMethodPut is the PUT method. + RequestMethodPut RequestMethodAttr = "PUT" + // RequestMethodTrace is the TRACE method. + RequestMethodTrace RequestMethodAttr = "TRACE" + // RequestMethodOther is the any HTTP method that the instrumentation has no + // prior knowledge of. + RequestMethodOther RequestMethodAttr = "_OTHER" +) + +// UserAgentSyntheticTypeAttr is an attribute conforming to the +// user_agent.synthetic.type semantic conventions. It represents the specifies +// the category of synthetic traffic, such as tests or bots. +type UserAgentSyntheticTypeAttr string + +var ( + // UserAgentSyntheticTypeBot is the bot source. + UserAgentSyntheticTypeBot UserAgentSyntheticTypeAttr = "bot" + // UserAgentSyntheticTypeTest is the synthetic test source. + UserAgentSyntheticTypeTest UserAgentSyntheticTypeAttr = "test" +) + +// ClientActiveRequests is an instrument used to record metric values conforming +// to the "http.client.active_requests" semantic conventions. It represents the +// number of active HTTP requests. 
+type ClientActiveRequests struct { + metric.Int64UpDownCounter +} + +// NewClientActiveRequests returns a new ClientActiveRequests instrument. +func NewClientActiveRequests( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ClientActiveRequests, error) { + // Check if the meter is nil. + if m == nil { + return ClientActiveRequests{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "http.client.active_requests", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of active HTTP requests."), + metric.WithUnit("{request}"), + }, opt...)..., + ) + if err != nil { + return ClientActiveRequests{noop.Int64UpDownCounter{}}, err + } + return ClientActiveRequests{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientActiveRequests) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ClientActiveRequests) Name() string { + return "http.client.active_requests" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientActiveRequests) Unit() string { + return "{request}" +} + +// Description returns the semantic convention description of the instrument +func (ClientActiveRequests) Description() string { + return "Number of active HTTP requests." +} + +// Add adds incr to the existing count for attrs. +// +// The serverAddress is the server domain name if available without reverse DNS +// lookup; otherwise, IP address or Unix domain socket name. +// +// The serverPort is the port identifier of the ["URI origin"] HTTP request is +// sent to. +// +// All additional attrs passed are included in the recorded value. 
+// +// ["URI origin"]: https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin +func (m ClientActiveRequests) Add( + ctx context.Context, + incr int64, + serverAddress string, + serverPort int, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("server.address", serverAddress), + attribute.Int("server.port", serverPort), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m ClientActiveRequests) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrURLTemplate returns an optional attribute for the "url.template" semantic +// convention. It represents the low-cardinality template of an +// [absolute path reference]. +// +// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 +func (ClientActiveRequests) AttrURLTemplate(val string) attribute.KeyValue { + return attribute.String("url.template", val) +} + +// AttrRequestMethod returns an optional attribute for the "http.request.method" +// semantic convention. It represents the HTTP request method. +func (ClientActiveRequests) AttrRequestMethod(val RequestMethodAttr) attribute.KeyValue { + return attribute.String("http.request.method", string(val)) +} + +// AttrURLScheme returns an optional attribute for the "url.scheme" semantic +// convention. It represents the [URI scheme] component identifying the used +// protocol. 
+// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +func (ClientActiveRequests) AttrURLScheme(val string) attribute.KeyValue { + return attribute.String("url.scheme", val) +} + +// ClientConnectionDuration is an instrument used to record metric values +// conforming to the "http.client.connection.duration" semantic conventions. It +// represents the duration of the successfully established outbound HTTP +// connections. +type ClientConnectionDuration struct { + metric.Float64Histogram +} + +// NewClientConnectionDuration returns a new ClientConnectionDuration instrument. +func NewClientConnectionDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ClientConnectionDuration, error) { + // Check if the meter is nil. + if m == nil { + return ClientConnectionDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "http.client.connection.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("The duration of the successfully established outbound HTTP connections."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ClientConnectionDuration{noop.Float64Histogram{}}, err + } + return ClientConnectionDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientConnectionDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientConnectionDuration) Name() string { + return "http.client.connection.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientConnectionDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (ClientConnectionDuration) Description() string { + return "The duration of the successfully established outbound HTTP connections." +} + +// Record records val to the current distribution for attrs. 
+//
+// The serverAddress is the server domain name if available without reverse DNS
+// lookup; otherwise, IP address or Unix domain socket name.
+//
+// The serverPort is the port identifier of the ["URI origin"] HTTP request is
+// sent to.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// ["URI origin"]: https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin
+func (m ClientConnectionDuration) Record(
+	ctx context.Context,
+	val float64,
+	serverAddress string,
+	serverPort int,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("server.address", serverAddress),
+				attribute.Int("server.port", serverPort),
+			)...,
+		),
+	)
+
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m ClientConnectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		// Return early: without this the value would be recorded a second
+		// time below with an empty attribute set (double-count bug).
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrNetworkPeerAddress returns an optional attribute for the
+// "network.peer.address" semantic convention. It represents the peer address of
+// the network connection - IP address or Unix domain socket name.
+func (ClientConnectionDuration) AttrNetworkPeerAddress(val string) attribute.KeyValue {
+	return attribute.String("network.peer.address", val)
+}
+
+// AttrNetworkProtocolVersion returns an optional attribute for the
+// "network.protocol.version" semantic convention. It represents the actual
+// version of the protocol used for network communication.
+func (ClientConnectionDuration) AttrNetworkProtocolVersion(val string) attribute.KeyValue { + return attribute.String("network.protocol.version", val) +} + +// AttrURLScheme returns an optional attribute for the "url.scheme" semantic +// convention. It represents the [URI scheme] component identifying the used +// protocol. +// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +func (ClientConnectionDuration) AttrURLScheme(val string) attribute.KeyValue { + return attribute.String("url.scheme", val) +} + +// ClientOpenConnections is an instrument used to record metric values conforming +// to the "http.client.open_connections" semantic conventions. It represents the +// number of outbound HTTP connections that are currently active or idle on the +// client. +type ClientOpenConnections struct { + metric.Int64UpDownCounter +} + +// NewClientOpenConnections returns a new ClientOpenConnections instrument. +func NewClientOpenConnections( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ClientOpenConnections, error) { + // Check if the meter is nil. + if m == nil { + return ClientOpenConnections{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "http.client.open_connections", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of outbound HTTP connections that are currently active or idle on the client."), + metric.WithUnit("{connection}"), + }, opt...)..., + ) + if err != nil { + return ClientOpenConnections{noop.Int64UpDownCounter{}}, err + } + return ClientOpenConnections{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientOpenConnections) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (ClientOpenConnections) Name() string { + return "http.client.open_connections" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientOpenConnections) Unit() string { + return "{connection}" +} + +// Description returns the semantic convention description of the instrument +func (ClientOpenConnections) Description() string { + return "Number of outbound HTTP connections that are currently active or idle on the client." +} + +// Add adds incr to the existing count for attrs. +// +// The connectionState is the state of the HTTP connection in the HTTP connection +// pool. +// +// The serverAddress is the server domain name if available without reverse DNS +// lookup; otherwise, IP address or Unix domain socket name. +// +// The serverPort is the port identifier of the ["URI origin"] HTTP request is +// sent to. +// +// All additional attrs passed are included in the recorded value. +// +// ["URI origin"]: https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin +func (m ClientOpenConnections) Add( + ctx context.Context, + incr int64, + connectionState ConnectionStateAttr, + serverAddress string, + serverPort int, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("http.connection.state", string(connectionState)), + attribute.String("server.address", serverAddress), + attribute.Int("server.port", serverPort), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+func (m ClientOpenConnections) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrNetworkPeerAddress returns an optional attribute for the +// "network.peer.address" semantic convention. It represents the peer address of +// the network connection - IP address or Unix domain socket name. +func (ClientOpenConnections) AttrNetworkPeerAddress(val string) attribute.KeyValue { + return attribute.String("network.peer.address", val) +} + +// AttrNetworkProtocolVersion returns an optional attribute for the +// "network.protocol.version" semantic convention. It represents the actual +// version of the protocol used for network communication. +func (ClientOpenConnections) AttrNetworkProtocolVersion(val string) attribute.KeyValue { + return attribute.String("network.protocol.version", val) +} + +// AttrURLScheme returns an optional attribute for the "url.scheme" semantic +// convention. It represents the [URI scheme] component identifying the used +// protocol. +// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +func (ClientOpenConnections) AttrURLScheme(val string) attribute.KeyValue { + return attribute.String("url.scheme", val) +} + +// ClientRequestBodySize is an instrument used to record metric values conforming +// to the "http.client.request.body.size" semantic conventions. It represents the +// size of HTTP client request bodies. +type ClientRequestBodySize struct { + metric.Int64Histogram +} + +// NewClientRequestBodySize returns a new ClientRequestBodySize instrument. +func NewClientRequestBodySize( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ClientRequestBodySize, error) { + // Check if the meter is nil. 
+ if m == nil { + return ClientRequestBodySize{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "http.client.request.body.size", + append([]metric.Int64HistogramOption{ + metric.WithDescription("Size of HTTP client request bodies."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ClientRequestBodySize{noop.Int64Histogram{}}, err + } + return ClientRequestBodySize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientRequestBodySize) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientRequestBodySize) Name() string { + return "http.client.request.body.size" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientRequestBodySize) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ClientRequestBodySize) Description() string { + return "Size of HTTP client request bodies." +} + +// Record records val to the current distribution for attrs. +// +// The requestMethod is the HTTP request method. +// +// The serverAddress is the host identifier of the ["URI origin"] HTTP request is +// sent to. +// +// The serverPort is the port identifier of the ["URI origin"] HTTP request is +// sent to. +// +// All additional attrs passed are included in the recorded value. +// +// ["URI origin"]: https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin +// ["URI origin"]: https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin +// +// The size of the request payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. 
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ClientRequestBodySize) Record(
+	ctx context.Context,
+	val int64,
+	requestMethod RequestMethodAttr,
+	serverAddress string,
+	serverPort int,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("http.request.method", string(requestMethod)),
+				attribute.String("server.address", serverAddress),
+				attribute.Int("server.port", serverPort),
+			)...,
+		),
+	)
+
+	m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// The size of the request payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ClientRequestBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Histogram.Record(ctx, val)
+		// Return early: without this the value would be recorded a second
+		// time below with an empty attribute set (double-count bug).
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents the describes a class of error the operation ended
+// with.
+func (ClientRequestBodySize) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+	return attribute.String("error.type", string(val))
+}
+
+// AttrResponseStatusCode returns an optional attribute for the
+// "http.response.status_code" semantic convention.
It represents the +// [HTTP response status code]. +// +// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 +func (ClientRequestBodySize) AttrResponseStatusCode(val int) attribute.KeyValue { + return attribute.Int("http.response.status_code", val) +} + +// AttrNetworkProtocolName returns an optional attribute for the +// "network.protocol.name" semantic convention. It represents the +// [OSI application layer] or non-OSI equivalent. +// +// [OSI application layer]: https://wikipedia.org/wiki/Application_layer +func (ClientRequestBodySize) AttrNetworkProtocolName(val string) attribute.KeyValue { + return attribute.String("network.protocol.name", val) +} + +// AttrURLTemplate returns an optional attribute for the "url.template" semantic +// convention. It represents the low-cardinality template of an +// [absolute path reference]. +// +// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 +func (ClientRequestBodySize) AttrURLTemplate(val string) attribute.KeyValue { + return attribute.String("url.template", val) +} + +// AttrNetworkProtocolVersion returns an optional attribute for the +// "network.protocol.version" semantic convention. It represents the actual +// version of the protocol used for network communication. +func (ClientRequestBodySize) AttrNetworkProtocolVersion(val string) attribute.KeyValue { + return attribute.String("network.protocol.version", val) +} + +// AttrURLScheme returns an optional attribute for the "url.scheme" semantic +// convention. It represents the [URI scheme] component identifying the used +// protocol. +// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +func (ClientRequestBodySize) AttrURLScheme(val string) attribute.KeyValue { + return attribute.String("url.scheme", val) +} + +// ClientRequestDuration is an instrument used to record metric values conforming +// to the "http.client.request.duration" semantic conventions. 
It represents the +// duration of HTTP client requests. +type ClientRequestDuration struct { + metric.Float64Histogram +} + +// NewClientRequestDuration returns a new ClientRequestDuration instrument. +func NewClientRequestDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ClientRequestDuration, error) { + // Check if the meter is nil. + if m == nil { + return ClientRequestDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "http.client.request.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("Duration of HTTP client requests."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ClientRequestDuration{noop.Float64Histogram{}}, err + } + return ClientRequestDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientRequestDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientRequestDuration) Name() string { + return "http.client.request.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientRequestDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (ClientRequestDuration) Description() string { + return "Duration of HTTP client requests." +} + +// Record records val to the current distribution for attrs. +// +// The requestMethod is the HTTP request method. +// +// The serverAddress is the host identifier of the ["URI origin"] HTTP request is +// sent to. +// +// The serverPort is the port identifier of the ["URI origin"] HTTP request is +// sent to. +// +// All additional attrs passed are included in the recorded value. 
+// +// ["URI origin"]: https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin +func (m ClientRequestDuration) Record( + ctx context.Context, + val float64, + requestMethod RequestMethodAttr, + serverAddress string, + serverPort int, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("http.request.method", string(requestMethod)), + attribute.String("server.address", serverAddress), + attribute.Int("server.port", serverPort), + )..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +func (m ClientRequestDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (ClientRequestDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrResponseStatusCode returns an optional attribute for the +// "http.response.status_code" semantic convention. It represents the +// [HTTP response status code].
+// +// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 +func (ClientRequestDuration) AttrResponseStatusCode(val int) attribute.KeyValue { + return attribute.Int("http.response.status_code", val) +} + +// AttrNetworkProtocolName returns an optional attribute for the +// "network.protocol.name" semantic convention. It represents the +// [OSI application layer] or non-OSI equivalent. +// +// [OSI application layer]: https://wikipedia.org/wiki/Application_layer +func (ClientRequestDuration) AttrNetworkProtocolName(val string) attribute.KeyValue { + return attribute.String("network.protocol.name", val) +} + +// AttrNetworkProtocolVersion returns an optional attribute for the +// "network.protocol.version" semantic convention. It represents the actual +// version of the protocol used for network communication. +func (ClientRequestDuration) AttrNetworkProtocolVersion(val string) attribute.KeyValue { + return attribute.String("network.protocol.version", val) +} + +// AttrURLScheme returns an optional attribute for the "url.scheme" semantic +// convention. It represents the [URI scheme] component identifying the used +// protocol. +// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +func (ClientRequestDuration) AttrURLScheme(val string) attribute.KeyValue { + return attribute.String("url.scheme", val) +} + +// AttrURLTemplate returns an optional attribute for the "url.template" semantic +// convention. It represents the low-cardinality template of an +// [absolute path reference]. +// +// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 +func (ClientRequestDuration) AttrURLTemplate(val string) attribute.KeyValue { + return attribute.String("url.template", val) +} + +// ClientResponseBodySize is an instrument used to record metric values +// conforming to the "http.client.response.body.size" semantic conventions. It +// represents the size of HTTP client response bodies. 
+type ClientResponseBodySize struct { + metric.Int64Histogram +} + +// NewClientResponseBodySize returns a new ClientResponseBodySize instrument. +func NewClientResponseBodySize( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ClientResponseBodySize, error) { + // Check if the meter is nil. + if m == nil { + return ClientResponseBodySize{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "http.client.response.body.size", + append([]metric.Int64HistogramOption{ + metric.WithDescription("Size of HTTP client response bodies."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ClientResponseBodySize{noop.Int64Histogram{}}, err + } + return ClientResponseBodySize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientResponseBodySize) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientResponseBodySize) Name() string { + return "http.client.response.body.size" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientResponseBodySize) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ClientResponseBodySize) Description() string { + return "Size of HTTP client response bodies." +} + +// Record records val to the current distribution for attrs. +// +// The requestMethod is the HTTP request method. +// +// The serverAddress is the host identifier of the ["URI origin"] HTTP request is +// sent to. +// +// The serverPort is the port identifier of the ["URI origin"] HTTP request is +// sent to. +// +// All additional attrs passed are included in the recorded value. +// +// ["URI origin"]: https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin +// ["URI origin"]: https://www.rfc-editor.org/rfc/rfc9110.html#name-uri-origin +// +// The size of the response payload body in bytes. 
This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. +// +// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length +func (m ClientResponseBodySize) Record( + ctx context.Context, + val int64, + requestMethod RequestMethodAttr, + serverAddress string, + serverPort int, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("http.request.method", string(requestMethod)), + attribute.String("server.address", serverAddress), + attribute.Int("server.port", serverPort), + )..., + ), + ) + + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// The size of the response payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. +// +// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length +func (m ClientResponseBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with.
+func (ClientResponseBodySize) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrResponseStatusCode returns an optional attribute for the +// "http.response.status_code" semantic convention. It represents the +// [HTTP response status code]. +// +// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 +func (ClientResponseBodySize) AttrResponseStatusCode(val int) attribute.KeyValue { + return attribute.Int("http.response.status_code", val) +} + +// AttrNetworkProtocolName returns an optional attribute for the +// "network.protocol.name" semantic convention. It represents the +// [OSI application layer] or non-OSI equivalent. +// +// [OSI application layer]: https://wikipedia.org/wiki/Application_layer +func (ClientResponseBodySize) AttrNetworkProtocolName(val string) attribute.KeyValue { + return attribute.String("network.protocol.name", val) +} + +// AttrURLTemplate returns an optional attribute for the "url.template" semantic +// convention. It represents the low-cardinality template of an +// [absolute path reference]. +// +// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 +func (ClientResponseBodySize) AttrURLTemplate(val string) attribute.KeyValue { + return attribute.String("url.template", val) +} + +// AttrNetworkProtocolVersion returns an optional attribute for the +// "network.protocol.version" semantic convention. It represents the actual +// version of the protocol used for network communication. +func (ClientResponseBodySize) AttrNetworkProtocolVersion(val string) attribute.KeyValue { + return attribute.String("network.protocol.version", val) +} + +// AttrURLScheme returns an optional attribute for the "url.scheme" semantic +// convention. It represents the [URI scheme] component identifying the used +// protocol. 
+// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +func (ClientResponseBodySize) AttrURLScheme(val string) attribute.KeyValue { + return attribute.String("url.scheme", val) +} + +// ServerActiveRequests is an instrument used to record metric values conforming +// to the "http.server.active_requests" semantic conventions. It represents the +// number of active HTTP server requests. +type ServerActiveRequests struct { + metric.Int64UpDownCounter +} + +// NewServerActiveRequests returns a new ServerActiveRequests instrument. +func NewServerActiveRequests( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ServerActiveRequests, error) { + // Check if the meter is nil. + if m == nil { + return ServerActiveRequests{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "http.server.active_requests", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of active HTTP server requests."), + metric.WithUnit("{request}"), + }, opt...)..., + ) + if err != nil { + return ServerActiveRequests{noop.Int64UpDownCounter{}}, err + } + return ServerActiveRequests{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerActiveRequests) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ServerActiveRequests) Name() string { + return "http.server.active_requests" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerActiveRequests) Unit() string { + return "{request}" +} + +// Description returns the semantic convention description of the instrument +func (ServerActiveRequests) Description() string { + return "Number of active HTTP server requests." +} + +// Add adds incr to the existing count for attrs. +// +// The requestMethod is the HTTP request method. +// +// The urlScheme is the the [URI scheme] component identifying the used protocol. 
+// +// All additional attrs passed are included in the recorded value. +// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +func (m ServerActiveRequests) Add( + ctx context.Context, + incr int64, + requestMethod RequestMethodAttr, + urlScheme string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("http.request.method", string(requestMethod)), + attribute.String("url.scheme", urlScheme), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m ServerActiveRequests) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the name of the local HTTP server that +// received the request. +func (ServerActiveRequests) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the port of the local HTTP server that received the +// request. +func (ServerActiveRequests) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// ServerRequestBodySize is an instrument used to record metric values conforming +// to the "http.server.request.body.size" semantic conventions. It represents the +// size of HTTP server request bodies. 
+type ServerRequestBodySize struct { + metric.Int64Histogram +} + +// NewServerRequestBodySize returns a new ServerRequestBodySize instrument. +func NewServerRequestBodySize( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ServerRequestBodySize, error) { + // Check if the meter is nil. + if m == nil { + return ServerRequestBodySize{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "http.server.request.body.size", + append([]metric.Int64HistogramOption{ + metric.WithDescription("Size of HTTP server request bodies."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ServerRequestBodySize{noop.Int64Histogram{}}, err + } + return ServerRequestBodySize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerRequestBodySize) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ServerRequestBodySize) Name() string { + return "http.server.request.body.size" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerRequestBodySize) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ServerRequestBodySize) Description() string { + return "Size of HTTP server request bodies." +} + +// Record records val to the current distribution for attrs. +// +// The requestMethod is the HTTP request method. +// +// The urlScheme is the the [URI scheme] component identifying the used protocol. +// +// All additional attrs passed are included in the recorded value. +// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +// +// The size of the request payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. 
+// +// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length +func (m ServerRequestBodySize) Record( + ctx context.Context, + val int64, + requestMethod RequestMethodAttr, + urlScheme string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("http.request.method", string(requestMethod)), + attribute.String("url.scheme", urlScheme), + )..., + ), + ) + + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// The size of the request payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. +// +// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length +func (m ServerRequestBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (ServerRequestBodySize) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrResponseStatusCode returns an optional attribute for the +// "http.response.status_code" semantic convention. It represents the +// [HTTP response status code].
+// +// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 +func (ServerRequestBodySize) AttrResponseStatusCode(val int) attribute.KeyValue { + return attribute.Int("http.response.status_code", val) +} + +// AttrRoute returns an optional attribute for the "http.route" semantic +// convention. It represents the matched route, that is, the path template in the +// format used by the respective server framework. +func (ServerRequestBodySize) AttrRoute(val string) attribute.KeyValue { + return attribute.String("http.route", val) +} + +// AttrNetworkProtocolName returns an optional attribute for the +// "network.protocol.name" semantic convention. It represents the +// [OSI application layer] or non-OSI equivalent. +// +// [OSI application layer]: https://wikipedia.org/wiki/Application_layer +func (ServerRequestBodySize) AttrNetworkProtocolName(val string) attribute.KeyValue { + return attribute.String("network.protocol.name", val) +} + +// AttrNetworkProtocolVersion returns an optional attribute for the +// "network.protocol.version" semantic convention. It represents the actual +// version of the protocol used for network communication. +func (ServerRequestBodySize) AttrNetworkProtocolVersion(val string) attribute.KeyValue { + return attribute.String("network.protocol.version", val) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the name of the local HTTP server that +// received the request. +func (ServerRequestBodySize) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the port of the local HTTP server that received the +// request. 
+func (ServerRequestBodySize) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// AttrUserAgentSyntheticType returns an optional attribute for the +// "user_agent.synthetic.type" semantic convention. It represents the specifies +// the category of synthetic traffic, such as tests or bots. +func (ServerRequestBodySize) AttrUserAgentSyntheticType(val UserAgentSyntheticTypeAttr) attribute.KeyValue { + return attribute.String("user_agent.synthetic.type", string(val)) +} + +// ServerRequestDuration is an instrument used to record metric values conforming +// to the "http.server.request.duration" semantic conventions. It represents the +// duration of HTTP server requests. +type ServerRequestDuration struct { + metric.Float64Histogram +} + +// NewServerRequestDuration returns a new ServerRequestDuration instrument. +func NewServerRequestDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ServerRequestDuration, error) { + // Check if the meter is nil. + if m == nil { + return ServerRequestDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "http.server.request.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("Duration of HTTP server requests."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ServerRequestDuration{noop.Float64Histogram{}}, err + } + return ServerRequestDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerRequestDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. 
+func (ServerRequestDuration) Name() string { + return "http.server.request.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerRequestDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (ServerRequestDuration) Description() string { + return "Duration of HTTP server requests." +} + +// Record records val to the current distribution for attrs. +// +// The requestMethod is the HTTP request method. +// +// The urlScheme is the the [URI scheme] component identifying the used protocol. +// +// All additional attrs passed are included in the recorded value. +// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +func (m ServerRequestDuration) Record( + ctx context.Context, + val float64, + requestMethod RequestMethodAttr, + urlScheme string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("http.request.method", string(requestMethod)), + attribute.String("url.scheme", urlScheme), + )..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +func (m ServerRequestDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with.
+func (ServerRequestDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrResponseStatusCode returns an optional attribute for the +// "http.response.status_code" semantic convention. It represents the +// [HTTP response status code]. +// +// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 +func (ServerRequestDuration) AttrResponseStatusCode(val int) attribute.KeyValue { + return attribute.Int("http.response.status_code", val) +} + +// AttrRoute returns an optional attribute for the "http.route" semantic +// convention. It represents the matched route, that is, the path template in the +// format used by the respective server framework. +func (ServerRequestDuration) AttrRoute(val string) attribute.KeyValue { + return attribute.String("http.route", val) +} + +// AttrNetworkProtocolName returns an optional attribute for the +// "network.protocol.name" semantic convention. It represents the +// [OSI application layer] or non-OSI equivalent. +// +// [OSI application layer]: https://wikipedia.org/wiki/Application_layer +func (ServerRequestDuration) AttrNetworkProtocolName(val string) attribute.KeyValue { + return attribute.String("network.protocol.name", val) +} + +// AttrNetworkProtocolVersion returns an optional attribute for the +// "network.protocol.version" semantic convention. It represents the actual +// version of the protocol used for network communication. +func (ServerRequestDuration) AttrNetworkProtocolVersion(val string) attribute.KeyValue { + return attribute.String("network.protocol.version", val) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the name of the local HTTP server that +// received the request. 
+func (ServerRequestDuration) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the port of the local HTTP server that received the +// request. +func (ServerRequestDuration) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// AttrUserAgentSyntheticType returns an optional attribute for the +// "user_agent.synthetic.type" semantic convention. It represents the specifies +// the category of synthetic traffic, such as tests or bots. +func (ServerRequestDuration) AttrUserAgentSyntheticType(val UserAgentSyntheticTypeAttr) attribute.KeyValue { + return attribute.String("user_agent.synthetic.type", string(val)) +} + +// ServerResponseBodySize is an instrument used to record metric values +// conforming to the "http.server.response.body.size" semantic conventions. It +// represents the size of HTTP server response bodies. +type ServerResponseBodySize struct { + metric.Int64Histogram +} + +// NewServerResponseBodySize returns a new ServerResponseBodySize instrument. +func NewServerResponseBodySize( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ServerResponseBodySize, error) { + // Check if the meter is nil. + if m == nil { + return ServerResponseBodySize{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "http.server.response.body.size", + append([]metric.Int64HistogramOption{ + metric.WithDescription("Size of HTTP server response bodies."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ServerResponseBodySize{noop.Int64Histogram{}}, err + } + return ServerResponseBodySize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerResponseBodySize) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. 
+func (ServerResponseBodySize) Name() string { + return "http.server.response.body.size" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerResponseBodySize) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ServerResponseBodySize) Description() string { + return "Size of HTTP server response bodies." +} + +// Record records val to the current distribution for attrs. +// +// The requestMethod is the HTTP request method. +// +// The urlScheme is the the [URI scheme] component identifying the used protocol. +// +// All additional attrs passed are included in the recorded value. +// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +// +// The size of the response payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. +// +// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length +func (m ServerResponseBodySize) Record( + ctx context.Context, + val int64, + requestMethod RequestMethodAttr, + urlScheme string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("http.request.method", string(requestMethod)), + attribute.String("url.scheme", urlScheme), + )..., + ), + ) + + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// The size of the response payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length] header. 
For requests using transport encoding, this should be +// the compressed size. +// +// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length +func (m ServerResponseBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (ServerResponseBodySize) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrResponseStatusCode returns an optional attribute for the +// "http.response.status_code" semantic convention. It represents the +// [HTTP response status code]. +// +// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 +func (ServerResponseBodySize) AttrResponseStatusCode(val int) attribute.KeyValue { + return attribute.Int("http.response.status_code", val) +} + +// AttrRoute returns an optional attribute for the "http.route" semantic +// convention. It represents the matched route, that is, the path template in the +// format used by the respective server framework. +func (ServerResponseBodySize) AttrRoute(val string) attribute.KeyValue { + return attribute.String("http.route", val) +} + +// AttrNetworkProtocolName returns an optional attribute for the +// "network.protocol.name" semantic convention. It represents the +// [OSI application layer] or non-OSI equivalent.
+// +// [OSI application layer]: https://wikipedia.org/wiki/Application_layer +func (ServerResponseBodySize) AttrNetworkProtocolName(val string) attribute.KeyValue { + return attribute.String("network.protocol.name", val) +} + +// AttrNetworkProtocolVersion returns an optional attribute for the +// "network.protocol.version" semantic convention. It represents the actual +// version of the protocol used for network communication. +func (ServerResponseBodySize) AttrNetworkProtocolVersion(val string) attribute.KeyValue { + return attribute.String("network.protocol.version", val) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the name of the local HTTP server that +// received the request. +func (ServerResponseBodySize) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the port of the local HTTP server that received the +// request. +func (ServerResponseBodySize) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// AttrUserAgentSyntheticType returns an optional attribute for the +// "user_agent.synthetic.type" semantic convention. It represents the specifies +// the category of synthetic traffic, such as tests or bots. +func (ServerResponseBodySize) AttrUserAgentSyntheticType(val UserAgentSyntheticTypeAttr) attribute.KeyValue { + return attribute.String("user_agent.synthetic.type", string(val)) +} \ No newline at end of file diff --git a/semconv/v1.36.0/hwconv/metric.go b/semconv/v1.36.0/hwconv/metric.go new file mode 100644 index 00000000000..2e8ca7f80e0 --- /dev/null +++ b/semconv/v1.36.0/hwconv/metric.go @@ -0,0 +1,1071 @@ +// Code generated from semantic convention specification. DO NOT EDIT. 
+ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package hwconv provides types and functionality for OpenTelemetry semantic +// conventions in the "hw" namespace. +package hwconv + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" +) + +var ( + addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }} + recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }} +) + +// ErrorTypeAttr is an attribute conforming to the error.type semantic +// conventions. It represents the type of error encountered by the component. +type ErrorTypeAttr string + +var ( + // ErrorTypeOther is a fallback error value to be used when the instrumentation + // doesn't define a custom value. + ErrorTypeOther ErrorTypeAttr = "_OTHER" +) + +// StateAttr is an attribute conforming to the hw.state semantic conventions. It +// represents the current state of the component. +type StateAttr string + +var ( + // StateOk is the ok. + StateOk StateAttr = "ok" + // StateDegraded is the degraded. + StateDegraded StateAttr = "degraded" + // StateFailed is the failed. + StateFailed StateAttr = "failed" +) + +// TypeAttr is an attribute conforming to the hw.type semantic conventions. It +// represents the type of the component. +type TypeAttr string + +var ( + // TypeBattery is the battery. + TypeBattery TypeAttr = "battery" + // TypeCPU is the CPU. + TypeCPU TypeAttr = "cpu" + // TypeDiskController is the disk controller. + TypeDiskController TypeAttr = "disk_controller" + // TypeEnclosure is the enclosure. + TypeEnclosure TypeAttr = "enclosure" + // TypeFan is the fan. + TypeFan TypeAttr = "fan" + // TypeGpu is the GPU. + TypeGpu TypeAttr = "gpu" + // TypeLogicalDisk is the logical disk. + TypeLogicalDisk TypeAttr = "logical_disk" + // TypeMemory is the memory. + TypeMemory TypeAttr = "memory" + // TypeNetwork is the network. 
+ TypeNetwork TypeAttr = "network" + // TypePhysicalDisk is the physical disk. + TypePhysicalDisk TypeAttr = "physical_disk" + // TypePowerSupply is the power supply. + TypePowerSupply TypeAttr = "power_supply" + // TypeTapeDrive is the tape drive. + TypeTapeDrive TypeAttr = "tape_drive" + // TypeTemperature is the temperature. + TypeTemperature TypeAttr = "temperature" + // TypeVoltage is the voltage. + TypeVoltage TypeAttr = "voltage" +) + +// Energy is an instrument used to record metric values conforming to the +// "hw.energy" semantic conventions. It represents the energy consumed by the +// component. +type Energy struct { + metric.Int64Counter +} + +// NewEnergy returns a new Energy instrument. +func NewEnergy( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (Energy, error) { + // Check if the meter is nil. + if m == nil { + return Energy{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "hw.energy", + append([]metric.Int64CounterOption{ + metric.WithDescription("Energy consumed by the component"), + metric.WithUnit("J"), + }, opt...)..., + ) + if err != nil { + return Energy{noop.Int64Counter{}}, err + } + return Energy{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m Energy) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (Energy) Name() string { + return "hw.energy" +} + +// Unit returns the semantic convention unit of the instrument +func (Energy) Unit() string { + return "J" +} + +// Description returns the semantic convention description of the instrument +func (Energy) Description() string { + return "Energy consumed by the component" +} + +// Add adds incr to the existing count for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// The hwType is the type of the component +// +// All additional attrs passed are included in the recorded value. 
+func (m Energy) Add( + ctx context.Context, + incr int64, + id string, + hwType TypeAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + attribute.String("hw.type", string(hwType)), + )..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m Energy) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (Energy) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (Energy) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// Errors is an instrument used to record metric values conforming to the +// "hw.errors" semantic conventions. It represents the number of errors +// encountered by the component. +type Errors struct { + metric.Int64Counter +} + +// NewErrors returns a new Errors instrument. +func NewErrors( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (Errors, error) { + // Check if the meter is nil. 
+ if m == nil { + return Errors{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "hw.errors", + append([]metric.Int64CounterOption{ + metric.WithDescription("Number of errors encountered by the component"), + metric.WithUnit("{error}"), + }, opt...)..., + ) + if err != nil { + return Errors{noop.Int64Counter{}}, err + } + return Errors{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m Errors) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (Errors) Name() string { + return "hw.errors" +} + +// Unit returns the semantic convention unit of the instrument +func (Errors) Unit() string { + return "{error}" +} + +// Description returns the semantic convention description of the instrument +func (Errors) Description() string { + return "Number of errors encountered by the component" +} + +// Add adds incr to the existing count for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// The hwType is the type of the component +// +// All additional attrs passed are included in the recorded value. +func (m Errors) Add( + ctx context.Context, + incr int64, + id string, + hwType TypeAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + attribute.String("hw.type", string(hwType)), + )..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+func (m Errors) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the type of error encountered by the component. +func (Errors) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (Errors) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (Errors) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// HostAmbientTemperature is an instrument used to record metric values +// conforming to the "hw.host.ambient_temperature" semantic conventions. It +// represents the ambient (external) temperature of the physical host. +type HostAmbientTemperature struct { + metric.Int64Gauge +} + +// NewHostAmbientTemperature returns a new HostAmbientTemperature instrument. +func NewHostAmbientTemperature( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (HostAmbientTemperature, error) { + // Check if the meter is nil. 
+ if m == nil { + return HostAmbientTemperature{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "hw.host.ambient_temperature", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Ambient (external) temperature of the physical host"), + metric.WithUnit("Cel"), + }, opt...)..., + ) + if err != nil { + return HostAmbientTemperature{noop.Int64Gauge{}}, err + } + return HostAmbientTemperature{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m HostAmbientTemperature) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (HostAmbientTemperature) Name() string { + return "hw.host.ambient_temperature" +} + +// Unit returns the semantic convention unit of the instrument +func (HostAmbientTemperature) Unit() string { + return "Cel" +} + +// Description returns the semantic convention description of the instrument +func (HostAmbientTemperature) Description() string { + return "Ambient (external) temperature of the physical host" +} + +// Record records val to the current distribution for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// All additional attrs passed are included in the recorded value. +func (m HostAmbientTemperature) Record( + ctx context.Context, + val int64, + id string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + )..., + ), + ) + + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+func (m HostAmbientTemperature) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (HostAmbientTemperature) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (HostAmbientTemperature) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// HostEnergy is an instrument used to record metric values conforming to the +// "hw.host.energy" semantic conventions. It represents the total energy consumed +// by the entire physical host, in joules. +type HostEnergy struct { + metric.Int64Counter +} + +// NewHostEnergy returns a new HostEnergy instrument. +func NewHostEnergy( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (HostEnergy, error) { + // Check if the meter is nil. + if m == nil { + return HostEnergy{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "hw.host.energy", + append([]metric.Int64CounterOption{ + metric.WithDescription("Total energy consumed by the entire physical host, in joules"), + metric.WithUnit("J"), + }, opt...)..., + ) + if err != nil { + return HostEnergy{noop.Int64Counter{}}, err + } + return HostEnergy{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m HostEnergy) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (HostEnergy) Name() string { + return "hw.host.energy" +} + +// Unit returns the semantic convention unit of the instrument +func (HostEnergy) Unit() string { + return "J" +} + +// Description returns the semantic convention description of the instrument +func (HostEnergy) Description() string { + return "Total energy consumed by the entire physical host, in joules" +} + +// Add adds incr to the existing count for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// All additional attrs passed are included in the recorded value. +// +// The overall energy usage of a host MUST be reported using the specific +// `hw.host.energy` and `hw.host.power` metrics **only**, instead of the generic +// `hw.energy` and `hw.power` described in the previous section, to prevent +// summing up overlapping values. +func (m HostEnergy) Add( + ctx context.Context, + incr int64, + id string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + )..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// The overall energy usage of a host MUST be reported using the specific +// `hw.host.energy` and `hw.host.power` metrics **only**, instead of the generic +// `hw.energy` and `hw.power` described in the previous section, to prevent +// summing up overlapping values. 
+func (m HostEnergy) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (HostEnergy) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (HostEnergy) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// HostHeatingMargin is an instrument used to record metric values conforming to +// the "hw.host.heating_margin" semantic conventions. It represents the by how +// many degrees Celsius the temperature of the physical host can be increased, +// before reaching a warning threshold on one of the internal sensors. +type HostHeatingMargin struct { + metric.Int64Gauge +} + +// NewHostHeatingMargin returns a new HostHeatingMargin instrument. +func NewHostHeatingMargin( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (HostHeatingMargin, error) { + // Check if the meter is nil. 
+ if m == nil { + return HostHeatingMargin{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "hw.host.heating_margin", + append([]metric.Int64GaugeOption{ + metric.WithDescription("By how many degrees Celsius the temperature of the physical host can be increased, before reaching a warning threshold on one of the internal sensors"), + metric.WithUnit("Cel"), + }, opt...)..., + ) + if err != nil { + return HostHeatingMargin{noop.Int64Gauge{}}, err + } + return HostHeatingMargin{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m HostHeatingMargin) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (HostHeatingMargin) Name() string { + return "hw.host.heating_margin" +} + +// Unit returns the semantic convention unit of the instrument +func (HostHeatingMargin) Unit() string { + return "Cel" +} + +// Description returns the semantic convention description of the instrument +func (HostHeatingMargin) Description() string { + return "By how many degrees Celsius the temperature of the physical host can be increased, before reaching a warning threshold on one of the internal sensors" +} + +// Record records val to the current distribution for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// All additional attrs passed are included in the recorded value. +func (m HostHeatingMargin) Record( + ctx context.Context, + val int64, + id string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + )..., + ), + ) + + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+func (m HostHeatingMargin) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (HostHeatingMargin) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (HostHeatingMargin) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// HostPower is an instrument used to record metric values conforming to the +// "hw.host.power" semantic conventions. It represents the instantaneous power +// consumed by the entire physical host in Watts (`hw.host.energy` is preferred). +type HostPower struct { + metric.Int64Gauge +} + +// NewHostPower returns a new HostPower instrument. +func NewHostPower( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (HostPower, error) { + // Check if the meter is nil. + if m == nil { + return HostPower{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "hw.host.power", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Instantaneous power consumed by the entire physical host in Watts (`hw.host.energy` is preferred)"), + metric.WithUnit("W"), + }, opt...)..., + ) + if err != nil { + return HostPower{noop.Int64Gauge{}}, err + } + return HostPower{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m HostPower) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (HostPower) Name() string { + return "hw.host.power" +} + +// Unit returns the semantic convention unit of the instrument +func (HostPower) Unit() string { + return "W" +} + +// Description returns the semantic convention description of the instrument +func (HostPower) Description() string { + return "Instantaneous power consumed by the entire physical host in Watts (`hw.host.energy` is preferred)" +} + +// Record records val to the current distribution for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// All additional attrs passed are included in the recorded value. +// +// The overall energy usage of a host MUST be reported using the specific +// `hw.host.energy` and `hw.host.power` metrics **only**, instead of the generic +// `hw.energy` and `hw.power` described in the previous section, to prevent +// summing up overlapping values. +func (m HostPower) Record( + ctx context.Context, + val int64, + id string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + )..., + ), + ) + + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// The overall energy usage of a host MUST be reported using the specific +// `hw.host.energy` and `hw.host.power` metrics **only**, instead of the generic +// `hw.energy` and `hw.power` described in the previous section, to prevent +// summing up overlapping values. 
+func (m HostPower) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (HostPower) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (HostPower) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// Power is an instrument used to record metric values conforming to the +// "hw.power" semantic conventions. It represents the instantaneous power +// consumed by the component. +type Power struct { + metric.Int64Gauge +} + +// NewPower returns a new Power instrument. +func NewPower( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (Power, error) { + // Check if the meter is nil. + if m == nil { + return Power{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "hw.power", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Instantaneous power consumed by the component"), + metric.WithUnit("W"), + }, opt...)..., + ) + if err != nil { + return Power{noop.Int64Gauge{}}, err + } + return Power{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m Power) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. 
+func (Power) Name() string { + return "hw.power" +} + +// Unit returns the semantic convention unit of the instrument +func (Power) Unit() string { + return "W" +} + +// Description returns the semantic convention description of the instrument +func (Power) Description() string { + return "Instantaneous power consumed by the component" +} + +// Record records val to the current distribution for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// The hwType is the type of the component +// +// All additional attrs passed are included in the recorded value. +// +// It is recommended to report `hw.energy` instead of `hw.power` when possible. +func (m Power) Record( + ctx context.Context, + val int64, + id string, + hwType TypeAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + attribute.String("hw.type", string(hwType)), + )..., + ), + ) + + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// It is recommended to report `hw.energy` instead of `hw.power` when possible. +func (m Power) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. 
+func (Power) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (Power) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// Status is an instrument used to record metric values conforming to the +// "hw.status" semantic conventions. It represents the operational status: `1` +// (true) or `0` (false) for each of the possible states. +type Status struct { + metric.Int64UpDownCounter +} + +// NewStatus returns a new Status instrument. +func NewStatus( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (Status, error) { + // Check if the meter is nil. + if m == nil { + return Status{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "hw.status", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Operational status: `1` (true) or `0` (false) for each of the possible states"), + metric.WithUnit("1"), + }, opt...)..., + ) + if err != nil { + return Status{noop.Int64UpDownCounter{}}, err + } + return Status{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m Status) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (Status) Name() string { + return "hw.status" +} + +// Unit returns the semantic convention unit of the instrument +func (Status) Unit() string { + return "1" +} + +// Description returns the semantic convention description of the instrument +func (Status) Description() string { + return "Operational status: `1` (true) or `0` (false) for each of the possible states" +} + +// Add adds incr to the existing count for attrs. 
+// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// The state is the the current state of the component +// +// The hwType is the type of the component +// +// All additional attrs passed are included in the recorded value. +// +// `hw.status` is currently specified as an *UpDownCounter* but would ideally be +// represented using a [*StateSet* as defined in OpenMetrics]. This semantic +// convention will be updated once *StateSet* is specified in OpenTelemetry. This +// planned change is not expected to have any consequence on the way users query +// their timeseries backend to retrieve the values of `hw.status` over time. +// +// [ [*StateSet* as defined in OpenMetrics]: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#stateset +func (m Status) Add( + ctx context.Context, + incr int64, + id string, + state StateAttr, + hwType TypeAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + attribute.String("hw.state", string(state)), + attribute.String("hw.type", string(hwType)), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// `hw.status` is currently specified as an *UpDownCounter* but would ideally be +// represented using a [*StateSet* as defined in OpenMetrics]. This semantic +// convention will be updated once *StateSet* is specified in OpenTelemetry. This +// planned change is not expected to have any consequence on the way users query +// their timeseries backend to retrieve the values of `hw.status` over time. 
+// +// [*StateSet* as defined in OpenMetrics]: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#stateset +func (m Status) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (Status) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (Status) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} \ No newline at end of file diff --git a/semconv/v1.36.0/k8sconv/metric.go b/semconv/v1.36.0/k8sconv/metric.go new file mode 100644 index 00000000000..5f6328b9ba7 --- /dev/null +++ b/semconv/v1.36.0/k8sconv/metric.go @@ -0,0 +1,7802 @@ +// Code generated from semantic convention specification. DO NOT EDIT. + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package k8sconv provides types and functionality for OpenTelemetry semantic +// conventions in the "k8s" namespace. 
+package k8sconv + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" +) + +var ( + addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }} + recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }} +) + +// ContainerStatusReasonAttr is an attribute conforming to the +// k8s.container.status.reason semantic conventions. It represents the reason for +// the container state. Corresponds to the `reason` field of the: +// [K8s ContainerStateWaiting] or [K8s ContainerStateTerminated]. +// +// [K8s ContainerStateWaiting]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstatewaiting-v1-core +// [K8s ContainerStateTerminated]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstateterminated-v1-core +type ContainerStatusReasonAttr string + +var ( + // ContainerStatusReasonContainerCreating is the container is being created. + ContainerStatusReasonContainerCreating ContainerStatusReasonAttr = "ContainerCreating" + // ContainerStatusReasonCrashLoopBackOff is the container is in a crash loop + // back off state. + ContainerStatusReasonCrashLoopBackOff ContainerStatusReasonAttr = "CrashLoopBackOff" + // ContainerStatusReasonCreateContainerConfigError is the there was an error + // creating the container configuration. + ContainerStatusReasonCreateContainerConfigError ContainerStatusReasonAttr = "CreateContainerConfigError" + // ContainerStatusReasonErrImagePull is the there was an error pulling the + // container image. + ContainerStatusReasonErrImagePull ContainerStatusReasonAttr = "ErrImagePull" + // ContainerStatusReasonImagePullBackOff is the container image pull is in back + // off state. + ContainerStatusReasonImagePullBackOff ContainerStatusReasonAttr = "ImagePullBackOff" + // ContainerStatusReasonOomKilled is the container was killed due to out of + // memory. 
+ ContainerStatusReasonOomKilled ContainerStatusReasonAttr = "OOMKilled" + // ContainerStatusReasonCompleted is the container has completed execution. + ContainerStatusReasonCompleted ContainerStatusReasonAttr = "Completed" + // ContainerStatusReasonError is the there was an error with the container. + ContainerStatusReasonError ContainerStatusReasonAttr = "Error" + // ContainerStatusReasonContainerCannotRun is the container cannot run. + ContainerStatusReasonContainerCannotRun ContainerStatusReasonAttr = "ContainerCannotRun" +) + +// ContainerStatusStateAttr is an attribute conforming to the +// k8s.container.status.state semantic conventions. It represents the state of +// the container. [K8s ContainerState]. +// +// [K8s ContainerState]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstate-v1-core +type ContainerStatusStateAttr string + +var ( + // ContainerStatusStateTerminated is the container has terminated. + ContainerStatusStateTerminated ContainerStatusStateAttr = "terminated" + // ContainerStatusStateRunning is the container is running. + ContainerStatusStateRunning ContainerStatusStateAttr = "running" + // ContainerStatusStateWaiting is the container is waiting. + ContainerStatusStateWaiting ContainerStatusStateAttr = "waiting" +) + +// NamespacePhaseAttr is an attribute conforming to the k8s.namespace.phase +// semantic conventions. It represents the phase of the K8s namespace. +type NamespacePhaseAttr string + +var ( + // NamespacePhaseActive is the active namespace phase as described by [K8s API] + // . + // + // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase + NamespacePhaseActive NamespacePhaseAttr = "active" + // NamespacePhaseTerminating is the terminating namespace phase as described by + // [K8s API]. 
+ // + // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase + NamespacePhaseTerminating NamespacePhaseAttr = "terminating" +) + +// NodeConditionStatusAttr is an attribute conforming to the +// k8s.node.condition.status semantic conventions. It represents the status of +// the condition, one of True, False, Unknown. +type NodeConditionStatusAttr string + +var ( + // NodeConditionStatusConditionTrue is the standardized value "true" of + // NodeConditionStatusAttr. + NodeConditionStatusConditionTrue NodeConditionStatusAttr = "true" + // NodeConditionStatusConditionFalse is the standardized value "false" of + // NodeConditionStatusAttr. + NodeConditionStatusConditionFalse NodeConditionStatusAttr = "false" + // NodeConditionStatusConditionUnknown is the standardized value "unknown" of + // NodeConditionStatusAttr. + NodeConditionStatusConditionUnknown NodeConditionStatusAttr = "unknown" +) + +// NodeConditionTypeAttr is an attribute conforming to the +// k8s.node.condition.type semantic conventions. It represents the condition type +// of a K8s Node. +type NodeConditionTypeAttr string + +var ( + // NodeConditionTypeReady is the node is healthy and ready to accept pods. + NodeConditionTypeReady NodeConditionTypeAttr = "Ready" + // NodeConditionTypeDiskPressure is the pressure exists on the disk size—that + // is, if the disk capacity is low. + NodeConditionTypeDiskPressure NodeConditionTypeAttr = "DiskPressure" + // NodeConditionTypeMemoryPressure is the pressure exists on the node + // memory—that is, if the node memory is low. + NodeConditionTypeMemoryPressure NodeConditionTypeAttr = "MemoryPressure" + // NodeConditionTypePIDPressure is the pressure exists on the processes—that + // is, if there are too many processes on the node. + NodeConditionTypePIDPressure NodeConditionTypeAttr = "PIDPressure" + // NodeConditionTypeNetworkUnavailable is the network for the node is not + // correctly configured. 
+ NodeConditionTypeNetworkUnavailable NodeConditionTypeAttr = "NetworkUnavailable" +) + +// NetworkIODirectionAttr is an attribute conforming to the network.io.direction +// semantic conventions. It represents the network IO operation direction. +type NetworkIODirectionAttr string + +var ( + // NetworkIODirectionTransmit is the standardized value "transmit" of + // NetworkIODirectionAttr. + NetworkIODirectionTransmit NetworkIODirectionAttr = "transmit" + // NetworkIODirectionReceive is the standardized value "receive" of + // NetworkIODirectionAttr. + NetworkIODirectionReceive NetworkIODirectionAttr = "receive" +) + +// ContainerCPULimit is an instrument used to record metric values conforming to +// the "k8s.container.cpu.limit" semantic conventions. It represents the maximum +// CPU resource limit set for the container. +type ContainerCPULimit struct { + metric.Int64UpDownCounter +} + +// NewContainerCPULimit returns a new ContainerCPULimit instrument. +func NewContainerCPULimit( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ContainerCPULimit, error) { + // Check if the meter is nil. + if m == nil { + return ContainerCPULimit{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.container.cpu.limit", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Maximum CPU resource limit set for the container"), + metric.WithUnit("{cpu}"), + }, opt...)..., + ) + if err != nil { + return ContainerCPULimit{noop.Int64UpDownCounter{}}, err + } + return ContainerCPULimit{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ContainerCPULimit) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (ContainerCPULimit) Name() string { + return "k8s.container.cpu.limit" +} + +// Unit returns the semantic convention unit of the instrument +func (ContainerCPULimit) Unit() string { + return "{cpu}" +} + +// Description returns the semantic convention description of the instrument +func (ContainerCPULimit) Description() string { + return "Maximum CPU resource limit set for the container" +} + +// Add adds incr to the existing count for attrs. +// +// See +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core +// for details. +func (m ContainerCPULimit) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// See +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core +// for details. +func (m ContainerCPULimit) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ContainerCPURequest is an instrument used to record metric values conforming +// to the "k8s.container.cpu.request" semantic conventions. It represents the CPU +// resource requested for the container. +type ContainerCPURequest struct { + metric.Int64UpDownCounter +} + +// NewContainerCPURequest returns a new ContainerCPURequest instrument. 
+func NewContainerCPURequest( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ContainerCPURequest, error) { + // Check if the meter is nil. + if m == nil { + return ContainerCPURequest{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.container.cpu.request", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("CPU resource requested for the container"), + metric.WithUnit("{cpu}"), + }, opt...)..., + ) + if err != nil { + return ContainerCPURequest{noop.Int64UpDownCounter{}}, err + } + return ContainerCPURequest{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ContainerCPURequest) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ContainerCPURequest) Name() string { + return "k8s.container.cpu.request" +} + +// Unit returns the semantic convention unit of the instrument +func (ContainerCPURequest) Unit() string { + return "{cpu}" +} + +// Description returns the semantic convention description of the instrument +func (ContainerCPURequest) Description() string { + return "CPU resource requested for the container" +} + +// Add adds incr to the existing count for attrs. +// +// See +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core +// for details. +func (m ContainerCPURequest) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// See +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core +// for details. 
+func (m ContainerCPURequest) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ContainerEphemeralStorageLimit is an instrument used to record metric values +// conforming to the "k8s.container.ephemeral_storage.limit" semantic +// conventions. It represents the maximum ephemeral storage resource limit set +// for the container. +type ContainerEphemeralStorageLimit struct { + metric.Int64UpDownCounter +} + +// NewContainerEphemeralStorageLimit returns a new ContainerEphemeralStorageLimit +// instrument. +func NewContainerEphemeralStorageLimit( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ContainerEphemeralStorageLimit, error) { + // Check if the meter is nil. + if m == nil { + return ContainerEphemeralStorageLimit{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.container.ephemeral_storage.limit", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Maximum ephemeral storage resource limit set for the container"), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ContainerEphemeralStorageLimit{noop.Int64UpDownCounter{}}, err + } + return ContainerEphemeralStorageLimit{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ContainerEphemeralStorageLimit) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (ContainerEphemeralStorageLimit) Name() string { + return "k8s.container.ephemeral_storage.limit" +} + +// Unit returns the semantic convention unit of the instrument +func (ContainerEphemeralStorageLimit) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ContainerEphemeralStorageLimit) Description() string { + return "Maximum ephemeral storage resource limit set for the container" +} + +// Add adds incr to the existing count for attrs. +// +// See +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core +// for details. +func (m ContainerEphemeralStorageLimit) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// See +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core +// for details. +func (m ContainerEphemeralStorageLimit) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ContainerEphemeralStorageRequest is an instrument used to record metric values +// conforming to the "k8s.container.ephemeral_storage.request" semantic +// conventions. It represents the ephemeral storage resource requested for the +// container. 
+type ContainerEphemeralStorageRequest struct { + metric.Int64UpDownCounter +} + +// NewContainerEphemeralStorageRequest returns a new +// ContainerEphemeralStorageRequest instrument. +func NewContainerEphemeralStorageRequest( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ContainerEphemeralStorageRequest, error) { + // Check if the meter is nil. + if m == nil { + return ContainerEphemeralStorageRequest{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.container.ephemeral_storage.request", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Ephemeral storage resource requested for the container"), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ContainerEphemeralStorageRequest{noop.Int64UpDownCounter{}}, err + } + return ContainerEphemeralStorageRequest{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ContainerEphemeralStorageRequest) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ContainerEphemeralStorageRequest) Name() string { + return "k8s.container.ephemeral_storage.request" +} + +// Unit returns the semantic convention unit of the instrument +func (ContainerEphemeralStorageRequest) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ContainerEphemeralStorageRequest) Description() string { + return "Ephemeral storage resource requested for the container" +} + +// Add adds incr to the existing count for attrs. +// +// See +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core +// for details. 
+func (m ContainerEphemeralStorageRequest) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// See +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core +// for details. +func (m ContainerEphemeralStorageRequest) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ContainerMemoryLimit is an instrument used to record metric values conforming +// to the "k8s.container.memory.limit" semantic conventions. It represents the +// maximum memory resource limit set for the container. +type ContainerMemoryLimit struct { + metric.Int64UpDownCounter +} + +// NewContainerMemoryLimit returns a new ContainerMemoryLimit instrument. +func NewContainerMemoryLimit( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ContainerMemoryLimit, error) { + // Check if the meter is nil. + if m == nil { + return ContainerMemoryLimit{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.container.memory.limit", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Maximum memory resource limit set for the container"), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ContainerMemoryLimit{noop.Int64UpDownCounter{}}, err + } + return ContainerMemoryLimit{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m ContainerMemoryLimit) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ContainerMemoryLimit) Name() string { + return "k8s.container.memory.limit" +} + +// Unit returns the semantic convention unit of the instrument +func (ContainerMemoryLimit) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ContainerMemoryLimit) Description() string { + return "Maximum memory resource limit set for the container" +} + +// Add adds incr to the existing count for attrs. +// +// See +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core +// for details. +func (m ContainerMemoryLimit) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// See +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core +// for details. +func (m ContainerMemoryLimit) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ContainerMemoryRequest is an instrument used to record metric values +// conforming to the "k8s.container.memory.request" semantic conventions. It +// represents the memory resource requested for the container. 
+type ContainerMemoryRequest struct { + metric.Int64UpDownCounter +} + +// NewContainerMemoryRequest returns a new ContainerMemoryRequest instrument. +func NewContainerMemoryRequest( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ContainerMemoryRequest, error) { + // Check if the meter is nil. + if m == nil { + return ContainerMemoryRequest{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.container.memory.request", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Memory resource requested for the container"), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ContainerMemoryRequest{noop.Int64UpDownCounter{}}, err + } + return ContainerMemoryRequest{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ContainerMemoryRequest) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ContainerMemoryRequest) Name() string { + return "k8s.container.memory.request" +} + +// Unit returns the semantic convention unit of the instrument +func (ContainerMemoryRequest) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ContainerMemoryRequest) Description() string { + return "Memory resource requested for the container" +} + +// Add adds incr to the existing count for attrs. +// +// See +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core +// for details. +func (m ContainerMemoryRequest) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) 
+} + +// AddSet adds incr to the existing count for set. +// +// See +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core +// for details. +func (m ContainerMemoryRequest) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ContainerReady is an instrument used to record metric values conforming to the +// "k8s.container.ready" semantic conventions. It represents the indicates +// whether the container is currently marked as ready to accept traffic, based on +// its readiness probe (1 = ready, 0 = not ready). +type ContainerReady struct { + metric.Int64UpDownCounter +} + +// NewContainerReady returns a new ContainerReady instrument. +func NewContainerReady( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ContainerReady, error) { + // Check if the meter is nil. + if m == nil { + return ContainerReady{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.container.ready", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Indicates whether the container is currently marked as ready to accept traffic, based on its readiness probe (1 = ready, 0 = not ready)"), + metric.WithUnit("{container}"), + }, opt...)..., + ) + if err != nil { + return ContainerReady{noop.Int64UpDownCounter{}}, err + } + return ContainerReady{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ContainerReady) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (ContainerReady) Name() string { + return "k8s.container.ready" +} + +// Unit returns the semantic convention unit of the instrument +func (ContainerReady) Unit() string { + return "{container}" +} + +// Description returns the semantic convention description of the instrument +func (ContainerReady) Description() string { + return "Indicates whether the container is currently marked as ready to accept traffic, based on its readiness probe (1 = ready, 0 = not ready)" +} + +// Add adds incr to the existing count for attrs. +// +// This metric SHOULD reflect the value of the `ready` field in the +// [K8s ContainerStatus]. +// +// [K8s ContainerStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstatus-v1-core +func (m ContainerReady) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric SHOULD reflect the value of the `ready` field in the +// [K8s ContainerStatus]. +// +// [K8s ContainerStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstatus-v1-core +func (m ContainerReady) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ContainerRestartCount is an instrument used to record metric values conforming +// to the "k8s.container.restart.count" semantic conventions. 
It represents the +// describes how many times the container has restarted (since the last counter +// reset). +type ContainerRestartCount struct { + metric.Int64UpDownCounter +} + +// NewContainerRestartCount returns a new ContainerRestartCount instrument. +func NewContainerRestartCount( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ContainerRestartCount, error) { + // Check if the meter is nil. + if m == nil { + return ContainerRestartCount{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.container.restart.count", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Describes how many times the container has restarted (since the last counter reset)"), + metric.WithUnit("{restart}"), + }, opt...)..., + ) + if err != nil { + return ContainerRestartCount{noop.Int64UpDownCounter{}}, err + } + return ContainerRestartCount{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ContainerRestartCount) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ContainerRestartCount) Name() string { + return "k8s.container.restart.count" +} + +// Unit returns the semantic convention unit of the instrument +func (ContainerRestartCount) Unit() string { + return "{restart}" +} + +// Description returns the semantic convention description of the instrument +func (ContainerRestartCount) Description() string { + return "Describes how many times the container has restarted (since the last counter reset)" +} + +// Add adds incr to the existing count for attrs. +// +// This value is pulled directly from the K8s API and the value can go +// indefinitely high and be reset to 0 +// at any time depending on how your kubelet is configured to prune dead +// containers. 
+// It is best to not depend too much on the exact value but rather look at it as +// either == 0, in which case you can conclude there were no restarts in the +// recent past, or > 0, in which case +// you can conclude there were restarts in the recent past, and not try and +// analyze the value beyond that. +func (m ContainerRestartCount) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This value is pulled directly from the K8s API and the value can go +// indefinitely high and be reset to 0 +// at any time depending on how your kubelet is configured to prune dead +// containers. +// It is best to not depend too much on the exact value but rather look at it as +// either == 0, in which case you can conclude there were no restarts in the +// recent past, or > 0, in which case +// you can conclude there were restarts in the recent past, and not try and +// analyze the value beyond that. +func (m ContainerRestartCount) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ContainerStatusReason is an instrument used to record metric values conforming +// to the "k8s.container.status.reason" semantic conventions. It represents the +// describes the number of K8s containers that are currently in a state for a +// given reason. 
+type ContainerStatusReason struct { + metric.Int64UpDownCounter +} + +// NewContainerStatusReason returns a new ContainerStatusReason instrument. +func NewContainerStatusReason( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ContainerStatusReason, error) { + // Check if the meter is nil. + if m == nil { + return ContainerStatusReason{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.container.status.reason", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Describes the number of K8s containers that are currently in a state for a given reason"), + metric.WithUnit("{container}"), + }, opt...)..., + ) + if err != nil { + return ContainerStatusReason{noop.Int64UpDownCounter{}}, err + } + return ContainerStatusReason{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ContainerStatusReason) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ContainerStatusReason) Name() string { + return "k8s.container.status.reason" +} + +// Unit returns the semantic convention unit of the instrument +func (ContainerStatusReason) Unit() string { + return "{container}" +} + +// Description returns the semantic convention description of the instrument +func (ContainerStatusReason) Description() string { + return "Describes the number of K8s containers that are currently in a state for a given reason" +} + +// Add adds incr to the existing count for attrs. +// +// The containerStatusReason is the the reason for the container state. 
+// Corresponds to the `reason` field of the: [K8s ContainerStateWaiting] or
+// [K8s ContainerStateTerminated]
+//
+// [K8s ContainerStateWaiting]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstatewaiting-v1-core
+// [K8s ContainerStateTerminated]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstateterminated-v1-core
+//
+// All possible container state reasons will be reported at each time interval to
+// avoid missing metrics.
+// Only the value corresponding to the current state reason will be non-zero.
+func (m ContainerStatusReason) Add(
+	ctx context.Context,
+	incr int64,
+	containerStatusReason ContainerStatusReasonAttr,
+	attrs ...attribute.KeyValue,
+) {
+	// The k8s.container.status.reason attribute is required for this
+	// instrument; it must be attached even when the caller supplies no
+	// additional attributes, so the fast path records it explicitly
+	// instead of recording with no attributes at all.
+	if len(attrs) == 0 {
+		m.Int64UpDownCounter.Add(ctx, incr, metric.WithAttributes(
+			attribute.String("k8s.container.status.reason", string(containerStatusReason)),
+		))
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("k8s.container.status.reason", string(containerStatusReason)),
+			)...,
+		),
+	)
+
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+//
+// All possible container state reasons will be reported at each time interval to
+// avoid missing metrics.
+// Only the value corresponding to the current state reason will be non-zero.
+func (m ContainerStatusReason) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64UpDownCounter.Add(ctx, incr)
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// ContainerStatusState is an instrument used to record metric values conforming
+// to the "k8s.container.status.state" semantic conventions. It represents the
+// describes the number of K8s containers that are currently in a given state.
+type ContainerStatusState struct { + metric.Int64UpDownCounter +} + +// NewContainerStatusState returns a new ContainerStatusState instrument. +func NewContainerStatusState( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ContainerStatusState, error) { + // Check if the meter is nil. + if m == nil { + return ContainerStatusState{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.container.status.state", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Describes the number of K8s containers that are currently in a given state"), + metric.WithUnit("{container}"), + }, opt...)..., + ) + if err != nil { + return ContainerStatusState{noop.Int64UpDownCounter{}}, err + } + return ContainerStatusState{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ContainerStatusState) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ContainerStatusState) Name() string { + return "k8s.container.status.state" +} + +// Unit returns the semantic convention unit of the instrument +func (ContainerStatusState) Unit() string { + return "{container}" +} + +// Description returns the semantic convention description of the instrument +func (ContainerStatusState) Description() string { + return "Describes the number of K8s containers that are currently in a given state" +} + +// Add adds incr to the existing count for attrs. +// +// The containerStatusState is the the state of the container. +// [K8s ContainerState] +// +// [K8s ContainerState]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstate-v1-core +// +// All possible container states will be reported at each time interval to avoid +// missing metrics. +// Only the value corresponding to the current state will be non-zero. 
+func (m ContainerStatusState) Add( + ctx context.Context, + incr int64, + containerStatusState ContainerStatusStateAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("k8s.container.status.state", string(containerStatusState)), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// All possible container states will be reported at each time interval to avoid +// missing metrics. +// Only the value corresponding to the current state will be non-zero. +func (m ContainerStatusState) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ContainerStorageLimit is an instrument used to record metric values conforming +// to the "k8s.container.storage.limit" semantic conventions. It represents the +// maximum storage resource limit set for the container. +type ContainerStorageLimit struct { + metric.Int64UpDownCounter +} + +// NewContainerStorageLimit returns a new ContainerStorageLimit instrument. +func NewContainerStorageLimit( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ContainerStorageLimit, error) { + // Check if the meter is nil. 
+ if m == nil { + return ContainerStorageLimit{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.container.storage.limit", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Maximum storage resource limit set for the container"), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ContainerStorageLimit{noop.Int64UpDownCounter{}}, err + } + return ContainerStorageLimit{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ContainerStorageLimit) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ContainerStorageLimit) Name() string { + return "k8s.container.storage.limit" +} + +// Unit returns the semantic convention unit of the instrument +func (ContainerStorageLimit) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ContainerStorageLimit) Description() string { + return "Maximum storage resource limit set for the container" +} + +// Add adds incr to the existing count for attrs. +// +// See +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core +// for details. +func (m ContainerStorageLimit) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// See +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core +// for details. 
+func (m ContainerStorageLimit) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ContainerStorageRequest is an instrument used to record metric values +// conforming to the "k8s.container.storage.request" semantic conventions. It +// represents the storage resource requested for the container. +type ContainerStorageRequest struct { + metric.Int64UpDownCounter +} + +// NewContainerStorageRequest returns a new ContainerStorageRequest instrument. +func NewContainerStorageRequest( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ContainerStorageRequest, error) { + // Check if the meter is nil. + if m == nil { + return ContainerStorageRequest{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.container.storage.request", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Storage resource requested for the container"), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ContainerStorageRequest{noop.Int64UpDownCounter{}}, err + } + return ContainerStorageRequest{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ContainerStorageRequest) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (ContainerStorageRequest) Name() string { + return "k8s.container.storage.request" +} + +// Unit returns the semantic convention unit of the instrument +func (ContainerStorageRequest) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ContainerStorageRequest) Description() string { + return "Storage resource requested for the container" +} + +// Add adds incr to the existing count for attrs. +// +// See +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core +// for details. +func (m ContainerStorageRequest) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// See +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core +// for details. +func (m ContainerStorageRequest) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// CronJobActiveJobs is an instrument used to record metric values conforming to +// the "k8s.cronjob.active_jobs" semantic conventions. It represents the number +// of actively running jobs for a cronjob. +type CronJobActiveJobs struct { + metric.Int64UpDownCounter +} + +// NewCronJobActiveJobs returns a new CronJobActiveJobs instrument. 
+func NewCronJobActiveJobs( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (CronJobActiveJobs, error) { + // Check if the meter is nil. + if m == nil { + return CronJobActiveJobs{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.cronjob.active_jobs", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of actively running jobs for a cronjob"), + metric.WithUnit("{job}"), + }, opt...)..., + ) + if err != nil { + return CronJobActiveJobs{noop.Int64UpDownCounter{}}, err + } + return CronJobActiveJobs{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m CronJobActiveJobs) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (CronJobActiveJobs) Name() string { + return "k8s.cronjob.active_jobs" +} + +// Unit returns the semantic convention unit of the instrument +func (CronJobActiveJobs) Unit() string { + return "{job}" +} + +// Description returns the semantic convention description of the instrument +func (CronJobActiveJobs) Description() string { + return "The number of actively running jobs for a cronjob" +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `active` field of the +// [K8s CronJobStatus]. +// +// [K8s CronJobStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#cronjobstatus-v1-batch +func (m CronJobActiveJobs) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `active` field of the +// [K8s CronJobStatus]. 
+// +// [K8s CronJobStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#cronjobstatus-v1-batch +func (m CronJobActiveJobs) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// DaemonSetCurrentScheduledNodes is an instrument used to record metric values +// conforming to the "k8s.daemonset.current_scheduled_nodes" semantic +// conventions. It represents the number of nodes that are running at least 1 +// daemon pod and are supposed to run the daemon pod. +type DaemonSetCurrentScheduledNodes struct { + metric.Int64UpDownCounter +} + +// NewDaemonSetCurrentScheduledNodes returns a new DaemonSetCurrentScheduledNodes +// instrument. +func NewDaemonSetCurrentScheduledNodes( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (DaemonSetCurrentScheduledNodes, error) { + // Check if the meter is nil. + if m == nil { + return DaemonSetCurrentScheduledNodes{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.daemonset.current_scheduled_nodes", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod"), + metric.WithUnit("{node}"), + }, opt...)..., + ) + if err != nil { + return DaemonSetCurrentScheduledNodes{noop.Int64UpDownCounter{}}, err + } + return DaemonSetCurrentScheduledNodes{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m DaemonSetCurrentScheduledNodes) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (DaemonSetCurrentScheduledNodes) Name() string { + return "k8s.daemonset.current_scheduled_nodes" +} + +// Unit returns the semantic convention unit of the instrument +func (DaemonSetCurrentScheduledNodes) Unit() string { + return "{node}" +} + +// Description returns the semantic convention description of the instrument +func (DaemonSetCurrentScheduledNodes) Description() string { + return "Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod" +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `currentNumberScheduled` field of the +// [K8s DaemonSetStatus]. +// +// [K8s DaemonSetStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps +func (m DaemonSetCurrentScheduledNodes) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `currentNumberScheduled` field of the +// [K8s DaemonSetStatus]. +// +// [K8s DaemonSetStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps +func (m DaemonSetCurrentScheduledNodes) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// DaemonSetDesiredScheduledNodes is an instrument used to record metric values +// conforming to the "k8s.daemonset.desired_scheduled_nodes" semantic +// conventions. 
It represents the number of nodes that should be running the +// daemon pod (including nodes currently running the daemon pod). +type DaemonSetDesiredScheduledNodes struct { + metric.Int64UpDownCounter +} + +// NewDaemonSetDesiredScheduledNodes returns a new DaemonSetDesiredScheduledNodes +// instrument. +func NewDaemonSetDesiredScheduledNodes( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (DaemonSetDesiredScheduledNodes, error) { + // Check if the meter is nil. + if m == nil { + return DaemonSetDesiredScheduledNodes{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.daemonset.desired_scheduled_nodes", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod)"), + metric.WithUnit("{node}"), + }, opt...)..., + ) + if err != nil { + return DaemonSetDesiredScheduledNodes{noop.Int64UpDownCounter{}}, err + } + return DaemonSetDesiredScheduledNodes{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m DaemonSetDesiredScheduledNodes) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (DaemonSetDesiredScheduledNodes) Name() string { + return "k8s.daemonset.desired_scheduled_nodes" +} + +// Unit returns the semantic convention unit of the instrument +func (DaemonSetDesiredScheduledNodes) Unit() string { + return "{node}" +} + +// Description returns the semantic convention description of the instrument +func (DaemonSetDesiredScheduledNodes) Description() string { + return "Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod)" +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `desiredNumberScheduled` field of the +// [K8s DaemonSetStatus]. 
+// +// [K8s DaemonSetStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps +func (m DaemonSetDesiredScheduledNodes) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `desiredNumberScheduled` field of the +// [K8s DaemonSetStatus]. +// +// [K8s DaemonSetStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps +func (m DaemonSetDesiredScheduledNodes) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// DaemonSetMisscheduledNodes is an instrument used to record metric values +// conforming to the "k8s.daemonset.misscheduled_nodes" semantic conventions. It +// represents the number of nodes that are running the daemon pod, but are not +// supposed to run the daemon pod. +type DaemonSetMisscheduledNodes struct { + metric.Int64UpDownCounter +} + +// NewDaemonSetMisscheduledNodes returns a new DaemonSetMisscheduledNodes +// instrument. +func NewDaemonSetMisscheduledNodes( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (DaemonSetMisscheduledNodes, error) { + // Check if the meter is nil. 
+ if m == nil { + return DaemonSetMisscheduledNodes{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.daemonset.misscheduled_nodes", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod"), + metric.WithUnit("{node}"), + }, opt...)..., + ) + if err != nil { + return DaemonSetMisscheduledNodes{noop.Int64UpDownCounter{}}, err + } + return DaemonSetMisscheduledNodes{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m DaemonSetMisscheduledNodes) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (DaemonSetMisscheduledNodes) Name() string { + return "k8s.daemonset.misscheduled_nodes" +} + +// Unit returns the semantic convention unit of the instrument +func (DaemonSetMisscheduledNodes) Unit() string { + return "{node}" +} + +// Description returns the semantic convention description of the instrument +func (DaemonSetMisscheduledNodes) Description() string { + return "Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod" +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `numberMisscheduled` field of the +// [K8s DaemonSetStatus]. +// +// [K8s DaemonSetStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps +func (m DaemonSetMisscheduledNodes) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+// +// This metric aligns with the `numberMisscheduled` field of the +// [K8s DaemonSetStatus]. +// +// [K8s DaemonSetStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps +func (m DaemonSetMisscheduledNodes) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// DaemonSetReadyNodes is an instrument used to record metric values conforming +// to the "k8s.daemonset.ready_nodes" semantic conventions. It represents the +// number of nodes that should be running the daemon pod and have one or more of +// the daemon pod running and ready. +type DaemonSetReadyNodes struct { + metric.Int64UpDownCounter +} + +// NewDaemonSetReadyNodes returns a new DaemonSetReadyNodes instrument. +func NewDaemonSetReadyNodes( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (DaemonSetReadyNodes, error) { + // Check if the meter is nil. + if m == nil { + return DaemonSetReadyNodes{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.daemonset.ready_nodes", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready"), + metric.WithUnit("{node}"), + }, opt...)..., + ) + if err != nil { + return DaemonSetReadyNodes{noop.Int64UpDownCounter{}}, err + } + return DaemonSetReadyNodes{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m DaemonSetReadyNodes) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (DaemonSetReadyNodes) Name() string { + return "k8s.daemonset.ready_nodes" +} + +// Unit returns the semantic convention unit of the instrument +func (DaemonSetReadyNodes) Unit() string { + return "{node}" +} + +// Description returns the semantic convention description of the instrument +func (DaemonSetReadyNodes) Description() string { + return "Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready" +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `numberReady` field of the +// [K8s DaemonSetStatus]. +// +// [K8s DaemonSetStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps +func (m DaemonSetReadyNodes) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `numberReady` field of the +// [K8s DaemonSetStatus]. +// +// [K8s DaemonSetStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps +func (m DaemonSetReadyNodes) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// DeploymentAvailablePods is an instrument used to record metric values +// conforming to the "k8s.deployment.available_pods" semantic conventions. 
It +// represents the total number of available replica pods (ready for at least +// minReadySeconds) targeted by this deployment. +type DeploymentAvailablePods struct { + metric.Int64UpDownCounter +} + +// NewDeploymentAvailablePods returns a new DeploymentAvailablePods instrument. +func NewDeploymentAvailablePods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (DeploymentAvailablePods, error) { + // Check if the meter is nil. + if m == nil { + return DeploymentAvailablePods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.deployment.available_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Total number of available replica pods (ready for at least minReadySeconds) targeted by this deployment"), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return DeploymentAvailablePods{noop.Int64UpDownCounter{}}, err + } + return DeploymentAvailablePods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m DeploymentAvailablePods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (DeploymentAvailablePods) Name() string { + return "k8s.deployment.available_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (DeploymentAvailablePods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (DeploymentAvailablePods) Description() string { + return "Total number of available replica pods (ready for at least minReadySeconds) targeted by this deployment" +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `availableReplicas` field of the +// [K8s DeploymentStatus]. 
+// +// [K8s DeploymentStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#deploymentstatus-v1-apps +func (m DeploymentAvailablePods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `availableReplicas` field of the +// [K8s DeploymentStatus]. +// +// [K8s DeploymentStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#deploymentstatus-v1-apps +func (m DeploymentAvailablePods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// DeploymentDesiredPods is an instrument used to record metric values conforming +// to the "k8s.deployment.desired_pods" semantic conventions. It represents the +// number of desired replica pods in this deployment. +type DeploymentDesiredPods struct { + metric.Int64UpDownCounter +} + +// NewDeploymentDesiredPods returns a new DeploymentDesiredPods instrument. +func NewDeploymentDesiredPods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (DeploymentDesiredPods, error) { + // Check if the meter is nil. 
+ if m == nil { + return DeploymentDesiredPods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.deployment.desired_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of desired replica pods in this deployment"), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return DeploymentDesiredPods{noop.Int64UpDownCounter{}}, err + } + return DeploymentDesiredPods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m DeploymentDesiredPods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (DeploymentDesiredPods) Name() string { + return "k8s.deployment.desired_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (DeploymentDesiredPods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (DeploymentDesiredPods) Description() string { + return "Number of desired replica pods in this deployment" +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `replicas` field of the +// [K8s DeploymentSpec]. +// +// [K8s DeploymentSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#deploymentspec-v1-apps +func (m DeploymentDesiredPods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `replicas` field of the +// [K8s DeploymentSpec]. 
+// +// [K8s DeploymentSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#deploymentspec-v1-apps +func (m DeploymentDesiredPods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// HPACurrentPods is an instrument used to record metric values conforming to the +// "k8s.hpa.current_pods" semantic conventions. It represents the current number +// of replica pods managed by this horizontal pod autoscaler, as last seen by the +// autoscaler. +type HPACurrentPods struct { + metric.Int64UpDownCounter +} + +// NewHPACurrentPods returns a new HPACurrentPods instrument. +func NewHPACurrentPods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (HPACurrentPods, error) { + // Check if the meter is nil. + if m == nil { + return HPACurrentPods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.hpa.current_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Current number of replica pods managed by this horizontal pod autoscaler, as last seen by the autoscaler"), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return HPACurrentPods{noop.Int64UpDownCounter{}}, err + } + return HPACurrentPods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m HPACurrentPods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (HPACurrentPods) Name() string { + return "k8s.hpa.current_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (HPACurrentPods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (HPACurrentPods) Description() string { + return "Current number of replica pods managed by this horizontal pod autoscaler, as last seen by the autoscaler" +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `currentReplicas` field of the +// [K8s HorizontalPodAutoscalerStatus] +// +// [K8s HorizontalPodAutoscalerStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerstatus-v2-autoscaling +func (m HPACurrentPods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `currentReplicas` field of the +// [K8s HorizontalPodAutoscalerStatus] +// +// [K8s HorizontalPodAutoscalerStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerstatus-v2-autoscaling +func (m HPACurrentPods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// HPADesiredPods is an instrument used to record metric values conforming to the +// "k8s.hpa.desired_pods" semantic conventions. 
It represents the desired number +// of replica pods managed by this horizontal pod autoscaler, as last calculated +// by the autoscaler. +type HPADesiredPods struct { + metric.Int64UpDownCounter +} + +// NewHPADesiredPods returns a new HPADesiredPods instrument. +func NewHPADesiredPods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (HPADesiredPods, error) { + // Check if the meter is nil. + if m == nil { + return HPADesiredPods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.hpa.desired_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Desired number of replica pods managed by this horizontal pod autoscaler, as last calculated by the autoscaler"), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return HPADesiredPods{noop.Int64UpDownCounter{}}, err + } + return HPADesiredPods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m HPADesiredPods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (HPADesiredPods) Name() string { + return "k8s.hpa.desired_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (HPADesiredPods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (HPADesiredPods) Description() string { + return "Desired number of replica pods managed by this horizontal pod autoscaler, as last calculated by the autoscaler" +} + +// Add adds incr to the existing count for attrs. 
+// +// This metric aligns with the `desiredReplicas` field of the +// [K8s HorizontalPodAutoscalerStatus] +// +// [K8s HorizontalPodAutoscalerStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerstatus-v2-autoscaling +func (m HPADesiredPods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `desiredReplicas` field of the +// [K8s HorizontalPodAutoscalerStatus] +// +// [K8s HorizontalPodAutoscalerStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerstatus-v2-autoscaling +func (m HPADesiredPods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// HPAMaxPods is an instrument used to record metric values conforming to the +// "k8s.hpa.max_pods" semantic conventions. It represents the upper limit for the +// number of replica pods to which the autoscaler can scale up. +type HPAMaxPods struct { + metric.Int64UpDownCounter +} + +// NewHPAMaxPods returns a new HPAMaxPods instrument. +func NewHPAMaxPods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (HPAMaxPods, error) { + // Check if the meter is nil. 
+ if m == nil { + return HPAMaxPods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.hpa.max_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The upper limit for the number of replica pods to which the autoscaler can scale up"), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return HPAMaxPods{noop.Int64UpDownCounter{}}, err + } + return HPAMaxPods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m HPAMaxPods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (HPAMaxPods) Name() string { + return "k8s.hpa.max_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (HPAMaxPods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (HPAMaxPods) Description() string { + return "The upper limit for the number of replica pods to which the autoscaler can scale up" +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `maxReplicas` field of the +// [K8s HorizontalPodAutoscalerSpec] +// +// [K8s HorizontalPodAutoscalerSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerspec-v2-autoscaling +func (m HPAMaxPods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+// +// This metric aligns with the `maxReplicas` field of the +// [K8s HorizontalPodAutoscalerSpec] +// +// [K8s HorizontalPodAutoscalerSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerspec-v2-autoscaling +func (m HPAMaxPods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// HPAMetricTargetCPUAverageUtilization is an instrument used to record metric +// values conforming to the "k8s.hpa.metric.target.cpu.average_utilization" +// semantic conventions. It represents the target average utilization, in +// percentage, for CPU resource in HPA config. +type HPAMetricTargetCPUAverageUtilization struct { + metric.Int64Gauge +} + +// NewHPAMetricTargetCPUAverageUtilization returns a new +// HPAMetricTargetCPUAverageUtilization instrument. +func NewHPAMetricTargetCPUAverageUtilization( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (HPAMetricTargetCPUAverageUtilization, error) { + // Check if the meter is nil. + if m == nil { + return HPAMetricTargetCPUAverageUtilization{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "k8s.hpa.metric.target.cpu.average_utilization", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Target average utilization, in percentage, for CPU resource in HPA config."), + metric.WithUnit("1"), + }, opt...)..., + ) + if err != nil { + return HPAMetricTargetCPUAverageUtilization{noop.Int64Gauge{}}, err + } + return HPAMetricTargetCPUAverageUtilization{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m HPAMetricTargetCPUAverageUtilization) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. 
+func (HPAMetricTargetCPUAverageUtilization) Name() string { + return "k8s.hpa.metric.target.cpu.average_utilization" +} + +// Unit returns the semantic convention unit of the instrument +func (HPAMetricTargetCPUAverageUtilization) Unit() string { + return "1" +} + +// Description returns the semantic convention description of the instrument +func (HPAMetricTargetCPUAverageUtilization) Description() string { + return "Target average utilization, in percentage, for CPU resource in HPA config." +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// This metric aligns with the `averageUtilization` field of the +// [K8s HPA MetricTarget]. +// If the type of the metric is [`ContainerResource`], +// the `k8s.container.name` attribute MUST be set to identify the specific +// container within the pod to which the metric applies. +// +// [K8s HPA MetricTarget]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#metrictarget-v2-autoscaling +// [`ContainerResource`]: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-metrics-apis +func (m HPAMetricTargetCPUAverageUtilization) Record( + ctx context.Context, + val int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// This metric aligns with the `averageUtilization` field of the +// [K8s HPA MetricTarget]. +// If the type of the metric is [`ContainerResource`], +// the `k8s.container.name` attribute MUST be set to identify the specific +// container within the pod to which the metric applies. 
+// +// [K8s HPA MetricTarget]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#metrictarget-v2-autoscaling +// [`ContainerResource`]: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-metrics-apis +func (m HPAMetricTargetCPUAverageUtilization) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// AttrContainerName returns an optional attribute for the "k8s.container.name" +// semantic convention. It represents the name of the Container from Pod +// specification, must be unique within a Pod. Container runtime usually uses +// different globally unique name (`container.name`). +func (HPAMetricTargetCPUAverageUtilization) AttrContainerName(val string) attribute.KeyValue { + return attribute.String("k8s.container.name", val) +} + +// AttrHPAMetricType returns an optional attribute for the "k8s.hpa.metric.type" +// semantic convention. It represents the type of metric source for the +// horizontal pod autoscaler. +func (HPAMetricTargetCPUAverageUtilization) AttrHPAMetricType(val string) attribute.KeyValue { + return attribute.String("k8s.hpa.metric.type", val) +} + +// HPAMetricTargetCPUAverageValue is an instrument used to record metric values +// conforming to the "k8s.hpa.metric.target.cpu.average_value" semantic +// conventions. It represents the target average value for CPU resource in HPA +// config. +type HPAMetricTargetCPUAverageValue struct { + metric.Int64Gauge +} + +// NewHPAMetricTargetCPUAverageValue returns a new HPAMetricTargetCPUAverageValue +// instrument. +func NewHPAMetricTargetCPUAverageValue( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (HPAMetricTargetCPUAverageValue, error) { + // Check if the meter is nil. 
+ if m == nil { + return HPAMetricTargetCPUAverageValue{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "k8s.hpa.metric.target.cpu.average_value", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Target average value for CPU resource in HPA config."), + metric.WithUnit("{cpu}"), + }, opt...)..., + ) + if err != nil { + return HPAMetricTargetCPUAverageValue{noop.Int64Gauge{}}, err + } + return HPAMetricTargetCPUAverageValue{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m HPAMetricTargetCPUAverageValue) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (HPAMetricTargetCPUAverageValue) Name() string { + return "k8s.hpa.metric.target.cpu.average_value" +} + +// Unit returns the semantic convention unit of the instrument +func (HPAMetricTargetCPUAverageValue) Unit() string { + return "{cpu}" +} + +// Description returns the semantic convention description of the instrument +func (HPAMetricTargetCPUAverageValue) Description() string { + return "Target average value for CPU resource in HPA config." +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// This metric aligns with the `averageValue` field of the +// [K8s HPA MetricTarget]. +// If the type of the metric is [`ContainerResource`], +// the `k8s.container.name` attribute MUST be set to identify the specific +// container within the pod to which the metric applies. 
+//
+// [K8s HPA MetricTarget]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#metrictarget-v2-autoscaling
+// [`ContainerResource`]: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-metrics-apis
+func (m HPAMetricTargetCPUAverageValue) Record(
+	ctx context.Context,
+	val int64,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			attrs...,
+		),
+	)
+
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// This metric aligns with the `averageValue` field of the
+// [K8s HPA MetricTarget].
+// If the type of the metric is [`ContainerResource`],
+// the `k8s.container.name` attribute MUST be set to identify the specific
+// container within the pod to which the metric applies.
+//
+// [K8s HPA MetricTarget]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#metrictarget-v2-autoscaling
+// [`ContainerResource`]: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-metrics-apis
+func (m HPAMetricTargetCPUAverageValue) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		// Fast path: no attributes, record without allocating options.
+		m.Int64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// AttrContainerName returns an optional attribute for the "k8s.container.name"
+// semantic convention. It represents the name of the Container from Pod
+// specification, must be unique within a Pod. Container runtime usually uses
+// different globally unique name (`container.name`).
+func (HPAMetricTargetCPUAverageValue) AttrContainerName(val string) attribute.KeyValue { + return attribute.String("k8s.container.name", val) +} + +// AttrHPAMetricType returns an optional attribute for the "k8s.hpa.metric.type" +// semantic convention. It represents the type of metric source for the +// horizontal pod autoscaler. +func (HPAMetricTargetCPUAverageValue) AttrHPAMetricType(val string) attribute.KeyValue { + return attribute.String("k8s.hpa.metric.type", val) +} + +// HPAMetricTargetCPUValue is an instrument used to record metric values +// conforming to the "k8s.hpa.metric.target.cpu.value" semantic conventions. It +// represents the target value for CPU resource in HPA config. +type HPAMetricTargetCPUValue struct { + metric.Int64Gauge +} + +// NewHPAMetricTargetCPUValue returns a new HPAMetricTargetCPUValue instrument. +func NewHPAMetricTargetCPUValue( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (HPAMetricTargetCPUValue, error) { + // Check if the meter is nil. + if m == nil { + return HPAMetricTargetCPUValue{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "k8s.hpa.metric.target.cpu.value", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Target value for CPU resource in HPA config."), + metric.WithUnit("{cpu}"), + }, opt...)..., + ) + if err != nil { + return HPAMetricTargetCPUValue{noop.Int64Gauge{}}, err + } + return HPAMetricTargetCPUValue{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m HPAMetricTargetCPUValue) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. 
+func (HPAMetricTargetCPUValue) Name() string { + return "k8s.hpa.metric.target.cpu.value" +} + +// Unit returns the semantic convention unit of the instrument +func (HPAMetricTargetCPUValue) Unit() string { + return "{cpu}" +} + +// Description returns the semantic convention description of the instrument +func (HPAMetricTargetCPUValue) Description() string { + return "Target value for CPU resource in HPA config." +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// This metric aligns with the `value` field of the +// [K8s HPA MetricTarget]. +// If the type of the metric is [`ContainerResource`], +// the `k8s.container.name` attribute MUST be set to identify the specific +// container within the pod to which the metric applies. +// +// [K8s HPA MetricTarget]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#metrictarget-v2-autoscaling +// [`ContainerResource`]: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-metrics-apis +func (m HPAMetricTargetCPUValue) Record( + ctx context.Context, + val int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// This metric aligns with the `value` field of the +// [K8s HPA MetricTarget]. +// If the type of the metric is [`ContainerResource`], +// the `k8s.container.name` attribute MUST be set to identify the specific +// container within the pod to which the metric applies. 
+// +// [K8s HPA MetricTarget]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#metrictarget-v2-autoscaling +// [`ContainerResource`]: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-metrics-apis +func (m HPAMetricTargetCPUValue) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// AttrContainerName returns an optional attribute for the "k8s.container.name" +// semantic convention. It represents the name of the Container from Pod +// specification, must be unique within a Pod. Container runtime usually uses +// different globally unique name (`container.name`). +func (HPAMetricTargetCPUValue) AttrContainerName(val string) attribute.KeyValue { + return attribute.String("k8s.container.name", val) +} + +// AttrHPAMetricType returns an optional attribute for the "k8s.hpa.metric.type" +// semantic convention. It represents the type of metric source for the +// horizontal pod autoscaler. +func (HPAMetricTargetCPUValue) AttrHPAMetricType(val string) attribute.KeyValue { + return attribute.String("k8s.hpa.metric.type", val) +} + +// HPAMinPods is an instrument used to record metric values conforming to the +// "k8s.hpa.min_pods" semantic conventions. It represents the lower limit for the +// number of replica pods to which the autoscaler can scale down. +type HPAMinPods struct { + metric.Int64UpDownCounter +} + +// NewHPAMinPods returns a new HPAMinPods instrument. +func NewHPAMinPods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (HPAMinPods, error) { + // Check if the meter is nil. 
+ if m == nil { + return HPAMinPods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.hpa.min_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The lower limit for the number of replica pods to which the autoscaler can scale down"), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return HPAMinPods{noop.Int64UpDownCounter{}}, err + } + return HPAMinPods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m HPAMinPods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (HPAMinPods) Name() string { + return "k8s.hpa.min_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (HPAMinPods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (HPAMinPods) Description() string { + return "The lower limit for the number of replica pods to which the autoscaler can scale down" +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `minReplicas` field of the +// [K8s HorizontalPodAutoscalerSpec] +// +// [K8s HorizontalPodAutoscalerSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerspec-v2-autoscaling +func (m HPAMinPods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+// +// This metric aligns with the `minReplicas` field of the +// [K8s HorizontalPodAutoscalerSpec] +// +// [K8s HorizontalPodAutoscalerSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerspec-v2-autoscaling +func (m HPAMinPods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// JobActivePods is an instrument used to record metric values conforming to the +// "k8s.job.active_pods" semantic conventions. It represents the number of +// pending and actively running pods for a job. +type JobActivePods struct { + metric.Int64UpDownCounter +} + +// NewJobActivePods returns a new JobActivePods instrument. +func NewJobActivePods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (JobActivePods, error) { + // Check if the meter is nil. + if m == nil { + return JobActivePods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.job.active_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of pending and actively running pods for a job"), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return JobActivePods{noop.Int64UpDownCounter{}}, err + } + return JobActivePods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m JobActivePods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (JobActivePods) Name() string { + return "k8s.job.active_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (JobActivePods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (JobActivePods) Description() string { + return "The number of pending and actively running pods for a job" +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `active` field of the +// [K8s JobStatus]. +// +// [K8s JobStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch +func (m JobActivePods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `active` field of the +// [K8s JobStatus]. +// +// [K8s JobStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch +func (m JobActivePods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// JobDesiredSuccessfulPods is an instrument used to record metric values +// conforming to the "k8s.job.desired_successful_pods" semantic conventions. It +// represents the desired number of successfully finished pods the job should be +// run with. 
+type JobDesiredSuccessfulPods struct {
+	metric.Int64UpDownCounter
+}
+
+// NewJobDesiredSuccessfulPods returns a new JobDesiredSuccessfulPods instrument.
+func NewJobDesiredSuccessfulPods(
+	m metric.Meter,
+	opt ...metric.Int64UpDownCounterOption,
+) (JobDesiredSuccessfulPods, error) {
+	// Check if the meter is nil.
+	if m == nil {
+		return JobDesiredSuccessfulPods{noop.Int64UpDownCounter{}}, nil
+	}
+
+	i, err := m.Int64UpDownCounter(
+		"k8s.job.desired_successful_pods",
+		append([]metric.Int64UpDownCounterOption{
+			metric.WithDescription("The desired number of successfully finished pods the job should be run with"),
+			metric.WithUnit("{pod}"),
+		}, opt...)...,
+	)
+	if err != nil {
+		return JobDesiredSuccessfulPods{noop.Int64UpDownCounter{}}, err
+	}
+	return JobDesiredSuccessfulPods{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m JobDesiredSuccessfulPods) Inst() metric.Int64UpDownCounter {
+	return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (JobDesiredSuccessfulPods) Name() string {
+	return "k8s.job.desired_successful_pods"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (JobDesiredSuccessfulPods) Unit() string {
+	return "{pod}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (JobDesiredSuccessfulPods) Description() string {
+	return "The desired number of successfully finished pods the job should be run with"
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// This metric aligns with the `completions` field of the
+// [K8s JobSpec].
+//
+// [K8s JobSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobspec-v1-batch
+func (m JobDesiredSuccessfulPods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) {
+	if len(attrs) == 0 {
+		m.Int64UpDownCounter.Add(ctx, incr)
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributes(attrs...))
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+//
+// This metric aligns with the `completions` field of the
+// [K8s JobSpec].
+//
+// [K8s JobSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobspec-v1-batch
+func (m JobDesiredSuccessfulPods) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64UpDownCounter.Add(ctx, incr)
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// JobFailedPods is an instrument used to record metric values conforming to the
+// "k8s.job.failed_pods" semantic conventions. It represents the number of pods
+// which reached phase Failed for a job.
+type JobFailedPods struct {
+	metric.Int64UpDownCounter
+}
+
+// NewJobFailedPods returns a new JobFailedPods instrument.
+func NewJobFailedPods(
+	m metric.Meter,
+	opt ...metric.Int64UpDownCounterOption,
+) (JobFailedPods, error) {
+	// Check if the meter is nil.
+ if m == nil { + return JobFailedPods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.job.failed_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of pods which reached phase Failed for a job"), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return JobFailedPods{noop.Int64UpDownCounter{}}, err + } + return JobFailedPods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m JobFailedPods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (JobFailedPods) Name() string { + return "k8s.job.failed_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (JobFailedPods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (JobFailedPods) Description() string { + return "The number of pods which reached phase Failed for a job" +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `failed` field of the +// [K8s JobStatus]. +// +// [K8s JobStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch +func (m JobFailedPods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `failed` field of the +// [K8s JobStatus]. 
+// +// [K8s JobStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch +func (m JobFailedPods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// JobMaxParallelPods is an instrument used to record metric values conforming to +// the "k8s.job.max_parallel_pods" semantic conventions. It represents the max +// desired number of pods the job should run at any given time. +type JobMaxParallelPods struct { + metric.Int64UpDownCounter +} + +// NewJobMaxParallelPods returns a new JobMaxParallelPods instrument. +func NewJobMaxParallelPods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (JobMaxParallelPods, error) { + // Check if the meter is nil. + if m == nil { + return JobMaxParallelPods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.job.max_parallel_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The max desired number of pods the job should run at any given time"), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return JobMaxParallelPods{noop.Int64UpDownCounter{}}, err + } + return JobMaxParallelPods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m JobMaxParallelPods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (JobMaxParallelPods) Name() string { + return "k8s.job.max_parallel_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (JobMaxParallelPods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (JobMaxParallelPods) Description() string { + return "The max desired number of pods the job should run at any given time" +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `parallelism` field of the +// [K8s JobSpec]. +// +// [K8s JobSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobspec-v1-batch +func (m JobMaxParallelPods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `parallelism` field of the +// [K8s JobSpec]. +// +// [K8s JobSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobspec-v1-batch +func (m JobMaxParallelPods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// JobSuccessfulPods is an instrument used to record metric values conforming to +// the "k8s.job.successful_pods" semantic conventions. It represents the number +// of pods which reached phase Succeeded for a job. 
+type JobSuccessfulPods struct { + metric.Int64UpDownCounter +} + +// NewJobSuccessfulPods returns a new JobSuccessfulPods instrument. +func NewJobSuccessfulPods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (JobSuccessfulPods, error) { + // Check if the meter is nil. + if m == nil { + return JobSuccessfulPods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.job.successful_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of pods which reached phase Succeeded for a job"), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return JobSuccessfulPods{noop.Int64UpDownCounter{}}, err + } + return JobSuccessfulPods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m JobSuccessfulPods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (JobSuccessfulPods) Name() string { + return "k8s.job.successful_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (JobSuccessfulPods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (JobSuccessfulPods) Description() string { + return "The number of pods which reached phase Succeeded for a job" +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `succeeded` field of the +// [K8s JobStatus]. +// +// [K8s JobStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch +func (m JobSuccessfulPods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) 
+} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `succeeded` field of the +// [K8s JobStatus]. +// +// [K8s JobStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch +func (m JobSuccessfulPods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// NamespacePhase is an instrument used to record metric values conforming to the +// "k8s.namespace.phase" semantic conventions. It represents the describes number +// of K8s namespaces that are currently in a given phase. +type NamespacePhase struct { + metric.Int64UpDownCounter +} + +// NewNamespacePhase returns a new NamespacePhase instrument. +func NewNamespacePhase( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (NamespacePhase, error) { + // Check if the meter is nil. + if m == nil { + return NamespacePhase{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.namespace.phase", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Describes number of K8s namespaces that are currently in a given phase."), + metric.WithUnit("{namespace}"), + }, opt...)..., + ) + if err != nil { + return NamespacePhase{noop.Int64UpDownCounter{}}, err + } + return NamespacePhase{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NamespacePhase) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (NamespacePhase) Name() string { + return "k8s.namespace.phase" +} + +// Unit returns the semantic convention unit of the instrument +func (NamespacePhase) Unit() string { + return "{namespace}" +} + +// Description returns the semantic convention description of the instrument +func (NamespacePhase) Description() string { + return "Describes number of K8s namespaces that are currently in a given phase." +} + +// Add adds incr to the existing count for attrs. +// +// The namespacePhase is the the phase of the K8s namespace. +func (m NamespacePhase) Add( + ctx context.Context, + incr int64, + namespacePhase NamespacePhaseAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("k8s.namespace.phase", string(namespacePhase)), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m NamespacePhase) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// NodeAllocatableCPU is an instrument used to record metric values conforming to +// the "k8s.node.allocatable.cpu" semantic conventions. It represents the amount +// of cpu allocatable on the node. +type NodeAllocatableCPU struct { + metric.Int64UpDownCounter +} + +// NewNodeAllocatableCPU returns a new NodeAllocatableCPU instrument. +func NewNodeAllocatableCPU( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (NodeAllocatableCPU, error) { + // Check if the meter is nil. 
+ if m == nil { + return NodeAllocatableCPU{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.node.allocatable.cpu", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Amount of cpu allocatable on the node"), + metric.WithUnit("{cpu}"), + }, opt...)..., + ) + if err != nil { + return NodeAllocatableCPU{noop.Int64UpDownCounter{}}, err + } + return NodeAllocatableCPU{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NodeAllocatableCPU) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (NodeAllocatableCPU) Name() string { + return "k8s.node.allocatable.cpu" +} + +// Unit returns the semantic convention unit of the instrument +func (NodeAllocatableCPU) Unit() string { + return "{cpu}" +} + +// Description returns the semantic convention description of the instrument +func (NodeAllocatableCPU) Description() string { + return "Amount of cpu allocatable on the node" +} + +// Add adds incr to the existing count for attrs. +func (m NodeAllocatableCPU) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m NodeAllocatableCPU) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) 
+} + +// NodeAllocatableEphemeralStorage is an instrument used to record metric values +// conforming to the "k8s.node.allocatable.ephemeral_storage" semantic +// conventions. It represents the amount of ephemeral-storage allocatable on the +// node. +type NodeAllocatableEphemeralStorage struct { + metric.Int64UpDownCounter +} + +// NewNodeAllocatableEphemeralStorage returns a new +// NodeAllocatableEphemeralStorage instrument. +func NewNodeAllocatableEphemeralStorage( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (NodeAllocatableEphemeralStorage, error) { + // Check if the meter is nil. + if m == nil { + return NodeAllocatableEphemeralStorage{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.node.allocatable.ephemeral_storage", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Amount of ephemeral-storage allocatable on the node"), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return NodeAllocatableEphemeralStorage{noop.Int64UpDownCounter{}}, err + } + return NodeAllocatableEphemeralStorage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NodeAllocatableEphemeralStorage) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (NodeAllocatableEphemeralStorage) Name() string { + return "k8s.node.allocatable.ephemeral_storage" +} + +// Unit returns the semantic convention unit of the instrument +func (NodeAllocatableEphemeralStorage) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (NodeAllocatableEphemeralStorage) Description() string { + return "Amount of ephemeral-storage allocatable on the node" +} + +// Add adds incr to the existing count for attrs. 
+func (m NodeAllocatableEphemeralStorage) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m NodeAllocatableEphemeralStorage) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// NodeAllocatableMemory is an instrument used to record metric values conforming +// to the "k8s.node.allocatable.memory" semantic conventions. It represents the +// amount of memory allocatable on the node. +type NodeAllocatableMemory struct { + metric.Int64UpDownCounter +} + +// NewNodeAllocatableMemory returns a new NodeAllocatableMemory instrument. +func NewNodeAllocatableMemory( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (NodeAllocatableMemory, error) { + // Check if the meter is nil. + if m == nil { + return NodeAllocatableMemory{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.node.allocatable.memory", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Amount of memory allocatable on the node"), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return NodeAllocatableMemory{noop.Int64UpDownCounter{}}, err + } + return NodeAllocatableMemory{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m NodeAllocatableMemory) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (NodeAllocatableMemory) Name() string { + return "k8s.node.allocatable.memory" +} + +// Unit returns the semantic convention unit of the instrument +func (NodeAllocatableMemory) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (NodeAllocatableMemory) Description() string { + return "Amount of memory allocatable on the node" +} + +// Add adds incr to the existing count for attrs. +func (m NodeAllocatableMemory) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m NodeAllocatableMemory) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// NodeAllocatablePods is an instrument used to record metric values conforming +// to the "k8s.node.allocatable.pods" semantic conventions. It represents the +// amount of pods allocatable on the node. +type NodeAllocatablePods struct { + metric.Int64UpDownCounter +} + +// NewNodeAllocatablePods returns a new NodeAllocatablePods instrument. +func NewNodeAllocatablePods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (NodeAllocatablePods, error) { + // Check if the meter is nil. 
+ if m == nil { + return NodeAllocatablePods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.node.allocatable.pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Amount of pods allocatable on the node"), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return NodeAllocatablePods{noop.Int64UpDownCounter{}}, err + } + return NodeAllocatablePods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NodeAllocatablePods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (NodeAllocatablePods) Name() string { + return "k8s.node.allocatable.pods" +} + +// Unit returns the semantic convention unit of the instrument +func (NodeAllocatablePods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (NodeAllocatablePods) Description() string { + return "Amount of pods allocatable on the node" +} + +// Add adds incr to the existing count for attrs. +func (m NodeAllocatablePods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m NodeAllocatablePods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) 
+} + +// NodeConditionStatus is an instrument used to record metric values conforming +// to the "k8s.node.condition.status" semantic conventions. It represents the +// describes the condition of a particular Node. +type NodeConditionStatus struct { + metric.Int64UpDownCounter +} + +// NewNodeConditionStatus returns a new NodeConditionStatus instrument. +func NewNodeConditionStatus( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (NodeConditionStatus, error) { + // Check if the meter is nil. + if m == nil { + return NodeConditionStatus{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.node.condition.status", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Describes the condition of a particular Node."), + metric.WithUnit("{node}"), + }, opt...)..., + ) + if err != nil { + return NodeConditionStatus{noop.Int64UpDownCounter{}}, err + } + return NodeConditionStatus{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NodeConditionStatus) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (NodeConditionStatus) Name() string { + return "k8s.node.condition.status" +} + +// Unit returns the semantic convention unit of the instrument +func (NodeConditionStatus) Unit() string { + return "{node}" +} + +// Description returns the semantic convention description of the instrument +func (NodeConditionStatus) Description() string { + return "Describes the condition of a particular Node." +} + +// Add adds incr to the existing count for attrs. +// +// The nodeConditionStatus is the the status of the condition, one of True, +// False, Unknown. +// +// The nodeConditionType is the the condition type of a K8s Node. +// +// All possible node condition pairs (type and status) will be reported at each +// time interval to avoid missing metrics. 
Condition pairs corresponding to the +// current conditions' statuses will be non-zero. +func (m NodeConditionStatus) Add( + ctx context.Context, + incr int64, + nodeConditionStatus NodeConditionStatusAttr, + nodeConditionType NodeConditionTypeAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("k8s.node.condition.status", string(nodeConditionStatus)), + attribute.String("k8s.node.condition.type", string(nodeConditionType)), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// All possible node condition pairs (type and status) will be reported at each +// time interval to avoid missing metrics. Condition pairs corresponding to the +// current conditions' statuses will be non-zero. +func (m NodeConditionStatus) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// NodeCPUTime is an instrument used to record metric values conforming to the +// "k8s.node.cpu.time" semantic conventions. It represents the total CPU time +// consumed. +type NodeCPUTime struct { + metric.Float64Counter +} + +// NewNodeCPUTime returns a new NodeCPUTime instrument. +func NewNodeCPUTime( + m metric.Meter, + opt ...metric.Float64CounterOption, +) (NodeCPUTime, error) { + // Check if the meter is nil. 
+ if m == nil { + return NodeCPUTime{noop.Float64Counter{}}, nil + } + + i, err := m.Float64Counter( + "k8s.node.cpu.time", + append([]metric.Float64CounterOption{ + metric.WithDescription("Total CPU time consumed"), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return NodeCPUTime{noop.Float64Counter{}}, err + } + return NodeCPUTime{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NodeCPUTime) Inst() metric.Float64Counter { + return m.Float64Counter +} + +// Name returns the semantic convention name of the instrument. +func (NodeCPUTime) Name() string { + return "k8s.node.cpu.time" +} + +// Unit returns the semantic convention unit of the instrument +func (NodeCPUTime) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (NodeCPUTime) Description() string { + return "Total CPU time consumed" +} + +// Add adds incr to the existing count for attrs. +// +// Total CPU time consumed by the specific Node on all available CPU cores +func (m NodeCPUTime) Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Float64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Float64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// Total CPU time consumed by the specific Node on all available CPU cores +func (m NodeCPUTime) AddSet(ctx context.Context, incr float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Counter.Add(ctx, incr, *o...) 
+} + +// NodeCPUUsage is an instrument used to record metric values conforming to the +// "k8s.node.cpu.usage" semantic conventions. It represents the node's CPU usage, +// measured in cpus. Range from 0 to the number of allocatable CPUs. +type NodeCPUUsage struct { + metric.Int64Gauge +} + +// NewNodeCPUUsage returns a new NodeCPUUsage instrument. +func NewNodeCPUUsage( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (NodeCPUUsage, error) { + // Check if the meter is nil. + if m == nil { + return NodeCPUUsage{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "k8s.node.cpu.usage", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Node's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs"), + metric.WithUnit("{cpu}"), + }, opt...)..., + ) + if err != nil { + return NodeCPUUsage{noop.Int64Gauge{}}, err + } + return NodeCPUUsage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NodeCPUUsage) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (NodeCPUUsage) Name() string { + return "k8s.node.cpu.usage" +} + +// Unit returns the semantic convention unit of the instrument +func (NodeCPUUsage) Unit() string { + return "{cpu}" +} + +// Description returns the semantic convention description of the instrument +func (NodeCPUUsage) Description() string { + return "Node's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs" +} + +// Record records val to the current distribution for attrs. 
+// +// CPU usage of the specific Node on all available CPU cores, averaged over the +// sample window +func (m NodeCPUUsage) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// CPU usage of the specific Node on all available CPU cores, averaged over the +// sample window +func (m NodeCPUUsage) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// NodeMemoryUsage is an instrument used to record metric values conforming to +// the "k8s.node.memory.usage" semantic conventions. It represents the memory +// usage of the Node. +type NodeMemoryUsage struct { + metric.Int64Gauge +} + +// NewNodeMemoryUsage returns a new NodeMemoryUsage instrument. +func NewNodeMemoryUsage( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (NodeMemoryUsage, error) { + // Check if the meter is nil. + if m == nil { + return NodeMemoryUsage{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "k8s.node.memory.usage", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Memory usage of the Node"), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return NodeMemoryUsage{noop.Int64Gauge{}}, err + } + return NodeMemoryUsage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NodeMemoryUsage) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. 
+func (NodeMemoryUsage) Name() string { + return "k8s.node.memory.usage" +} + +// Unit returns the semantic convention unit of the instrument +func (NodeMemoryUsage) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (NodeMemoryUsage) Description() string { + return "Memory usage of the Node" +} + +// Record records val to the current distribution for attrs. +// +// Total memory usage of the Node +func (m NodeMemoryUsage) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// Total memory usage of the Node +func (m NodeMemoryUsage) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// NodeNetworkErrors is an instrument used to record metric values conforming to +// the "k8s.node.network.errors" semantic conventions. It represents the node +// network errors. +type NodeNetworkErrors struct { + metric.Int64Counter +} + +// NewNodeNetworkErrors returns a new NodeNetworkErrors instrument. +func NewNodeNetworkErrors( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (NodeNetworkErrors, error) { + // Check if the meter is nil. 
+ if m == nil { + return NodeNetworkErrors{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "k8s.node.network.errors", + append([]metric.Int64CounterOption{ + metric.WithDescription("Node network errors"), + metric.WithUnit("{error}"), + }, opt...)..., + ) + if err != nil { + return NodeNetworkErrors{noop.Int64Counter{}}, err + } + return NodeNetworkErrors{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NodeNetworkErrors) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (NodeNetworkErrors) Name() string { + return "k8s.node.network.errors" +} + +// Unit returns the semantic convention unit of the instrument +func (NodeNetworkErrors) Unit() string { + return "{error}" +} + +// Description returns the semantic convention description of the instrument +func (NodeNetworkErrors) Description() string { + return "Node network errors" +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m NodeNetworkErrors) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m NodeNetworkErrors) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) 
+} + +// AttrNetworkInterfaceName returns an optional attribute for the +// "network.interface.name" semantic convention. It represents the network +// interface name. +func (NodeNetworkErrors) AttrNetworkInterfaceName(val string) attribute.KeyValue { + return attribute.String("network.interface.name", val) +} + +// AttrNetworkIODirection returns an optional attribute for the +// "network.io.direction" semantic convention. It represents the network IO +// operation direction. +func (NodeNetworkErrors) AttrNetworkIODirection(val NetworkIODirectionAttr) attribute.KeyValue { + return attribute.String("network.io.direction", string(val)) +} + +// NodeNetworkIO is an instrument used to record metric values conforming to the +// "k8s.node.network.io" semantic conventions. It represents the network bytes +// for the Node. +type NodeNetworkIO struct { + metric.Int64Counter +} + +// NewNodeNetworkIO returns a new NodeNetworkIO instrument. +func NewNodeNetworkIO( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (NodeNetworkIO, error) { + // Check if the meter is nil. + if m == nil { + return NodeNetworkIO{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "k8s.node.network.io", + append([]metric.Int64CounterOption{ + metric.WithDescription("Network bytes for the Node"), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return NodeNetworkIO{noop.Int64Counter{}}, err + } + return NodeNetworkIO{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NodeNetworkIO) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. 
+func (NodeNetworkIO) Name() string { + return "k8s.node.network.io" +} + +// Unit returns the semantic convention unit of the instrument +func (NodeNetworkIO) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (NodeNetworkIO) Description() string { + return "Network bytes for the Node" +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m NodeNetworkIO) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m NodeNetworkIO) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrNetworkInterfaceName returns an optional attribute for the +// "network.interface.name" semantic convention. It represents the network +// interface name. +func (NodeNetworkIO) AttrNetworkInterfaceName(val string) attribute.KeyValue { + return attribute.String("network.interface.name", val) +} + +// AttrNetworkIODirection returns an optional attribute for the +// "network.io.direction" semantic convention. It represents the network IO +// operation direction. 
+func (NodeNetworkIO) AttrNetworkIODirection(val NetworkIODirectionAttr) attribute.KeyValue { + return attribute.String("network.io.direction", string(val)) +} + +// NodeUptime is an instrument used to record metric values conforming to the +// "k8s.node.uptime" semantic conventions. It represents the time the Node has +// been running. +type NodeUptime struct { + metric.Float64Gauge +} + +// NewNodeUptime returns a new NodeUptime instrument. +func NewNodeUptime( + m metric.Meter, + opt ...metric.Float64GaugeOption, +) (NodeUptime, error) { + // Check if the meter is nil. + if m == nil { + return NodeUptime{noop.Float64Gauge{}}, nil + } + + i, err := m.Float64Gauge( + "k8s.node.uptime", + append([]metric.Float64GaugeOption{ + metric.WithDescription("The time the Node has been running"), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return NodeUptime{noop.Float64Gauge{}}, err + } + return NodeUptime{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NodeUptime) Inst() metric.Float64Gauge { + return m.Float64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (NodeUptime) Name() string { + return "k8s.node.uptime" +} + +// Unit returns the semantic convention unit of the instrument +func (NodeUptime) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (NodeUptime) Description() string { + return "The time the Node has been running" +} + +// Record records val to the current distribution for attrs. +// +// Instrumentations SHOULD use a gauge with type `double` and measure uptime in +// seconds as a floating point number with the highest precision available. +// The actual accuracy would depend on the instrumentation and operating system. 
+func (m NodeUptime) Record(ctx context.Context, val float64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Float64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Float64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// Instrumentations SHOULD use a gauge with type `double` and measure uptime in +// seconds as a floating point number with the highest precision available. +// The actual accuracy would depend on the instrumentation and operating system. +func (m NodeUptime) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Gauge.Record(ctx, val, *o...) +} + +// PodCPUTime is an instrument used to record metric values conforming to the +// "k8s.pod.cpu.time" semantic conventions. It represents the total CPU time +// consumed. +type PodCPUTime struct { + metric.Float64Counter +} + +// NewPodCPUTime returns a new PodCPUTime instrument. +func NewPodCPUTime( + m metric.Meter, + opt ...metric.Float64CounterOption, +) (PodCPUTime, error) { + // Check if the meter is nil. + if m == nil { + return PodCPUTime{noop.Float64Counter{}}, nil + } + + i, err := m.Float64Counter( + "k8s.pod.cpu.time", + append([]metric.Float64CounterOption{ + metric.WithDescription("Total CPU time consumed"), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return PodCPUTime{noop.Float64Counter{}}, err + } + return PodCPUTime{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m PodCPUTime) Inst() metric.Float64Counter { + return m.Float64Counter +} + +// Name returns the semantic convention name of the instrument. +func (PodCPUTime) Name() string { + return "k8s.pod.cpu.time" +} + +// Unit returns the semantic convention unit of the instrument +func (PodCPUTime) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (PodCPUTime) Description() string { + return "Total CPU time consumed" +} + +// Add adds incr to the existing count for attrs. +// +// Total CPU time consumed by the specific Pod on all available CPU cores +func (m PodCPUTime) Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Float64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Float64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// Total CPU time consumed by the specific Pod on all available CPU cores +func (m PodCPUTime) AddSet(ctx context.Context, incr float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Counter.Add(ctx, incr, *o...) +} + +// PodCPUUsage is an instrument used to record metric values conforming to the +// "k8s.pod.cpu.usage" semantic conventions. It represents the pod's CPU usage, +// measured in cpus. Range from 0 to the number of allocatable CPUs. +type PodCPUUsage struct { + metric.Int64Gauge +} + +// NewPodCPUUsage returns a new PodCPUUsage instrument. +func NewPodCPUUsage( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (PodCPUUsage, error) { + // Check if the meter is nil. 
+ if m == nil { + return PodCPUUsage{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "k8s.pod.cpu.usage", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Pod's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs"), + metric.WithUnit("{cpu}"), + }, opt...)..., + ) + if err != nil { + return PodCPUUsage{noop.Int64Gauge{}}, err + } + return PodCPUUsage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m PodCPUUsage) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (PodCPUUsage) Name() string { + return "k8s.pod.cpu.usage" +} + +// Unit returns the semantic convention unit of the instrument +func (PodCPUUsage) Unit() string { + return "{cpu}" +} + +// Description returns the semantic convention description of the instrument +func (PodCPUUsage) Description() string { + return "Pod's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs" +} + +// Record records val to the current distribution for attrs. +// +// CPU usage of the specific Pod on all available CPU cores, averaged over the +// sample window +func (m PodCPUUsage) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+// +// CPU usage of the specific Pod on all available CPU cores, averaged over the +// sample window +func (m PodCPUUsage) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// PodMemoryUsage is an instrument used to record metric values conforming to the +// "k8s.pod.memory.usage" semantic conventions. It represents the memory usage of +// the Pod. +type PodMemoryUsage struct { + metric.Int64Gauge +} + +// NewPodMemoryUsage returns a new PodMemoryUsage instrument. +func NewPodMemoryUsage( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (PodMemoryUsage, error) { + // Check if the meter is nil. + if m == nil { + return PodMemoryUsage{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "k8s.pod.memory.usage", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Memory usage of the Pod"), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return PodMemoryUsage{noop.Int64Gauge{}}, err + } + return PodMemoryUsage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m PodMemoryUsage) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (PodMemoryUsage) Name() string { + return "k8s.pod.memory.usage" +} + +// Unit returns the semantic convention unit of the instrument +func (PodMemoryUsage) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (PodMemoryUsage) Description() string { + return "Memory usage of the Pod" +} + +// Record records val to the current distribution for attrs. 
+// +// Total memory usage of the Pod +func (m PodMemoryUsage) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// Total memory usage of the Pod +func (m PodMemoryUsage) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// PodNetworkErrors is an instrument used to record metric values conforming to +// the "k8s.pod.network.errors" semantic conventions. It represents the pod +// network errors. +type PodNetworkErrors struct { + metric.Int64Counter +} + +// NewPodNetworkErrors returns a new PodNetworkErrors instrument. +func NewPodNetworkErrors( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (PodNetworkErrors, error) { + // Check if the meter is nil. + if m == nil { + return PodNetworkErrors{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "k8s.pod.network.errors", + append([]metric.Int64CounterOption{ + metric.WithDescription("Pod network errors"), + metric.WithUnit("{error}"), + }, opt...)..., + ) + if err != nil { + return PodNetworkErrors{noop.Int64Counter{}}, err + } + return PodNetworkErrors{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m PodNetworkErrors) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. 
+func (PodNetworkErrors) Name() string { + return "k8s.pod.network.errors" +} + +// Unit returns the semantic convention unit of the instrument +func (PodNetworkErrors) Unit() string { + return "{error}" +} + +// Description returns the semantic convention description of the instrument +func (PodNetworkErrors) Description() string { + return "Pod network errors" +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m PodNetworkErrors) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m PodNetworkErrors) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrNetworkInterfaceName returns an optional attribute for the +// "network.interface.name" semantic convention. It represents the network +// interface name. +func (PodNetworkErrors) AttrNetworkInterfaceName(val string) attribute.KeyValue { + return attribute.String("network.interface.name", val) +} + +// AttrNetworkIODirection returns an optional attribute for the +// "network.io.direction" semantic convention. It represents the network IO +// operation direction. 
+func (PodNetworkErrors) AttrNetworkIODirection(val NetworkIODirectionAttr) attribute.KeyValue { + return attribute.String("network.io.direction", string(val)) +} + +// PodNetworkIO is an instrument used to record metric values conforming to the +// "k8s.pod.network.io" semantic conventions. It represents the network bytes for +// the Pod. +type PodNetworkIO struct { + metric.Int64Counter +} + +// NewPodNetworkIO returns a new PodNetworkIO instrument. +func NewPodNetworkIO( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (PodNetworkIO, error) { + // Check if the meter is nil. + if m == nil { + return PodNetworkIO{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "k8s.pod.network.io", + append([]metric.Int64CounterOption{ + metric.WithDescription("Network bytes for the Pod"), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return PodNetworkIO{noop.Int64Counter{}}, err + } + return PodNetworkIO{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m PodNetworkIO) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (PodNetworkIO) Name() string { + return "k8s.pod.network.io" +} + +// Unit returns the semantic convention unit of the instrument +func (PodNetworkIO) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (PodNetworkIO) Description() string { + return "Network bytes for the Pod" +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. 
+func (m PodNetworkIO) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m PodNetworkIO) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrNetworkInterfaceName returns an optional attribute for the +// "network.interface.name" semantic convention. It represents the network +// interface name. +func (PodNetworkIO) AttrNetworkInterfaceName(val string) attribute.KeyValue { + return attribute.String("network.interface.name", val) +} + +// AttrNetworkIODirection returns an optional attribute for the +// "network.io.direction" semantic convention. It represents the network IO +// operation direction. +func (PodNetworkIO) AttrNetworkIODirection(val NetworkIODirectionAttr) attribute.KeyValue { + return attribute.String("network.io.direction", string(val)) +} + +// PodUptime is an instrument used to record metric values conforming to the +// "k8s.pod.uptime" semantic conventions. It represents the time the Pod has been +// running. +type PodUptime struct { + metric.Float64Gauge +} + +// NewPodUptime returns a new PodUptime instrument. +func NewPodUptime( + m metric.Meter, + opt ...metric.Float64GaugeOption, +) (PodUptime, error) { + // Check if the meter is nil. 
+ if m == nil { + return PodUptime{noop.Float64Gauge{}}, nil + } + + i, err := m.Float64Gauge( + "k8s.pod.uptime", + append([]metric.Float64GaugeOption{ + metric.WithDescription("The time the Pod has been running"), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return PodUptime{noop.Float64Gauge{}}, err + } + return PodUptime{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m PodUptime) Inst() metric.Float64Gauge { + return m.Float64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (PodUptime) Name() string { + return "k8s.pod.uptime" +} + +// Unit returns the semantic convention unit of the instrument +func (PodUptime) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (PodUptime) Description() string { + return "The time the Pod has been running" +} + +// Record records val to the current distribution for attrs. +// +// Instrumentations SHOULD use a gauge with type `double` and measure uptime in +// seconds as a floating point number with the highest precision available. +// The actual accuracy would depend on the instrumentation and operating system. +func (m PodUptime) Record(ctx context.Context, val float64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Float64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Float64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// Instrumentations SHOULD use a gauge with type `double` and measure uptime in +// seconds as a floating point number with the highest precision available. +// The actual accuracy would depend on the instrumentation and operating system. 
+func (m PodUptime) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Gauge.Record(ctx, val, *o...) +} + +// ReplicaSetAvailablePods is an instrument used to record metric values +// conforming to the "k8s.replicaset.available_pods" semantic conventions. It +// represents the total number of available replica pods (ready for at least +// minReadySeconds) targeted by this replicaset. +type ReplicaSetAvailablePods struct { + metric.Int64UpDownCounter +} + +// NewReplicaSetAvailablePods returns a new ReplicaSetAvailablePods instrument. +func NewReplicaSetAvailablePods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ReplicaSetAvailablePods, error) { + // Check if the meter is nil. + if m == nil { + return ReplicaSetAvailablePods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.replicaset.available_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Total number of available replica pods (ready for at least minReadySeconds) targeted by this replicaset"), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return ReplicaSetAvailablePods{noop.Int64UpDownCounter{}}, err + } + return ReplicaSetAvailablePods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ReplicaSetAvailablePods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (ReplicaSetAvailablePods) Name() string { + return "k8s.replicaset.available_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (ReplicaSetAvailablePods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (ReplicaSetAvailablePods) Description() string { + return "Total number of available replica pods (ready for at least minReadySeconds) targeted by this replicaset" +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `availableReplicas` field of the +// [K8s ReplicaSetStatus]. +// +// [K8s ReplicaSetStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicasetstatus-v1-apps +func (m ReplicaSetAvailablePods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `availableReplicas` field of the +// [K8s ReplicaSetStatus]. +// +// [K8s ReplicaSetStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicasetstatus-v1-apps +func (m ReplicaSetAvailablePods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ReplicaSetDesiredPods is an instrument used to record metric values conforming +// to the "k8s.replicaset.desired_pods" semantic conventions. 
It represents the +// number of desired replica pods in this replicaset. +type ReplicaSetDesiredPods struct { + metric.Int64UpDownCounter +} + +// NewReplicaSetDesiredPods returns a new ReplicaSetDesiredPods instrument. +func NewReplicaSetDesiredPods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ReplicaSetDesiredPods, error) { + // Check if the meter is nil. + if m == nil { + return ReplicaSetDesiredPods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.replicaset.desired_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of desired replica pods in this replicaset"), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return ReplicaSetDesiredPods{noop.Int64UpDownCounter{}}, err + } + return ReplicaSetDesiredPods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ReplicaSetDesiredPods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ReplicaSetDesiredPods) Name() string { + return "k8s.replicaset.desired_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (ReplicaSetDesiredPods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (ReplicaSetDesiredPods) Description() string { + return "Number of desired replica pods in this replicaset" +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `replicas` field of the +// [K8s ReplicaSetSpec]. 
+// +// [K8s ReplicaSetSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicasetspec-v1-apps +func (m ReplicaSetDesiredPods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `replicas` field of the +// [K8s ReplicaSetSpec]. +// +// [K8s ReplicaSetSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicasetspec-v1-apps +func (m ReplicaSetDesiredPods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ReplicationControllerAvailablePods is an instrument used to record metric +// values conforming to the "k8s.replicationcontroller.available_pods" semantic +// conventions. It represents the total number of available replica pods (ready +// for at least minReadySeconds) targeted by this replication controller. +type ReplicationControllerAvailablePods struct { + metric.Int64UpDownCounter +} + +// NewReplicationControllerAvailablePods returns a new +// ReplicationControllerAvailablePods instrument. +func NewReplicationControllerAvailablePods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ReplicationControllerAvailablePods, error) { + // Check if the meter is nil. 
+ if m == nil { + return ReplicationControllerAvailablePods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.replicationcontroller.available_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Total number of available replica pods (ready for at least minReadySeconds) targeted by this replication controller"), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return ReplicationControllerAvailablePods{noop.Int64UpDownCounter{}}, err + } + return ReplicationControllerAvailablePods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ReplicationControllerAvailablePods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ReplicationControllerAvailablePods) Name() string { + return "k8s.replicationcontroller.available_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (ReplicationControllerAvailablePods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (ReplicationControllerAvailablePods) Description() string { + return "Total number of available replica pods (ready for at least minReadySeconds) targeted by this replication controller" +} + +// Add adds incr to the existing count for attrs. 
+// +// This metric aligns with the `availableReplicas` field of the +// [K8s ReplicationControllerStatus] +// +// [K8s ReplicationControllerStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicationcontrollerstatus-v1-core +func (m ReplicationControllerAvailablePods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `availableReplicas` field of the +// [K8s ReplicationControllerStatus] +// +// [K8s ReplicationControllerStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicationcontrollerstatus-v1-core +func (m ReplicationControllerAvailablePods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ReplicationControllerDesiredPods is an instrument used to record metric values +// conforming to the "k8s.replicationcontroller.desired_pods" semantic +// conventions. It represents the number of desired replica pods in this +// replication controller. +type ReplicationControllerDesiredPods struct { + metric.Int64UpDownCounter +} + +// NewReplicationControllerDesiredPods returns a new +// ReplicationControllerDesiredPods instrument. +func NewReplicationControllerDesiredPods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ReplicationControllerDesiredPods, error) { + // Check if the meter is nil. 
+ if m == nil { + return ReplicationControllerDesiredPods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.replicationcontroller.desired_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of desired replica pods in this replication controller"), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return ReplicationControllerDesiredPods{noop.Int64UpDownCounter{}}, err + } + return ReplicationControllerDesiredPods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ReplicationControllerDesiredPods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ReplicationControllerDesiredPods) Name() string { + return "k8s.replicationcontroller.desired_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (ReplicationControllerDesiredPods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (ReplicationControllerDesiredPods) Description() string { + return "Number of desired replica pods in this replication controller" +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `replicas` field of the +// [K8s ReplicationControllerSpec] +// +// [K8s ReplicationControllerSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicationcontrollerspec-v1-core +func (m ReplicationControllerDesiredPods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+// +// This metric aligns with the `replicas` field of the +// [K8s ReplicationControllerSpec] +// +// [K8s ReplicationControllerSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicationcontrollerspec-v1-core +func (m ReplicationControllerDesiredPods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ResourceQuotaCPULimitHard is an instrument used to record metric values +// conforming to the "k8s.resourcequota.cpu.limit.hard" semantic conventions. It +// represents the CPU limits in a specific namespace. +// The value represents the configured quota limit of the resource in the +// namespace. +type ResourceQuotaCPULimitHard struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaCPULimitHard returns a new ResourceQuotaCPULimitHard +// instrument. +func NewResourceQuotaCPULimitHard( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaCPULimitHard, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaCPULimitHard{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.cpu.limit.hard", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The CPU limits in a specific namespace. The value represents the configured quota limit of the resource in the namespace."), + metric.WithUnit("{cpu}"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaCPULimitHard{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaCPULimitHard{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m ResourceQuotaCPULimitHard) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ResourceQuotaCPULimitHard) Name() string { + return "k8s.resourcequota.cpu.limit.hard" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaCPULimitHard) Unit() string { + return "{cpu}" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaCPULimitHard) Description() string { + return "The CPU limits in a specific namespace. The value represents the configured quota limit of the resource in the namespace." +} + +// Add adds incr to the existing count for attrs. +// +// This metric is retrieved from the `hard` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaCPULimitHard) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is retrieved from the `hard` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaCPULimitHard) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) 
+} + +// ResourceQuotaCPULimitUsed is an instrument used to record metric values +// conforming to the "k8s.resourcequota.cpu.limit.used" semantic conventions. It +// represents the CPU limits in a specific namespace. +// The value represents the current observed total usage of the resource in the +// namespace. +type ResourceQuotaCPULimitUsed struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaCPULimitUsed returns a new ResourceQuotaCPULimitUsed +// instrument. +func NewResourceQuotaCPULimitUsed( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaCPULimitUsed, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaCPULimitUsed{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.cpu.limit.used", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The CPU limits in a specific namespace. The value represents the current observed total usage of the resource in the namespace."), + metric.WithUnit("{cpu}"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaCPULimitUsed{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaCPULimitUsed{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ResourceQuotaCPULimitUsed) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ResourceQuotaCPULimitUsed) Name() string { + return "k8s.resourcequota.cpu.limit.used" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaCPULimitUsed) Unit() string { + return "{cpu}" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaCPULimitUsed) Description() string { + return "The CPU limits in a specific namespace. The value represents the current observed total usage of the resource in the namespace." +} + +// Add adds incr to the existing count for attrs. 
+// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaCPULimitUsed) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaCPULimitUsed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ResourceQuotaCPURequestHard is an instrument used to record metric values +// conforming to the "k8s.resourcequota.cpu.request.hard" semantic conventions. +// It represents the CPU requests in a specific namespace. +// The value represents the configured quota limit of the resource in the +// namespace. +type ResourceQuotaCPURequestHard struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaCPURequestHard returns a new ResourceQuotaCPURequestHard +// instrument. +func NewResourceQuotaCPURequestHard( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaCPURequestHard, error) { + // Check if the meter is nil. 
+ if m == nil { + return ResourceQuotaCPURequestHard{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.cpu.request.hard", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The CPU requests in a specific namespace. The value represents the configured quota limit of the resource in the namespace."), + metric.WithUnit("{cpu}"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaCPURequestHard{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaCPURequestHard{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ResourceQuotaCPURequestHard) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ResourceQuotaCPURequestHard) Name() string { + return "k8s.resourcequota.cpu.request.hard" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaCPURequestHard) Unit() string { + return "{cpu}" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaCPURequestHard) Description() string { + return "The CPU requests in a specific namespace. The value represents the configured quota limit of the resource in the namespace." +} + +// Add adds incr to the existing count for attrs. +// +// This metric is retrieved from the `hard` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaCPURequestHard) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) 
+} + +// AddSet adds incr to the existing count for set. +// +// This metric is retrieved from the `hard` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaCPURequestHard) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ResourceQuotaCPURequestUsed is an instrument used to record metric values +// conforming to the "k8s.resourcequota.cpu.request.used" semantic conventions. +// It represents the CPU requests in a specific namespace. +// The value represents the current observed total usage of the resource in the +// namespace. +type ResourceQuotaCPURequestUsed struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaCPURequestUsed returns a new ResourceQuotaCPURequestUsed +// instrument. +func NewResourceQuotaCPURequestUsed( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaCPURequestUsed, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaCPURequestUsed{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.cpu.request.used", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The CPU requests in a specific namespace. The value represents the current observed total usage of the resource in the namespace."), + metric.WithUnit("{cpu}"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaCPURequestUsed{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaCPURequestUsed{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m ResourceQuotaCPURequestUsed) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ResourceQuotaCPURequestUsed) Name() string { + return "k8s.resourcequota.cpu.request.used" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaCPURequestUsed) Unit() string { + return "{cpu}" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaCPURequestUsed) Description() string { + return "The CPU requests in a specific namespace. The value represents the current observed total usage of the resource in the namespace." +} + +// Add adds incr to the existing count for attrs. +// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaCPURequestUsed) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. 
+// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaCPURequestUsed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ResourceQuotaEphemeralStorageLimitHard is an instrument used to record metric +// values conforming to the "k8s.resourcequota.ephemeral_storage.limit.hard" +// semantic conventions. It represents the sum of local ephemeral storage limits +// in the namespace. +// The value represents the configured quota limit of the resource in the +// namespace. +type ResourceQuotaEphemeralStorageLimitHard struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaEphemeralStorageLimitHard returns a new +// ResourceQuotaEphemeralStorageLimitHard instrument. +func NewResourceQuotaEphemeralStorageLimitHard( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaEphemeralStorageLimitHard, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaEphemeralStorageLimitHard{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.ephemeral_storage.limit.hard", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The sum of local ephemeral storage limits in the namespace. The value represents the configured quota limit of the resource in the namespace."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaEphemeralStorageLimitHard{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaEphemeralStorageLimitHard{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m ResourceQuotaEphemeralStorageLimitHard) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ResourceQuotaEphemeralStorageLimitHard) Name() string { + return "k8s.resourcequota.ephemeral_storage.limit.hard" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaEphemeralStorageLimitHard) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaEphemeralStorageLimitHard) Description() string { + return "The sum of local ephemeral storage limits in the namespace. The value represents the configured quota limit of the resource in the namespace." +} + +// Add adds incr to the existing count for attrs. +// +// This metric is retrieved from the `hard` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaEphemeralStorageLimitHard) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is retrieved from the `hard` field of the +// [K8s ResourceQuotaStatus]. 
+// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaEphemeralStorageLimitHard) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ResourceQuotaEphemeralStorageLimitUsed is an instrument used to record metric +// values conforming to the "k8s.resourcequota.ephemeral_storage.limit.used" +// semantic conventions. It represents the sum of local ephemeral storage limits +// in the namespace. +// The value represents the current observed total usage of the resource in the +// namespace. +type ResourceQuotaEphemeralStorageLimitUsed struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaEphemeralStorageLimitUsed returns a new +// ResourceQuotaEphemeralStorageLimitUsed instrument. +func NewResourceQuotaEphemeralStorageLimitUsed( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaEphemeralStorageLimitUsed, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaEphemeralStorageLimitUsed{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.ephemeral_storage.limit.used", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The sum of local ephemeral storage limits in the namespace. The value represents the current observed total usage of the resource in the namespace."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaEphemeralStorageLimitUsed{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaEphemeralStorageLimitUsed{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m ResourceQuotaEphemeralStorageLimitUsed) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ResourceQuotaEphemeralStorageLimitUsed) Name() string { + return "k8s.resourcequota.ephemeral_storage.limit.used" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaEphemeralStorageLimitUsed) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaEphemeralStorageLimitUsed) Description() string { + return "The sum of local ephemeral storage limits in the namespace. The value represents the current observed total usage of the resource in the namespace." +} + +// Add adds incr to the existing count for attrs. +// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaEphemeralStorageLimitUsed) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. 
+// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaEphemeralStorageLimitUsed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ResourceQuotaEphemeralStorageRequestHard is an instrument used to record +// metric values conforming to the +// "k8s.resourcequota.ephemeral_storage.request.hard" semantic conventions. It +// represents the sum of local ephemeral storage requests in the namespace. +// The value represents the configured quota limit of the resource in the +// namespace. +type ResourceQuotaEphemeralStorageRequestHard struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaEphemeralStorageRequestHard returns a new +// ResourceQuotaEphemeralStorageRequestHard instrument. +func NewResourceQuotaEphemeralStorageRequestHard( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaEphemeralStorageRequestHard, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaEphemeralStorageRequestHard{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.ephemeral_storage.request.hard", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The sum of local ephemeral storage requests in the namespace. The value represents the configured quota limit of the resource in the namespace."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaEphemeralStorageRequestHard{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaEphemeralStorageRequestHard{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m ResourceQuotaEphemeralStorageRequestHard) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ResourceQuotaEphemeralStorageRequestHard) Name() string { + return "k8s.resourcequota.ephemeral_storage.request.hard" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaEphemeralStorageRequestHard) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaEphemeralStorageRequestHard) Description() string { + return "The sum of local ephemeral storage requests in the namespace. The value represents the configured quota limit of the resource in the namespace." +} + +// Add adds incr to the existing count for attrs. +// +// This metric is retrieved from the `hard` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaEphemeralStorageRequestHard) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is retrieved from the `hard` field of the +// [K8s ResourceQuotaStatus]. 
+// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaEphemeralStorageRequestHard) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ResourceQuotaEphemeralStorageRequestUsed is an instrument used to record +// metric values conforming to the +// "k8s.resourcequota.ephemeral_storage.request.used" semantic conventions. It +// represents the sum of local ephemeral storage requests in the namespace. +// The value represents the current observed total usage of the resource in the +// namespace. +type ResourceQuotaEphemeralStorageRequestUsed struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaEphemeralStorageRequestUsed returns a new +// ResourceQuotaEphemeralStorageRequestUsed instrument. +func NewResourceQuotaEphemeralStorageRequestUsed( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaEphemeralStorageRequestUsed, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaEphemeralStorageRequestUsed{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.ephemeral_storage.request.used", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The sum of local ephemeral storage requests in the namespace. The value represents the current observed total usage of the resource in the namespace."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaEphemeralStorageRequestUsed{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaEphemeralStorageRequestUsed{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m ResourceQuotaEphemeralStorageRequestUsed) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ResourceQuotaEphemeralStorageRequestUsed) Name() string { + return "k8s.resourcequota.ephemeral_storage.request.used" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaEphemeralStorageRequestUsed) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaEphemeralStorageRequestUsed) Description() string { + return "The sum of local ephemeral storage requests in the namespace. The value represents the current observed total usage of the resource in the namespace." +} + +// Add adds incr to the existing count for attrs. +// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaEphemeralStorageRequestUsed) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. 
+// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaEphemeralStorageRequestUsed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ResourceQuotaHugepageCountRequestHard is an instrument used to record metric +// values conforming to the "k8s.resourcequota.hugepage_count.request.hard" +// semantic conventions. It represents the huge page requests in a specific +// namespace. +// The value represents the configured quota limit of the resource in the +// namespace. +type ResourceQuotaHugepageCountRequestHard struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaHugepageCountRequestHard returns a new +// ResourceQuotaHugepageCountRequestHard instrument. +func NewResourceQuotaHugepageCountRequestHard( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaHugepageCountRequestHard, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaHugepageCountRequestHard{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.hugepage_count.request.hard", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The huge page requests in a specific namespace. The value represents the configured quota limit of the resource in the namespace."), + metric.WithUnit("{hugepage}"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaHugepageCountRequestHard{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaHugepageCountRequestHard{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m ResourceQuotaHugepageCountRequestHard) Inst() metric.Int64UpDownCounter {
+	return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ResourceQuotaHugepageCountRequestHard) Name() string {
+	return "k8s.resourcequota.hugepage_count.request.hard"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ResourceQuotaHugepageCountRequestHard) Unit() string {
+	return "{hugepage}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ResourceQuotaHugepageCountRequestHard) Description() string {
+	return "The huge page requests in a specific namespace. The value represents the configured quota limit of the resource in the namespace."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// The hugepageSize is the size (identifier) of the K8s huge page.
+//
+// This metric is retrieved from the `hard` field of the
+// [K8s ResourceQuotaStatus].
+//
+// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core
+func (m ResourceQuotaHugepageCountRequestHard) Add(
+	ctx context.Context,
+	incr int64,
+	hugepageSize string,
+	attrs ...attribute.KeyValue,
+) {
+	// hugepageSize is a required attribute of this metric: it must be
+	// recorded even when the caller supplies no additional attrs, so there
+	// is deliberately no attribute-free fast path here.
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("k8s.hugepage.size", hugepageSize),
+			)...,
+		),
+	)
+
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+//
+// This metric is retrieved from the `hard` field of the
+// [K8s ResourceQuotaStatus].
+// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaHugepageCountRequestHard) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ResourceQuotaHugepageCountRequestUsed is an instrument used to record metric +// values conforming to the "k8s.resourcequota.hugepage_count.request.used" +// semantic conventions. It represents the huge page requests in a specific +// namespace. +// The value represents the current observed total usage of the resource in the +// namespace. +type ResourceQuotaHugepageCountRequestUsed struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaHugepageCountRequestUsed returns a new +// ResourceQuotaHugepageCountRequestUsed instrument. +func NewResourceQuotaHugepageCountRequestUsed( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaHugepageCountRequestUsed, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaHugepageCountRequestUsed{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.hugepage_count.request.used", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The huge page requests in a specific namespace. The value represents the current observed total usage of the resource in the namespace."), + metric.WithUnit("{hugepage}"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaHugepageCountRequestUsed{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaHugepageCountRequestUsed{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m ResourceQuotaHugepageCountRequestUsed) Inst() metric.Int64UpDownCounter {
+	return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ResourceQuotaHugepageCountRequestUsed) Name() string {
+	return "k8s.resourcequota.hugepage_count.request.used"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ResourceQuotaHugepageCountRequestUsed) Unit() string {
+	return "{hugepage}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ResourceQuotaHugepageCountRequestUsed) Description() string {
+	return "The huge page requests in a specific namespace. The value represents the current observed total usage of the resource in the namespace."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// The hugepageSize is the size (identifier) of the K8s huge page.
+//
+// This metric is retrieved from the `used` field of the
+// [K8s ResourceQuotaStatus].
+//
+// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core
+func (m ResourceQuotaHugepageCountRequestUsed) Add(
+	ctx context.Context,
+	incr int64,
+	hugepageSize string,
+	attrs ...attribute.KeyValue,
+) {
+	// hugepageSize is a required attribute of this metric: it must be
+	// recorded even when the caller supplies no additional attrs, so there
+	// is deliberately no attribute-free fast path here.
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("k8s.hugepage.size", hugepageSize),
+			)...,
+		),
+	)
+
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+//
+// This metric is retrieved from the `used` field of the
+// [K8s ResourceQuotaStatus].
+// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaHugepageCountRequestUsed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ResourceQuotaMemoryLimitHard is an instrument used to record metric values +// conforming to the "k8s.resourcequota.memory.limit.hard" semantic conventions. +// It represents the memory limits in a specific namespace. +// The value represents the configured quota limit of the resource in the +// namespace. +type ResourceQuotaMemoryLimitHard struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaMemoryLimitHard returns a new ResourceQuotaMemoryLimitHard +// instrument. +func NewResourceQuotaMemoryLimitHard( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaMemoryLimitHard, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaMemoryLimitHard{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.memory.limit.hard", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The memory limits in a specific namespace. The value represents the configured quota limit of the resource in the namespace."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaMemoryLimitHard{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaMemoryLimitHard{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ResourceQuotaMemoryLimitHard) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (ResourceQuotaMemoryLimitHard) Name() string { + return "k8s.resourcequota.memory.limit.hard" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaMemoryLimitHard) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaMemoryLimitHard) Description() string { + return "The memory limits in a specific namespace. The value represents the configured quota limit of the resource in the namespace." +} + +// Add adds incr to the existing count for attrs. +// +// This metric is retrieved from the `hard` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaMemoryLimitHard) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is retrieved from the `hard` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaMemoryLimitHard) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) 
+} + +// ResourceQuotaMemoryLimitUsed is an instrument used to record metric values +// conforming to the "k8s.resourcequota.memory.limit.used" semantic conventions. +// It represents the memory limits in a specific namespace. +// The value represents the current observed total usage of the resource in the +// namespace. +type ResourceQuotaMemoryLimitUsed struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaMemoryLimitUsed returns a new ResourceQuotaMemoryLimitUsed +// instrument. +func NewResourceQuotaMemoryLimitUsed( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaMemoryLimitUsed, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaMemoryLimitUsed{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.memory.limit.used", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The memory limits in a specific namespace. The value represents the current observed total usage of the resource in the namespace."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaMemoryLimitUsed{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaMemoryLimitUsed{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ResourceQuotaMemoryLimitUsed) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ResourceQuotaMemoryLimitUsed) Name() string { + return "k8s.resourcequota.memory.limit.used" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaMemoryLimitUsed) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaMemoryLimitUsed) Description() string { + return "The memory limits in a specific namespace. The value represents the current observed total usage of the resource in the namespace." 
+} + +// Add adds incr to the existing count for attrs. +// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaMemoryLimitUsed) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaMemoryLimitUsed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ResourceQuotaMemoryRequestHard is an instrument used to record metric values +// conforming to the "k8s.resourcequota.memory.request.hard" semantic +// conventions. It represents the memory requests in a specific namespace. +// The value represents the configured quota limit of the resource in the +// namespace. +type ResourceQuotaMemoryRequestHard struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaMemoryRequestHard returns a new ResourceQuotaMemoryRequestHard +// instrument. 
+func NewResourceQuotaMemoryRequestHard( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaMemoryRequestHard, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaMemoryRequestHard{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.memory.request.hard", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The memory requests in a specific namespace. The value represents the configured quota limit of the resource in the namespace."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaMemoryRequestHard{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaMemoryRequestHard{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ResourceQuotaMemoryRequestHard) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ResourceQuotaMemoryRequestHard) Name() string { + return "k8s.resourcequota.memory.request.hard" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaMemoryRequestHard) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaMemoryRequestHard) Description() string { + return "The memory requests in a specific namespace. The value represents the configured quota limit of the resource in the namespace." +} + +// Add adds incr to the existing count for attrs. +// +// This metric is retrieved from the `hard` field of the +// [K8s ResourceQuotaStatus]. 
+// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaMemoryRequestHard) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is retrieved from the `hard` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaMemoryRequestHard) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ResourceQuotaMemoryRequestUsed is an instrument used to record metric values +// conforming to the "k8s.resourcequota.memory.request.used" semantic +// conventions. It represents the memory requests in a specific namespace. +// The value represents the current observed total usage of the resource in the +// namespace. +type ResourceQuotaMemoryRequestUsed struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaMemoryRequestUsed returns a new ResourceQuotaMemoryRequestUsed +// instrument. +func NewResourceQuotaMemoryRequestUsed( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaMemoryRequestUsed, error) { + // Check if the meter is nil. 
+ if m == nil { + return ResourceQuotaMemoryRequestUsed{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.memory.request.used", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The memory requests in a specific namespace. The value represents the current observed total usage of the resource in the namespace."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaMemoryRequestUsed{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaMemoryRequestUsed{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ResourceQuotaMemoryRequestUsed) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ResourceQuotaMemoryRequestUsed) Name() string { + return "k8s.resourcequota.memory.request.used" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaMemoryRequestUsed) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaMemoryRequestUsed) Description() string { + return "The memory requests in a specific namespace. The value represents the current observed total usage of the resource in the namespace." +} + +// Add adds incr to the existing count for attrs. +// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaMemoryRequestUsed) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) 
+} + +// AddSet adds incr to the existing count for set. +// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaMemoryRequestUsed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ResourceQuotaObjectCountHard is an instrument used to record metric values +// conforming to the "k8s.resourcequota.object_count.hard" semantic conventions. +// It represents the object count limits in a specific namespace. +// The value represents the configured quota limit of the resource in the +// namespace. +type ResourceQuotaObjectCountHard struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaObjectCountHard returns a new ResourceQuotaObjectCountHard +// instrument. +func NewResourceQuotaObjectCountHard( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaObjectCountHard, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaObjectCountHard{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.object_count.hard", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The object count limits in a specific namespace. The value represents the configured quota limit of the resource in the namespace."), + metric.WithUnit("{object}"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaObjectCountHard{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaObjectCountHard{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m ResourceQuotaObjectCountHard) Inst() metric.Int64UpDownCounter {
+	return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ResourceQuotaObjectCountHard) Name() string {
+	return "k8s.resourcequota.object_count.hard"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ResourceQuotaObjectCountHard) Unit() string {
+	return "{object}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ResourceQuotaObjectCountHard) Description() string {
+	return "The object count limits in a specific namespace. The value represents the configured quota limit of the resource in the namespace."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// The resourcequotaResourceName is the name of the K8s resource a resource
+// quota defines.
+//
+// This metric is retrieved from the `hard` field of the
+// [K8s ResourceQuotaStatus].
+//
+// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core
+func (m ResourceQuotaObjectCountHard) Add(
+	ctx context.Context,
+	incr int64,
+	resourcequotaResourceName string,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64UpDownCounter.Add(ctx, incr)
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("k8s.resourcequota.resource_name", resourcequotaResourceName),
+			)...,
+		),
+	)
+
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+//
+// This metric is retrieved from the `hard` field of the
+// [K8s ResourceQuotaStatus].
+// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaObjectCountHard) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ResourceQuotaObjectCountUsed is an instrument used to record metric values +// conforming to the "k8s.resourcequota.object_count.used" semantic conventions. +// It represents the object count limits in a specific namespace. +// The value represents the current observed total usage of the resource in the +// namespace. +type ResourceQuotaObjectCountUsed struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaObjectCountUsed returns a new ResourceQuotaObjectCountUsed +// instrument. +func NewResourceQuotaObjectCountUsed( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaObjectCountUsed, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaObjectCountUsed{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.object_count.used", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The object count limits in a specific namespace. The value represents the current observed total usage of the resource in the namespace."), + metric.WithUnit("{object}"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaObjectCountUsed{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaObjectCountUsed{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ResourceQuotaObjectCountUsed) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (ResourceQuotaObjectCountUsed) Name() string {
+	return "k8s.resourcequota.object_count.used"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ResourceQuotaObjectCountUsed) Unit() string {
+	return "{object}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ResourceQuotaObjectCountUsed) Description() string {
+	return "The object count limits in a specific namespace. The value represents the current observed total usage of the resource in the namespace."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// The resourcequotaResourceName is the name of the K8s resource a resource
+// quota defines.
+//
+// This metric is retrieved from the `used` field of the
+// [K8s ResourceQuotaStatus].
+//
+// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core
+func (m ResourceQuotaObjectCountUsed) Add(
+	ctx context.Context,
+	incr int64,
+	resourcequotaResourceName string,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64UpDownCounter.Add(ctx, incr)
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("k8s.resourcequota.resource_name", resourcequotaResourceName),
+			)...,
+		),
+	)
+
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+//
+// This metric is retrieved from the `used` field of the
+// [K8s ResourceQuotaStatus].
+// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaObjectCountUsed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ResourceQuotaPersistentvolumeclaimCountHard is an instrument used to record +// metric values conforming to the +// "k8s.resourcequota.persistentvolumeclaim_count.hard" semantic conventions. It +// represents the total number of PersistentVolumeClaims that can exist in the +// namespace. +// The value represents the configured quota limit of the resource in the +// namespace. +type ResourceQuotaPersistentvolumeclaimCountHard struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaPersistentvolumeclaimCountHard returns a new +// ResourceQuotaPersistentvolumeclaimCountHard instrument. +func NewResourceQuotaPersistentvolumeclaimCountHard( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaPersistentvolumeclaimCountHard, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaPersistentvolumeclaimCountHard{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.persistentvolumeclaim_count.hard", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The total number of PersistentVolumeClaims that can exist in the namespace. 
The value represents the configured quota limit of the resource in the namespace."), + metric.WithUnit("{persistentvolumeclaim}"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaPersistentvolumeclaimCountHard{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaPersistentvolumeclaimCountHard{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ResourceQuotaPersistentvolumeclaimCountHard) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ResourceQuotaPersistentvolumeclaimCountHard) Name() string { + return "k8s.resourcequota.persistentvolumeclaim_count.hard" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaPersistentvolumeclaimCountHard) Unit() string { + return "{persistentvolumeclaim}" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaPersistentvolumeclaimCountHard) Description() string { + return "The total number of PersistentVolumeClaims that can exist in the namespace. The value represents the configured quota limit of the resource in the namespace." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// This metric is retrieved from the `hard` field of the +// [K8s ResourceQuotaStatus]. +// +// The `k8s.storageclass.name` should be required when a resource quota is +// defined for a specific +// storage class. 
+// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaPersistentvolumeclaimCountHard) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is retrieved from the `hard` field of the +// [K8s ResourceQuotaStatus]. +// +// The `k8s.storageclass.name` should be required when a resource quota is +// defined for a specific +// storage class. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaPersistentvolumeclaimCountHard) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrStorageclassName returns an optional attribute for the +// "k8s.storageclass.name" semantic convention. It represents the name of K8s +// [StorageClass] object. 
+// +// [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io +func (ResourceQuotaPersistentvolumeclaimCountHard) AttrStorageclassName(val string) attribute.KeyValue { + return attribute.String("k8s.storageclass.name", val) +} + +// ResourceQuotaPersistentvolumeclaimCountUsed is an instrument used to record +// metric values conforming to the +// "k8s.resourcequota.persistentvolumeclaim_count.used" semantic conventions. It +// represents the total number of PersistentVolumeClaims that can exist in the +// namespace. +// The value represents the current observed total usage of the resource in the +// namespace. +type ResourceQuotaPersistentvolumeclaimCountUsed struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaPersistentvolumeclaimCountUsed returns a new +// ResourceQuotaPersistentvolumeclaimCountUsed instrument. +func NewResourceQuotaPersistentvolumeclaimCountUsed( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaPersistentvolumeclaimCountUsed, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaPersistentvolumeclaimCountUsed{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.persistentvolumeclaim_count.used", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The total number of PersistentVolumeClaims that can exist in the namespace. The value represents the current observed total usage of the resource in the namespace."), + metric.WithUnit("{persistentvolumeclaim}"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaPersistentvolumeclaimCountUsed{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaPersistentvolumeclaimCountUsed{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m ResourceQuotaPersistentvolumeclaimCountUsed) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ResourceQuotaPersistentvolumeclaimCountUsed) Name() string { + return "k8s.resourcequota.persistentvolumeclaim_count.used" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaPersistentvolumeclaimCountUsed) Unit() string { + return "{persistentvolumeclaim}" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaPersistentvolumeclaimCountUsed) Description() string { + return "The total number of PersistentVolumeClaims that can exist in the namespace. The value represents the current observed total usage of the resource in the namespace." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. +// +// The `k8s.storageclass.name` should be required when a resource quota is +// defined for a specific +// storage class. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaPersistentvolumeclaimCountUsed) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. 
+// +// The `k8s.storageclass.name` should be required when a resource quota is +// defined for a specific +// storage class. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaPersistentvolumeclaimCountUsed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrStorageclassName returns an optional attribute for the +// "k8s.storageclass.name" semantic convention. It represents the name of K8s +// [StorageClass] object. +// +// [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io +func (ResourceQuotaPersistentvolumeclaimCountUsed) AttrStorageclassName(val string) attribute.KeyValue { + return attribute.String("k8s.storageclass.name", val) +} + +// ResourceQuotaStorageRequestHard is an instrument used to record metric values +// conforming to the "k8s.resourcequota.storage.request.hard" semantic +// conventions. It represents the storage requests in a specific namespace. +// The value represents the configured quota limit of the resource in the +// namespace. +type ResourceQuotaStorageRequestHard struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaStorageRequestHard returns a new +// ResourceQuotaStorageRequestHard instrument. +func NewResourceQuotaStorageRequestHard( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaStorageRequestHard, error) { + // Check if the meter is nil. 
+ if m == nil { + return ResourceQuotaStorageRequestHard{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.storage.request.hard", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The storage requests in a specific namespace. The value represents the configured quota limit of the resource in the namespace."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaStorageRequestHard{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaStorageRequestHard{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ResourceQuotaStorageRequestHard) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ResourceQuotaStorageRequestHard) Name() string { + return "k8s.resourcequota.storage.request.hard" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaStorageRequestHard) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaStorageRequestHard) Description() string { + return "The storage requests in a specific namespace. The value represents the configured quota limit of the resource in the namespace." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// This metric is retrieved from the `hard` field of the +// [K8s ResourceQuotaStatus]. +// +// The `k8s.storageclass.name` should be required when a resource quota is +// defined for a specific +// storage class. 
+// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaStorageRequestHard) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is retrieved from the `hard` field of the +// [K8s ResourceQuotaStatus]. +// +// The `k8s.storageclass.name` should be required when a resource quota is +// defined for a specific +// storage class. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaStorageRequestHard) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrStorageclassName returns an optional attribute for the +// "k8s.storageclass.name" semantic convention. It represents the name of K8s +// [StorageClass] object. +// +// [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io +func (ResourceQuotaStorageRequestHard) AttrStorageclassName(val string) attribute.KeyValue { + return attribute.String("k8s.storageclass.name", val) +} + +// ResourceQuotaStorageRequestUsed is an instrument used to record metric values +// conforming to the "k8s.resourcequota.storage.request.used" semantic +// conventions. 
It represents the storage requests in a specific namespace. +// The value represents the current observed total usage of the resource in the +// namespace. +type ResourceQuotaStorageRequestUsed struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaStorageRequestUsed returns a new +// ResourceQuotaStorageRequestUsed instrument. +func NewResourceQuotaStorageRequestUsed( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaStorageRequestUsed, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaStorageRequestUsed{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.storage.request.used", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The storage requests in a specific namespace. The value represents the current observed total usage of the resource in the namespace."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaStorageRequestUsed{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaStorageRequestUsed{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ResourceQuotaStorageRequestUsed) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ResourceQuotaStorageRequestUsed) Name() string { + return "k8s.resourcequota.storage.request.used" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaStorageRequestUsed) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaStorageRequestUsed) Description() string { + return "The storage requests in a specific namespace. The value represents the current observed total usage of the resource in the namespace." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. 
+// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. +// +// The `k8s.storageclass.name` should be required when a resource quota is +// defined for a specific +// storage class. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaStorageRequestUsed) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. +// +// The `k8s.storageclass.name` should be required when a resource quota is +// defined for a specific +// storage class. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaStorageRequestUsed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrStorageclassName returns an optional attribute for the +// "k8s.storageclass.name" semantic convention. It represents the name of K8s +// [StorageClass] object. 
+// +// [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io +func (ResourceQuotaStorageRequestUsed) AttrStorageclassName(val string) attribute.KeyValue { + return attribute.String("k8s.storageclass.name", val) +} + +// StatefulSetCurrentPods is an instrument used to record metric values +// conforming to the "k8s.statefulset.current_pods" semantic conventions. It +// represents the number of replica pods created by the statefulset controller +// from the statefulset version indicated by currentRevision. +type StatefulSetCurrentPods struct { + metric.Int64UpDownCounter +} + +// NewStatefulSetCurrentPods returns a new StatefulSetCurrentPods instrument. +func NewStatefulSetCurrentPods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (StatefulSetCurrentPods, error) { + // Check if the meter is nil. + if m == nil { + return StatefulSetCurrentPods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.statefulset.current_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of replica pods created by the statefulset controller from the statefulset version indicated by currentRevision"), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return StatefulSetCurrentPods{noop.Int64UpDownCounter{}}, err + } + return StatefulSetCurrentPods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m StatefulSetCurrentPods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (StatefulSetCurrentPods) Name() string { + return "k8s.statefulset.current_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (StatefulSetCurrentPods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (StatefulSetCurrentPods) Description() string { + return "The number of replica pods created by the statefulset controller from the statefulset version indicated by currentRevision" +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `currentReplicas` field of the +// [K8s StatefulSetStatus]. +// +// [K8s StatefulSetStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps +func (m StatefulSetCurrentPods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `currentReplicas` field of the +// [K8s StatefulSetStatus]. +// +// [K8s StatefulSetStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps +func (m StatefulSetCurrentPods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// StatefulSetDesiredPods is an instrument used to record metric values +// conforming to the "k8s.statefulset.desired_pods" semantic conventions. 
It +// represents the number of desired replica pods in this statefulset. +type StatefulSetDesiredPods struct { + metric.Int64UpDownCounter +} + +// NewStatefulSetDesiredPods returns a new StatefulSetDesiredPods instrument. +func NewStatefulSetDesiredPods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (StatefulSetDesiredPods, error) { + // Check if the meter is nil. + if m == nil { + return StatefulSetDesiredPods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.statefulset.desired_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of desired replica pods in this statefulset"), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return StatefulSetDesiredPods{noop.Int64UpDownCounter{}}, err + } + return StatefulSetDesiredPods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m StatefulSetDesiredPods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (StatefulSetDesiredPods) Name() string { + return "k8s.statefulset.desired_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (StatefulSetDesiredPods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (StatefulSetDesiredPods) Description() string { + return "Number of desired replica pods in this statefulset" +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `replicas` field of the +// [K8s StatefulSetSpec]. 
+// +// [K8s StatefulSetSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetspec-v1-apps +func (m StatefulSetDesiredPods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `replicas` field of the +// [K8s StatefulSetSpec]. +// +// [K8s StatefulSetSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetspec-v1-apps +func (m StatefulSetDesiredPods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// StatefulSetReadyPods is an instrument used to record metric values conforming +// to the "k8s.statefulset.ready_pods" semantic conventions. It represents the +// number of replica pods created for this statefulset with a Ready Condition. +type StatefulSetReadyPods struct { + metric.Int64UpDownCounter +} + +// NewStatefulSetReadyPods returns a new StatefulSetReadyPods instrument. +func NewStatefulSetReadyPods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (StatefulSetReadyPods, error) { + // Check if the meter is nil. 
+ if m == nil { + return StatefulSetReadyPods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.statefulset.ready_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of replica pods created for this statefulset with a Ready Condition"), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return StatefulSetReadyPods{noop.Int64UpDownCounter{}}, err + } + return StatefulSetReadyPods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m StatefulSetReadyPods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (StatefulSetReadyPods) Name() string { + return "k8s.statefulset.ready_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (StatefulSetReadyPods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (StatefulSetReadyPods) Description() string { + return "The number of replica pods created for this statefulset with a Ready Condition" +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `readyReplicas` field of the +// [K8s StatefulSetStatus]. +// +// [K8s StatefulSetStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps +func (m StatefulSetReadyPods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `readyReplicas` field of the +// [K8s StatefulSetStatus]. 
+// +// [K8s StatefulSetStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps +func (m StatefulSetReadyPods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// StatefulSetUpdatedPods is an instrument used to record metric values +// conforming to the "k8s.statefulset.updated_pods" semantic conventions. It +// represents the number of replica pods created by the statefulset controller +// from the statefulset version indicated by updateRevision. +type StatefulSetUpdatedPods struct { + metric.Int64UpDownCounter +} + +// NewStatefulSetUpdatedPods returns a new StatefulSetUpdatedPods instrument. +func NewStatefulSetUpdatedPods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (StatefulSetUpdatedPods, error) { + // Check if the meter is nil. + if m == nil { + return StatefulSetUpdatedPods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.statefulset.updated_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of replica pods created by the statefulset controller from the statefulset version indicated by updateRevision"), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return StatefulSetUpdatedPods{noop.Int64UpDownCounter{}}, err + } + return StatefulSetUpdatedPods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m StatefulSetUpdatedPods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (StatefulSetUpdatedPods) Name() string { + return "k8s.statefulset.updated_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (StatefulSetUpdatedPods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (StatefulSetUpdatedPods) Description() string { + return "Number of replica pods created by the statefulset controller from the statefulset version indicated by updateRevision" +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `updatedReplicas` field of the +// [K8s StatefulSetStatus]. +// +// [K8s StatefulSetStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps +func (m StatefulSetUpdatedPods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `updatedReplicas` field of the +// [K8s StatefulSetStatus]. +// +// [K8s StatefulSetStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps +func (m StatefulSetUpdatedPods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) 
+}
\ No newline at end of file
diff --git a/semconv/v1.36.0/messagingconv/metric.go b/semconv/v1.36.0/messagingconv/metric.go
new file mode 100644
index 00000000000..5f1837fe687
--- /dev/null
+++ b/semconv/v1.36.0/messagingconv/metric.go
@@ -0,0 +1,763 @@
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package messagingconv provides types and functionality for OpenTelemetry semantic
+// conventions in the "messaging" namespace.
+package messagingconv
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/noop"
+)
+
+var (
+	addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }}
+	recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }}
+)
+
+// ErrorTypeAttr is an attribute conforming to the error.type semantic
+// conventions. It represents the describes a class of error the operation ended
+// with.
+type ErrorTypeAttr string
+
+var (
+	// ErrorTypeOther is a fallback error value to be used when the instrumentation
+	// doesn't define a custom value.
+	ErrorTypeOther ErrorTypeAttr = "_OTHER"
+)
+
+// OperationTypeAttr is an attribute conforming to the messaging.operation.type
+// semantic conventions. It represents a string identifying the type of the
+// messaging operation.
+type OperationTypeAttr string
+
+var (
+	// OperationTypeCreate is a message is created. "Create" spans always refer to a
+	// single message and are used to provide a unique creation context for messages
+	// in batch sending scenarios.
+	OperationTypeCreate OperationTypeAttr = "create"
+	// OperationTypeSend is the one or more messages are provided for sending to an
+	// intermediary. If a single message is sent, the context of the "Send" span can
+	// be used as the creation context and no "Create" span needs to be created.
+ OperationTypeSend OperationTypeAttr = "send" + // OperationTypeReceive is the one or more messages are requested by a consumer. + // This operation refers to pull-based scenarios, where consumers explicitly + // call methods of messaging SDKs to receive messages. + OperationTypeReceive OperationTypeAttr = "receive" + // OperationTypeProcess is the one or more messages are processed by a consumer. + OperationTypeProcess OperationTypeAttr = "process" + // OperationTypeSettle is the one or more messages are settled. + OperationTypeSettle OperationTypeAttr = "settle" +) + +// SystemAttr is an attribute conforming to the messaging.system semantic +// conventions. It represents the messaging system as identified by the client +// instrumentation. +type SystemAttr string + +var ( + // SystemActiveMQ is the apache ActiveMQ. + SystemActiveMQ SystemAttr = "activemq" + // SystemAWSSQS is the amazon Simple Queue Service (SQS). + SystemAWSSQS SystemAttr = "aws_sqs" + // SystemEventGrid is the azure Event Grid. + SystemEventGrid SystemAttr = "eventgrid" + // SystemEventHubs is the azure Event Hubs. + SystemEventHubs SystemAttr = "eventhubs" + // SystemServiceBus is the azure Service Bus. + SystemServiceBus SystemAttr = "servicebus" + // SystemGCPPubSub is the google Cloud Pub/Sub. + SystemGCPPubSub SystemAttr = "gcp_pubsub" + // SystemJMS is the java Message Service. + SystemJMS SystemAttr = "jms" + // SystemKafka is the apache Kafka. + SystemKafka SystemAttr = "kafka" + // SystemRabbitMQ is the rabbitMQ. + SystemRabbitMQ SystemAttr = "rabbitmq" + // SystemRocketMQ is the apache RocketMQ. + SystemRocketMQ SystemAttr = "rocketmq" + // SystemPulsar is the apache Pulsar. + SystemPulsar SystemAttr = "pulsar" +) + +// ClientConsumedMessages is an instrument used to record metric values +// conforming to the "messaging.client.consumed.messages" semantic conventions. +// It represents the number of messages that were delivered to the application. 
+type ClientConsumedMessages struct { + metric.Int64Counter +} + +// NewClientConsumedMessages returns a new ClientConsumedMessages instrument. +func NewClientConsumedMessages( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (ClientConsumedMessages, error) { + // Check if the meter is nil. + if m == nil { + return ClientConsumedMessages{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "messaging.client.consumed.messages", + append([]metric.Int64CounterOption{ + metric.WithDescription("Number of messages that were delivered to the application."), + metric.WithUnit("{message}"), + }, opt...)..., + ) + if err != nil { + return ClientConsumedMessages{noop.Int64Counter{}}, err + } + return ClientConsumedMessages{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientConsumedMessages) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (ClientConsumedMessages) Name() string { + return "messaging.client.consumed.messages" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientConsumedMessages) Unit() string { + return "{message}" +} + +// Description returns the semantic convention description of the instrument +func (ClientConsumedMessages) Description() string { + return "Number of messages that were delivered to the application." +} + +// Add adds incr to the existing count for attrs. +// +// The operationName is the the system-specific name of the messaging operation. +// +// The system is the the messaging system as identified by the client +// instrumentation. +// +// All additional attrs passed are included in the recorded value. +// +// Records the number of messages pulled from the broker or number of messages +// dispatched to the application in push-based scenarios. +// The metric SHOULD be reported once per message delivery. 
For example, if +// receiving and processing operations are both instrumented for a single message +// delivery, this counter is incremented when the message is received and not +// reported when it is processed. +func (m ClientConsumedMessages) Add( + ctx context.Context, + incr int64, + operationName string, + system SystemAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("messaging.operation.name", operationName), + attribute.String("messaging.system", string(system)), + )..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// Records the number of messages pulled from the broker or number of messages +// dispatched to the application in push-based scenarios. +// The metric SHOULD be reported once per message delivery. For example, if +// receiving and processing operations are both instrumented for a single message +// delivery, this counter is incremented when the message is received and not +// reported when it is processed. +func (m ClientConsumedMessages) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. 
+func (ClientConsumedMessages) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrConsumerGroupName returns an optional attribute for the +// "messaging.consumer.group.name" semantic convention. It represents the name of +// the consumer group with which a consumer is associated. +func (ClientConsumedMessages) AttrConsumerGroupName(val string) attribute.KeyValue { + return attribute.String("messaging.consumer.group.name", val) +} + +// AttrDestinationName returns an optional attribute for the +// "messaging.destination.name" semantic convention. It represents the message +// destination name. +func (ClientConsumedMessages) AttrDestinationName(val string) attribute.KeyValue { + return attribute.String("messaging.destination.name", val) +} + +// AttrDestinationSubscriptionName returns an optional attribute for the +// "messaging.destination.subscription.name" semantic convention. It represents +// the name of the destination subscription from which a message is consumed. +func (ClientConsumedMessages) AttrDestinationSubscriptionName(val string) attribute.KeyValue { + return attribute.String("messaging.destination.subscription.name", val) +} + +// AttrDestinationTemplate returns an optional attribute for the +// "messaging.destination.template" semantic convention. It represents the low +// cardinality representation of the messaging destination name. +func (ClientConsumedMessages) AttrDestinationTemplate(val string) attribute.KeyValue { + return attribute.String("messaging.destination.template", val) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. 
+func (ClientConsumedMessages) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrDestinationPartitionID returns an optional attribute for the +// "messaging.destination.partition.id" semantic convention. It represents the +// identifier of the partition messages are sent to or received from, unique +// within the `messaging.destination.name`. +func (ClientConsumedMessages) AttrDestinationPartitionID(val string) attribute.KeyValue { + return attribute.String("messaging.destination.partition.id", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (ClientConsumedMessages) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// ClientOperationDuration is an instrument used to record metric values +// conforming to the "messaging.client.operation.duration" semantic conventions. +// It represents the duration of messaging operation initiated by a producer or +// consumer client. +type ClientOperationDuration struct { + metric.Float64Histogram +} + +// NewClientOperationDuration returns a new ClientOperationDuration instrument. +func NewClientOperationDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ClientOperationDuration, error) { + // Check if the meter is nil. + if m == nil { + return ClientOperationDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "messaging.client.operation.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("Duration of messaging operation initiated by a producer or consumer client."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ClientOperationDuration{noop.Float64Histogram{}}, err + } + return ClientOperationDuration{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m ClientOperationDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientOperationDuration) Name() string { + return "messaging.client.operation.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientOperationDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (ClientOperationDuration) Description() string { + return "Duration of messaging operation initiated by a producer or consumer client." +} + +// Record records val to the current distribution for attrs. +// +// The operationName is the the system-specific name of the messaging operation. +// +// The system is the the messaging system as identified by the client +// instrumentation. +// +// All additional attrs passed are included in the recorded value. +// +// This metric SHOULD NOT be used to report processing duration - processing +// duration is reported in `messaging.process.duration` metric. +func (m ClientOperationDuration) Record( + ctx context.Context, + val float64, + operationName string, + system SystemAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("messaging.operation.name", operationName), + attribute.String("messaging.system", string(system)), + )..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// This metric SHOULD NOT be used to report processing duration - processing +// duration is reported in `messaging.process.duration` metric. 
+func (m ClientOperationDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		// Fast path: nothing to append; without this return the value would
+		// be recorded a second time below (every sibling AddSet/RecordSet in
+		// this file returns here).
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents the describes a class of error the operation ended
+// with.
+func (ClientOperationDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+	return attribute.String("error.type", string(val))
+}
+
+// AttrConsumerGroupName returns an optional attribute for the
+// "messaging.consumer.group.name" semantic convention. It represents the name of
+// the consumer group with which a consumer is associated.
+func (ClientOperationDuration) AttrConsumerGroupName(val string) attribute.KeyValue {
+	return attribute.String("messaging.consumer.group.name", val)
+}
+
+// AttrDestinationName returns an optional attribute for the
+// "messaging.destination.name" semantic convention. It represents the message
+// destination name.
+func (ClientOperationDuration) AttrDestinationName(val string) attribute.KeyValue {
+	return attribute.String("messaging.destination.name", val)
+}
+
+// AttrDestinationSubscriptionName returns an optional attribute for the
+// "messaging.destination.subscription.name" semantic convention. It represents
+// the name of the destination subscription from which a message is consumed.
+func (ClientOperationDuration) AttrDestinationSubscriptionName(val string) attribute.KeyValue {
+	return attribute.String("messaging.destination.subscription.name", val)
+}
+
+// AttrDestinationTemplate returns an optional attribute for the
+// "messaging.destination.template" semantic convention. It represents the low
+// cardinality representation of the messaging destination name.
+func (ClientOperationDuration) AttrDestinationTemplate(val string) attribute.KeyValue { + return attribute.String("messaging.destination.template", val) +} + +// AttrOperationType returns an optional attribute for the +// "messaging.operation.type" semantic convention. It represents a string +// identifying the type of the messaging operation. +func (ClientOperationDuration) AttrOperationType(val OperationTypeAttr) attribute.KeyValue { + return attribute.String("messaging.operation.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (ClientOperationDuration) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrDestinationPartitionID returns an optional attribute for the +// "messaging.destination.partition.id" semantic convention. It represents the +// identifier of the partition messages are sent to or received from, unique +// within the `messaging.destination.name`. +func (ClientOperationDuration) AttrDestinationPartitionID(val string) attribute.KeyValue { + return attribute.String("messaging.destination.partition.id", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (ClientOperationDuration) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// ClientSentMessages is an instrument used to record metric values conforming to +// the "messaging.client.sent.messages" semantic conventions. It represents the +// number of messages producer attempted to send to the broker. +type ClientSentMessages struct { + metric.Int64Counter +} + +// NewClientSentMessages returns a new ClientSentMessages instrument. 
func NewClientSentMessages(
	m metric.Meter,
	opt ...metric.Int64CounterOption,
) (ClientSentMessages, error) {
	// Check if the meter is nil.
	if m == nil {
		return ClientSentMessages{noop.Int64Counter{}}, nil
	}

	i, err := m.Int64Counter(
		"messaging.client.sent.messages",
		append([]metric.Int64CounterOption{
			metric.WithDescription("Number of messages producer attempted to send to the broker."),
			metric.WithUnit("{message}"),
		}, opt...)...,
	)
	if err != nil {
		return ClientSentMessages{noop.Int64Counter{}}, err
	}
	return ClientSentMessages{i}, nil
}

// Inst returns the underlying metric instrument.
func (m ClientSentMessages) Inst() metric.Int64Counter {
	return m.Int64Counter
}

// Name returns the semantic convention name of the instrument.
func (ClientSentMessages) Name() string {
	return "messaging.client.sent.messages"
}

// Unit returns the semantic convention unit of the instrument.
func (ClientSentMessages) Unit() string {
	return "{message}"
}

// Description returns the semantic convention description of the instrument.
func (ClientSentMessages) Description() string {
	return "Number of messages producer attempted to send to the broker."
}

// Add adds incr to the existing count for attrs.
//
// The operationName is the system-specific name of the messaging operation.
//
// The system is the messaging system as identified by the client
// instrumentation.
//
// All additional attrs passed are included in the recorded value.
//
// This metric MUST NOT count messages that were created but haven't yet been
// sent.
func (m ClientSentMessages) Add(
	ctx context.Context,
	incr int64,
	operationName string,
	system SystemAttr,
	attrs ...attribute.KeyValue,
) {
	if len(attrs) == 0 {
		m.Int64Counter.Add(ctx, incr)
		return
	}

	o := addOptPool.Get().(*[]metric.AddOption)
	defer func() {
		*o = (*o)[:0]
		addOptPool.Put(o)
	}()

	*o = append(
		*o,
		metric.WithAttributes(
			append(
				attrs,
				attribute.String("messaging.operation.name", operationName),
				attribute.String("messaging.system", string(system)),
			)...,
		),
	)

	m.Int64Counter.Add(ctx, incr, *o...)
}

// AddSet adds incr to the existing count for set.
//
// This metric MUST NOT count messages that were created but haven't yet been
// sent.
func (m ClientSentMessages) AddSet(ctx context.Context, incr int64, set attribute.Set) {
	if set.Len() == 0 {
		m.Int64Counter.Add(ctx, incr)
		return
	}

	o := addOptPool.Get().(*[]metric.AddOption)
	defer func() {
		*o = (*o)[:0]
		addOptPool.Put(o)
	}()

	*o = append(*o, metric.WithAttributeSet(set))
	m.Int64Counter.Add(ctx, incr, *o...)
}

// AttrErrorType returns an optional attribute for the "error.type" semantic
// convention. It describes a class of error the operation ended with.
func (ClientSentMessages) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
	return attribute.String("error.type", string(val))
}

// AttrDestinationName returns an optional attribute for the
// "messaging.destination.name" semantic convention. It represents the message
// destination name.
func (ClientSentMessages) AttrDestinationName(val string) attribute.KeyValue {
	return attribute.String("messaging.destination.name", val)
}

// AttrDestinationTemplate returns an optional attribute for the
// "messaging.destination.template" semantic convention. It represents the low
// cardinality representation of the messaging destination name.
+func (ClientSentMessages) AttrDestinationTemplate(val string) attribute.KeyValue { + return attribute.String("messaging.destination.template", val) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (ClientSentMessages) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrDestinationPartitionID returns an optional attribute for the +// "messaging.destination.partition.id" semantic convention. It represents the +// identifier of the partition messages are sent to or received from, unique +// within the `messaging.destination.name`. +func (ClientSentMessages) AttrDestinationPartitionID(val string) attribute.KeyValue { + return attribute.String("messaging.destination.partition.id", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (ClientSentMessages) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// ProcessDuration is an instrument used to record metric values conforming to +// the "messaging.process.duration" semantic conventions. It represents the +// duration of processing operation. +type ProcessDuration struct { + metric.Float64Histogram +} + +// NewProcessDuration returns a new ProcessDuration instrument. +func NewProcessDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ProcessDuration, error) { + // Check if the meter is nil. 
+ if m == nil { + return ProcessDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "messaging.process.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("Duration of processing operation."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ProcessDuration{noop.Float64Histogram{}}, err + } + return ProcessDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ProcessDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ProcessDuration) Name() string { + return "messaging.process.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (ProcessDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (ProcessDuration) Description() string { + return "Duration of processing operation." +} + +// Record records val to the current distribution for attrs. +// +// The operationName is the the system-specific name of the messaging operation. +// +// The system is the the messaging system as identified by the client +// instrumentation. +// +// All additional attrs passed are included in the recorded value. +// +// This metric MUST be reported for operations with `messaging.operation.type` +// that matches `process`. 
+func (m ProcessDuration) Record( + ctx context.Context, + val float64, + operationName string, + system SystemAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("messaging.operation.name", operationName), + attribute.String("messaging.system", string(system)), + )..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// This metric MUST be reported for operations with `messaging.operation.type` +// that matches `process`. +func (m ProcessDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (ProcessDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrConsumerGroupName returns an optional attribute for the +// "messaging.consumer.group.name" semantic convention. It represents the name of +// the consumer group with which a consumer is associated. +func (ProcessDuration) AttrConsumerGroupName(val string) attribute.KeyValue { + return attribute.String("messaging.consumer.group.name", val) +} + +// AttrDestinationName returns an optional attribute for the +// "messaging.destination.name" semantic convention. It represents the message +// destination name. 
+func (ProcessDuration) AttrDestinationName(val string) attribute.KeyValue { + return attribute.String("messaging.destination.name", val) +} + +// AttrDestinationSubscriptionName returns an optional attribute for the +// "messaging.destination.subscription.name" semantic convention. It represents +// the name of the destination subscription from which a message is consumed. +func (ProcessDuration) AttrDestinationSubscriptionName(val string) attribute.KeyValue { + return attribute.String("messaging.destination.subscription.name", val) +} + +// AttrDestinationTemplate returns an optional attribute for the +// "messaging.destination.template" semantic convention. It represents the low +// cardinality representation of the messaging destination name. +func (ProcessDuration) AttrDestinationTemplate(val string) attribute.KeyValue { + return attribute.String("messaging.destination.template", val) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (ProcessDuration) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrDestinationPartitionID returns an optional attribute for the +// "messaging.destination.partition.id" semantic convention. It represents the +// identifier of the partition messages are sent to or received from, unique +// within the `messaging.destination.name`. +func (ProcessDuration) AttrDestinationPartitionID(val string) attribute.KeyValue { + return attribute.String("messaging.destination.partition.id", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. 
func (ProcessDuration) AttrServerPort(val int) attribute.KeyValue {
	return attribute.Int("server.port", val)
}
\ No newline at end of file
diff --git a/semconv/v1.36.0/otelconv/metric.go b/semconv/v1.36.0/otelconv/metric.go
new file mode 100644
index 00000000000..f5dd8fe80b6
--- /dev/null
+++ b/semconv/v1.36.0/otelconv/metric.go
@@ -0,0 +1,2126 @@
// Code generated from semantic convention specification. DO NOT EDIT.

// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

// Package otelconv provides types and functionality for OpenTelemetry semantic
// conventions in the "otel" namespace.
package otelconv

import (
	"context"
	"sync"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/noop"
)

var (
	addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }}
	recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }}
)

// ErrorTypeAttr is an attribute conforming to the error.type semantic
// conventions. It describes a class of error the operation ended with.
type ErrorTypeAttr string

var (
	// ErrorTypeOther is a fallback error value to be used when the instrumentation
	// doesn't define a custom value.
	ErrorTypeOther ErrorTypeAttr = "_OTHER"
)

// ComponentTypeAttr is an attribute conforming to the otel.component.type
// semantic conventions. It represents a name identifying the type of the
// OpenTelemetry component.
type ComponentTypeAttr string

var (
	// ComponentTypeBatchingSpanProcessor is the builtin SDK batching span
	// processor.
	ComponentTypeBatchingSpanProcessor ComponentTypeAttr = "batching_span_processor"
	// ComponentTypeSimpleSpanProcessor is the builtin SDK simple span processor.
	ComponentTypeSimpleSpanProcessor ComponentTypeAttr = "simple_span_processor"
	// ComponentTypeBatchingLogProcessor is the builtin SDK batching log record
	// processor.
+ ComponentTypeBatchingLogProcessor ComponentTypeAttr = "batching_log_processor" + // ComponentTypeSimpleLogProcessor is the builtin SDK simple log record + // processor. + ComponentTypeSimpleLogProcessor ComponentTypeAttr = "simple_log_processor" + // ComponentTypeOtlpGRPCSpanExporter is the OTLP span exporter over gRPC with + // protobuf serialization. + ComponentTypeOtlpGRPCSpanExporter ComponentTypeAttr = "otlp_grpc_span_exporter" + // ComponentTypeOtlpHTTPSpanExporter is the OTLP span exporter over HTTP with + // protobuf serialization. + ComponentTypeOtlpHTTPSpanExporter ComponentTypeAttr = "otlp_http_span_exporter" + // ComponentTypeOtlpHTTPJSONSpanExporter is the OTLP span exporter over HTTP + // with JSON serialization. + ComponentTypeOtlpHTTPJSONSpanExporter ComponentTypeAttr = "otlp_http_json_span_exporter" + // ComponentTypeZipkinHTTPSpanExporter is the zipkin span exporter over HTTP. + ComponentTypeZipkinHTTPSpanExporter ComponentTypeAttr = "zipkin_http_span_exporter" + // ComponentTypeOtlpGRPCLogExporter is the OTLP log record exporter over gRPC + // with protobuf serialization. + ComponentTypeOtlpGRPCLogExporter ComponentTypeAttr = "otlp_grpc_log_exporter" + // ComponentTypeOtlpHTTPLogExporter is the OTLP log record exporter over HTTP + // with protobuf serialization. + ComponentTypeOtlpHTTPLogExporter ComponentTypeAttr = "otlp_http_log_exporter" + // ComponentTypeOtlpHTTPJSONLogExporter is the OTLP log record exporter over + // HTTP with JSON serialization. + ComponentTypeOtlpHTTPJSONLogExporter ComponentTypeAttr = "otlp_http_json_log_exporter" + // ComponentTypePeriodicMetricReader is the builtin SDK periodically exporting + // metric reader. + ComponentTypePeriodicMetricReader ComponentTypeAttr = "periodic_metric_reader" + // ComponentTypeOtlpGRPCMetricExporter is the OTLP metric exporter over gRPC + // with protobuf serialization. 
+ ComponentTypeOtlpGRPCMetricExporter ComponentTypeAttr = "otlp_grpc_metric_exporter" + // ComponentTypeOtlpHTTPMetricExporter is the OTLP metric exporter over HTTP + // with protobuf serialization. + ComponentTypeOtlpHTTPMetricExporter ComponentTypeAttr = "otlp_http_metric_exporter" + // ComponentTypeOtlpHTTPJSONMetricExporter is the OTLP metric exporter over HTTP + // with JSON serialization. + ComponentTypeOtlpHTTPJSONMetricExporter ComponentTypeAttr = "otlp_http_json_metric_exporter" + // ComponentTypePrometheusHTTPTextMetricExporter is the prometheus metric + // exporter over HTTP with the default text-based format. + ComponentTypePrometheusHTTPTextMetricExporter ComponentTypeAttr = "prometheus_http_text_metric_exporter" +) + +// SpanParentOriginAttr is an attribute conforming to the otel.span.parent.origin +// semantic conventions. It represents the determines whether the span has a +// parent span, and if so, [whether it is a remote parent]. +// +// [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote +type SpanParentOriginAttr string + +var ( + // SpanParentOriginNone is the span does not have a parent, it is a root span. + SpanParentOriginNone SpanParentOriginAttr = "none" + // SpanParentOriginLocal is the span has a parent and the parent's span context + // [isRemote()] is false. + // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + SpanParentOriginLocal SpanParentOriginAttr = "local" + // SpanParentOriginRemote is the span has a parent and the parent's span context + // [isRemote()] is true. + // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + SpanParentOriginRemote SpanParentOriginAttr = "remote" +) + +// SpanSamplingResultAttr is an attribute conforming to the +// otel.span.sampling_result semantic conventions. It represents the result value +// of the sampler for this span. 
+type SpanSamplingResultAttr string + +var ( + // SpanSamplingResultDrop is the span is not sampled and not recording. + SpanSamplingResultDrop SpanSamplingResultAttr = "DROP" + // SpanSamplingResultRecordOnly is the span is not sampled, but recording. + SpanSamplingResultRecordOnly SpanSamplingResultAttr = "RECORD_ONLY" + // SpanSamplingResultRecordAndSample is the span is sampled and recording. + SpanSamplingResultRecordAndSample SpanSamplingResultAttr = "RECORD_AND_SAMPLE" +) + +// RPCGRPCStatusCodeAttr is an attribute conforming to the rpc.grpc.status_code +// semantic conventions. It represents the gRPC status code of the last gRPC +// requests performed in scope of this export call. +type RPCGRPCStatusCodeAttr int64 + +var ( + // RPCGRPCStatusCodeOk is the OK. + RPCGRPCStatusCodeOk RPCGRPCStatusCodeAttr = 0 + // RPCGRPCStatusCodeCancelled is the CANCELLED. + RPCGRPCStatusCodeCancelled RPCGRPCStatusCodeAttr = 1 + // RPCGRPCStatusCodeUnknown is the UNKNOWN. + RPCGRPCStatusCodeUnknown RPCGRPCStatusCodeAttr = 2 + // RPCGRPCStatusCodeInvalidArgument is the INVALID_ARGUMENT. + RPCGRPCStatusCodeInvalidArgument RPCGRPCStatusCodeAttr = 3 + // RPCGRPCStatusCodeDeadlineExceeded is the DEADLINE_EXCEEDED. + RPCGRPCStatusCodeDeadlineExceeded RPCGRPCStatusCodeAttr = 4 + // RPCGRPCStatusCodeNotFound is the NOT_FOUND. + RPCGRPCStatusCodeNotFound RPCGRPCStatusCodeAttr = 5 + // RPCGRPCStatusCodeAlreadyExists is the ALREADY_EXISTS. + RPCGRPCStatusCodeAlreadyExists RPCGRPCStatusCodeAttr = 6 + // RPCGRPCStatusCodePermissionDenied is the PERMISSION_DENIED. + RPCGRPCStatusCodePermissionDenied RPCGRPCStatusCodeAttr = 7 + // RPCGRPCStatusCodeResourceExhausted is the RESOURCE_EXHAUSTED. + RPCGRPCStatusCodeResourceExhausted RPCGRPCStatusCodeAttr = 8 + // RPCGRPCStatusCodeFailedPrecondition is the FAILED_PRECONDITION. + RPCGRPCStatusCodeFailedPrecondition RPCGRPCStatusCodeAttr = 9 + // RPCGRPCStatusCodeAborted is the ABORTED. 
+ RPCGRPCStatusCodeAborted RPCGRPCStatusCodeAttr = 10 + // RPCGRPCStatusCodeOutOfRange is the OUT_OF_RANGE. + RPCGRPCStatusCodeOutOfRange RPCGRPCStatusCodeAttr = 11 + // RPCGRPCStatusCodeUnimplemented is the UNIMPLEMENTED. + RPCGRPCStatusCodeUnimplemented RPCGRPCStatusCodeAttr = 12 + // RPCGRPCStatusCodeInternal is the INTERNAL. + RPCGRPCStatusCodeInternal RPCGRPCStatusCodeAttr = 13 + // RPCGRPCStatusCodeUnavailable is the UNAVAILABLE. + RPCGRPCStatusCodeUnavailable RPCGRPCStatusCodeAttr = 14 + // RPCGRPCStatusCodeDataLoss is the DATA_LOSS. + RPCGRPCStatusCodeDataLoss RPCGRPCStatusCodeAttr = 15 + // RPCGRPCStatusCodeUnauthenticated is the UNAUTHENTICATED. + RPCGRPCStatusCodeUnauthenticated RPCGRPCStatusCodeAttr = 16 +) + +// SDKExporterLogExported is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.log.exported" semantic conventions. It +// represents the number of log records for which the export has finished, either +// successful or failed. +type SDKExporterLogExported struct { + metric.Int64Counter +} + +// NewSDKExporterLogExported returns a new SDKExporterLogExported instrument. +func NewSDKExporterLogExported( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKExporterLogExported, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterLogExported{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "otel.sdk.exporter.log.exported", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of log records for which the export has finished, either successful or failed"), + metric.WithUnit("{log_record}"), + }, opt...)..., + ) + if err != nil { + return SDKExporterLogExported{noop.Int64Counter{}}, err + } + return SDKExporterLogExported{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterLogExported) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKExporterLogExported) Name() string { + return "otel.sdk.exporter.log.exported" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterLogExported) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterLogExported) Description() string { + return "The number of log records for which the export has finished, either successful or failed" +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_log_records`), rejected log records MUST count as failed and only +// non-rejected log records count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterLogExported) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_log_records`), rejected log records MUST count as failed and only +// non-rejected log records count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. 
+func (m SDKExporterLogExported) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKExporterLogExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterLogExported) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterLogExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterLogExported) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. 
+func (SDKExporterLogExported) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterLogInflight is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.log.inflight" semantic conventions. It +// represents the number of log records which were passed to the exporter, but +// that have not been exported yet (neither successful, nor failed). +type SDKExporterLogInflight struct { + metric.Int64UpDownCounter +} + +// NewSDKExporterLogInflight returns a new SDKExporterLogInflight instrument. +func NewSDKExporterLogInflight( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKExporterLogInflight, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.exporter.log.inflight", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)"), + metric.WithUnit("{log_record}"), + }, opt...)..., + ) + if err != nil { + return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, err + } + return SDKExporterLogInflight{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterLogInflight) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKExporterLogInflight) Name() string { + return "otel.sdk.exporter.log.inflight" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterLogInflight) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterLogInflight) Description() string { + return "The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)" +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterLogInflight) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterLogInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. 
+func (SDKExporterLogInflight) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterLogInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterLogInflight) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterLogInflight) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterMetricDataPointExported is an instrument used to record metric +// values conforming to the "otel.sdk.exporter.metric_data_point.exported" +// semantic conventions. It represents the number of metric data points for which +// the export has finished, either successful or failed. +type SDKExporterMetricDataPointExported struct { + metric.Int64Counter +} + +// NewSDKExporterMetricDataPointExported returns a new +// SDKExporterMetricDataPointExported instrument. +func NewSDKExporterMetricDataPointExported( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKExporterMetricDataPointExported, error) { + // Check if the meter is nil. 
+ if m == nil { + return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "otel.sdk.exporter.metric_data_point.exported", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of metric data points for which the export has finished, either successful or failed"), + metric.WithUnit("{data_point}"), + }, opt...)..., + ) + if err != nil { + return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, err + } + return SDKExporterMetricDataPointExported{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterMetricDataPointExported) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterMetricDataPointExported) Name() string { + return "otel.sdk.exporter.metric_data_point.exported" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterMetricDataPointExported) Unit() string { + return "{data_point}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterMetricDataPointExported) Description() string { + return "The number of metric data points for which the export has finished, either successful or failed" +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_data_points`), rejected data points MUST count as failed and only +// non-rejected data points count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. 
+func (m SDKExporterMetricDataPointExported) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_data_points`), rejected data points MUST count as failed and only +// non-rejected data points count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterMetricDataPointExported) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKExporterMetricDataPointExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. 
+func (SDKExporterMetricDataPointExported) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterMetricDataPointExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterMetricDataPointExported) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterMetricDataPointExported) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterMetricDataPointInflight is an instrument used to record metric +// values conforming to the "otel.sdk.exporter.metric_data_point.inflight" +// semantic conventions. It represents the number of metric data points which +// were passed to the exporter, but that have not been exported yet (neither +// successful, nor failed). +type SDKExporterMetricDataPointInflight struct { + metric.Int64UpDownCounter +} + +// NewSDKExporterMetricDataPointInflight returns a new +// SDKExporterMetricDataPointInflight instrument. +func NewSDKExporterMetricDataPointInflight( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKExporterMetricDataPointInflight, error) { + // Check if the meter is nil. 
+ if m == nil { + return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.exporter.metric_data_point.inflight", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)"), + metric.WithUnit("{data_point}"), + }, opt...)..., + ) + if err != nil { + return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, err + } + return SDKExporterMetricDataPointInflight{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterMetricDataPointInflight) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterMetricDataPointInflight) Name() string { + return "otel.sdk.exporter.metric_data_point.inflight" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterMetricDataPointInflight) Unit() string { + return "{data_point}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterMetricDataPointInflight) Description() string { + return "The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)" +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. 
+func (m SDKExporterMetricDataPointInflight) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterMetricDataPointInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterMetricDataPointInflight) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterMetricDataPointInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. 
+func (SDKExporterMetricDataPointInflight) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterMetricDataPointInflight) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterOperationDuration is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.operation.duration" semantic conventions. +// It represents the duration of exporting a batch of telemetry records. +type SDKExporterOperationDuration struct { + metric.Float64Histogram +} + +// NewSDKExporterOperationDuration returns a new SDKExporterOperationDuration +// instrument. +func NewSDKExporterOperationDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (SDKExporterOperationDuration, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterOperationDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "otel.sdk.exporter.operation.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("The duration of exporting a batch of telemetry records."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return SDKExporterOperationDuration{noop.Float64Histogram{}}, err + } + return SDKExporterOperationDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterOperationDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKExporterOperationDuration) Name() string { + return "otel.sdk.exporter.operation.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterOperationDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterOperationDuration) Description() string { + return "The duration of exporting a batch of telemetry records." +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// This metric defines successful operations using the full success definitions +// for [http] +// and [grpc]. Anything else is defined as an unsuccessful operation. For +// successful +// operations, `error.type` MUST NOT be set. For unsuccessful export operations, +// `error.type` MUST contain a relevant failure cause. +// +// [http]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success-1 +// [grpc]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success +func (m SDKExporterOperationDuration) Record( + ctx context.Context, + val float64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// This metric defines successful operations using the full success definitions +// for [http] +// and [grpc]. Anything else is defined as an unsuccessful operation. For +// successful +// operations, `error.type` MUST NOT be set. For unsuccessful export operations, +// `error.type` MUST contain a relevant failure cause. 
+//
+// [http]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success-1
+// [grpc]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success
+func (m SDKExporterOperationDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents the describes a class of error the operation ended
+// with.
+func (SDKExporterOperationDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+	return attribute.String("error.type", string(val))
+}
+
+// AttrHTTPResponseStatusCode returns an optional attribute for the
+// "http.response.status_code" semantic convention. It represents the HTTP status
+// code of the last HTTP request performed in scope of this export call.
+func (SDKExporterOperationDuration) AttrHTTPResponseStatusCode(val int) attribute.KeyValue {
+	return attribute.Int("http.response.status_code", val)
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKExporterOperationDuration) AttrComponentName(val string) attribute.KeyValue {
+	return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
+func (SDKExporterOperationDuration) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrRPCGRPCStatusCode returns an optional attribute for the +// "rpc.grpc.status_code" semantic convention. It represents the gRPC status code +// of the last gRPC requests performed in scope of this export call. +func (SDKExporterOperationDuration) AttrRPCGRPCStatusCode(val RPCGRPCStatusCodeAttr) attribute.KeyValue { + return attribute.Int64("rpc.grpc.status_code", int64(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterOperationDuration) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterOperationDuration) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterSpanExported is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.span.exported" semantic conventions. It +// represents the number of spans for which the export has finished, either +// successful or failed. +type SDKExporterSpanExported struct { + metric.Int64Counter +} + +// NewSDKExporterSpanExported returns a new SDKExporterSpanExported instrument. +func NewSDKExporterSpanExported( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKExporterSpanExported, error) { + // Check if the meter is nil. 
+ if m == nil { + return SDKExporterSpanExported{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "otel.sdk.exporter.span.exported", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of spans for which the export has finished, either successful or failed"), + metric.WithUnit("{span}"), + }, opt...)..., + ) + if err != nil { + return SDKExporterSpanExported{noop.Int64Counter{}}, err + } + return SDKExporterSpanExported{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterSpanExported) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterSpanExported) Name() string { + return "otel.sdk.exporter.span.exported" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterSpanExported) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterSpanExported) Description() string { + return "The number of spans for which the export has finished, either successful or failed" +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with `rejected_spans` +// ), rejected spans MUST count as failed and only non-rejected spans count as +// success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. 
+func (m SDKExporterSpanExported) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with `rejected_spans` +// ), rejected spans MUST count as failed and only non-rejected spans count as +// success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterSpanExported) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKExporterSpanExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. 
+func (SDKExporterSpanExported) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterSpanExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterSpanExported) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterSpanExported) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterSpanInflight is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.span.inflight" semantic conventions. It +// represents the number of spans which were passed to the exporter, but that +// have not been exported yet (neither successful, nor failed). +type SDKExporterSpanInflight struct { + metric.Int64UpDownCounter +} + +// NewSDKExporterSpanInflight returns a new SDKExporterSpanInflight instrument. +func NewSDKExporterSpanInflight( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKExporterSpanInflight, error) { + // Check if the meter is nil. 
+ if m == nil { + return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.exporter.span.inflight", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)"), + metric.WithUnit("{span}"), + }, opt...)..., + ) + if err != nil { + return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, err + } + return SDKExporterSpanInflight{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterSpanInflight) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterSpanInflight) Name() string { + return "otel.sdk.exporter.span.inflight" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterSpanInflight) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterSpanInflight) Description() string { + return "The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)" +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterSpanInflight) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterSpanInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterSpanInflight) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterSpanInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterSpanInflight) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterSpanInflight) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKLogCreated is an instrument used to record metric values conforming to the +// "otel.sdk.log.created" semantic conventions. 
It represents the number of logs +// submitted to enabled SDK Loggers. +type SDKLogCreated struct { + metric.Int64Counter +} + +// NewSDKLogCreated returns a new SDKLogCreated instrument. +func NewSDKLogCreated( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKLogCreated, error) { + // Check if the meter is nil. + if m == nil { + return SDKLogCreated{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "otel.sdk.log.created", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of logs submitted to enabled SDK Loggers"), + metric.WithUnit("{log_record}"), + }, opt...)..., + ) + if err != nil { + return SDKLogCreated{noop.Int64Counter{}}, err + } + return SDKLogCreated{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKLogCreated) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKLogCreated) Name() string { + return "otel.sdk.log.created" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKLogCreated) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKLogCreated) Description() string { + return "The number of logs submitted to enabled SDK Loggers" +} + +// Add adds incr to the existing count for attrs. +func (m SDKLogCreated) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+func (m SDKLogCreated) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// SDKMetricReaderCollectionDuration is an instrument used to record metric +// values conforming to the "otel.sdk.metric_reader.collection.duration" semantic +// conventions. It represents the duration of the collect operation of the metric +// reader. +type SDKMetricReaderCollectionDuration struct { + metric.Float64Histogram +} + +// NewSDKMetricReaderCollectionDuration returns a new +// SDKMetricReaderCollectionDuration instrument. +func NewSDKMetricReaderCollectionDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (SDKMetricReaderCollectionDuration, error) { + // Check if the meter is nil. + if m == nil { + return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "otel.sdk.metric_reader.collection.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("The duration of the collect operation of the metric reader."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, err + } + return SDKMetricReaderCollectionDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKMetricReaderCollectionDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKMetricReaderCollectionDuration) Name() string { + return "otel.sdk.metric_reader.collection.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKMetricReaderCollectionDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (SDKMetricReaderCollectionDuration) Description() string { + return "The duration of the collect operation of the metric reader." +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful collections, `error.type` MUST NOT be set. For failed +// collections, `error.type` SHOULD contain the failure cause. +// It can happen that metrics collection is successful for some MetricProducers, +// while others fail. In that case `error.type` SHOULD be set to any of the +// failure causes. +func (m SDKMetricReaderCollectionDuration) Record( + ctx context.Context, + val float64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// For successful collections, `error.type` MUST NOT be set. For failed +// collections, `error.type` SHOULD contain the failure cause. +// It can happen that metrics collection is successful for some MetricProducers, +// while others fail. In that case `error.type` SHOULD be set to any of the +// failure causes. 
+func (m SDKMetricReaderCollectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKMetricReaderCollectionDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKMetricReaderCollectionDuration) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKMetricReaderCollectionDuration) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorLogProcessed is an instrument used to record metric values +// conforming to the "otel.sdk.processor.log.processed" semantic conventions. It +// represents the number of log records for which the processing has finished, +// either successful or failed. +type SDKProcessorLogProcessed struct { + metric.Int64Counter +} + +// NewSDKProcessorLogProcessed returns a new SDKProcessorLogProcessed instrument. 
+func NewSDKProcessorLogProcessed( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKProcessorLogProcessed, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorLogProcessed{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "otel.sdk.processor.log.processed", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of log records for which the processing has finished, either successful or failed"), + metric.WithUnit("{log_record}"), + }, opt...)..., + ) + if err != nil { + return SDKProcessorLogProcessed{noop.Int64Counter{}}, err + } + return SDKProcessorLogProcessed{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorLogProcessed) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorLogProcessed) Name() string { + return "otel.sdk.processor.log.processed" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorLogProcessed) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorLogProcessed) Description() string { + return "The number of log records for which the processing has finished, either successful or failed" +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful processing, `error.type` MUST NOT be set. For failed +// processing, `error.type` MUST contain the failure cause. +// For the SDK Simple and Batching Log Record Processor a log record is +// considered to be processed already when it has been submitted to the exporter, +// not when the corresponding export call has finished. 
+func (m SDKProcessorLogProcessed) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful processing, `error.type` MUST NOT be set. For failed +// processing, `error.type` MUST contain the failure cause. +// For the SDK Simple and Batching Log Record Processor a log record is +// considered to be processed already when it has been submitted to the exporter, +// not when the corresponding export call has finished. +func (m SDKProcessorLogProcessed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents a low-cardinality description of the failure reason. +// SDK Batching Log Record Processors MUST use `queue_full` for log records +// dropped due to a full queue. +func (SDKProcessorLogProcessed) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. 
+func (SDKProcessorLogProcessed) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorLogProcessed) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorLogQueueCapacity is an instrument used to record metric values +// conforming to the "otel.sdk.processor.log.queue.capacity" semantic +// conventions. It represents the maximum number of log records the queue of a +// given instance of an SDK Log Record processor can hold. +type SDKProcessorLogQueueCapacity struct { + metric.Int64ObservableUpDownCounter +} + +// NewSDKProcessorLogQueueCapacity returns a new SDKProcessorLogQueueCapacity +// instrument. +func NewSDKProcessorLogQueueCapacity( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (SDKProcessorLogQueueCapacity, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil + } + + i, err := m.Int64ObservableUpDownCounter( + "otel.sdk.processor.log.queue.capacity", + append([]metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold"), + metric.WithUnit("{log_record}"), + }, opt...)..., + ) + if err != nil { + return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err + } + return SDKProcessorLogQueueCapacity{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorLogQueueCapacity) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKProcessorLogQueueCapacity) Name() string { + return "otel.sdk.processor.log.queue.capacity" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorLogQueueCapacity) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorLogQueueCapacity) Description() string { + return "The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold" +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorLogQueueCapacity) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorLogQueueCapacity) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorLogQueueSize is an instrument used to record metric values +// conforming to the "otel.sdk.processor.log.queue.size" semantic conventions. It +// represents the number of log records in the queue of a given instance of an +// SDK log processor. +type SDKProcessorLogQueueSize struct { + metric.Int64ObservableUpDownCounter +} + +// NewSDKProcessorLogQueueSize returns a new SDKProcessorLogQueueSize instrument. +func NewSDKProcessorLogQueueSize( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (SDKProcessorLogQueueSize, error) { + // Check if the meter is nil. 
+ if m == nil { + return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, nil + } + + i, err := m.Int64ObservableUpDownCounter( + "otel.sdk.processor.log.queue.size", + append([]metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The number of log records in the queue of a given instance of an SDK log processor"), + metric.WithUnit("{log_record}"), + }, opt...)..., + ) + if err != nil { + return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, err + } + return SDKProcessorLogQueueSize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorLogQueueSize) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorLogQueueSize) Name() string { + return "otel.sdk.processor.log.queue.size" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorLogQueueSize) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorLogQueueSize) Description() string { + return "The number of log records in the queue of a given instance of an SDK log processor" +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorLogQueueSize) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. 
+func (SDKProcessorLogQueueSize) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorSpanProcessed is an instrument used to record metric values +// conforming to the "otel.sdk.processor.span.processed" semantic conventions. It +// represents the number of spans for which the processing has finished, either +// successful or failed. +type SDKProcessorSpanProcessed struct { + metric.Int64Counter +} + +// NewSDKProcessorSpanProcessed returns a new SDKProcessorSpanProcessed +// instrument. +func NewSDKProcessorSpanProcessed( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKProcessorSpanProcessed, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorSpanProcessed{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "otel.sdk.processor.span.processed", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of spans for which the processing has finished, either successful or failed"), + metric.WithUnit("{span}"), + }, opt...)..., + ) + if err != nil { + return SDKProcessorSpanProcessed{noop.Int64Counter{}}, err + } + return SDKProcessorSpanProcessed{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorSpanProcessed) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorSpanProcessed) Name() string { + return "otel.sdk.processor.span.processed" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorSpanProcessed) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorSpanProcessed) Description() string { + return "The number of spans for which the processing has finished, either successful or failed" +} + +// Add adds incr to the existing count for attrs. 
+// +// All additional attrs passed are included in the recorded value. +// +// For successful processing, `error.type` MUST NOT be set. For failed +// processing, `error.type` MUST contain the failure cause. +// For the SDK Simple and Batching Span Processor a span is considered to be +// processed already when it has been submitted to the exporter, not when the +// corresponding export call has finished. +func (m SDKProcessorSpanProcessed) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful processing, `error.type` MUST NOT be set. For failed +// processing, `error.type` MUST contain the failure cause. +// For the SDK Simple and Batching Span Processor a span is considered to be +// processed already when it has been submitted to the exporter, not when the +// corresponding export call has finished. +func (m SDKProcessorSpanProcessed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents a low-cardinality description of the failure reason. +// SDK Batching Span Processors MUST use `queue_full` for spans dropped due to a +// full queue. 
+func (SDKProcessorSpanProcessed) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorSpanProcessed) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorSpanProcessed) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorSpanQueueCapacity is an instrument used to record metric values +// conforming to the "otel.sdk.processor.span.queue.capacity" semantic +// conventions. It represents the maximum number of spans the queue of a given +// instance of an SDK span processor can hold. +type SDKProcessorSpanQueueCapacity struct { + metric.Int64ObservableUpDownCounter +} + +// NewSDKProcessorSpanQueueCapacity returns a new SDKProcessorSpanQueueCapacity +// instrument. +func NewSDKProcessorSpanQueueCapacity( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (SDKProcessorSpanQueueCapacity, error) { + // Check if the meter is nil. 
+ if m == nil { + return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil + } + + i, err := m.Int64ObservableUpDownCounter( + "otel.sdk.processor.span.queue.capacity", + append([]metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The maximum number of spans the queue of a given instance of an SDK span processor can hold"), + metric.WithUnit("{span}"), + }, opt...)..., + ) + if err != nil { + return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err + } + return SDKProcessorSpanQueueCapacity{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorSpanQueueCapacity) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKProcessorSpanQueueCapacity) Name() string { + return "otel.sdk.processor.span.queue.capacity" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorSpanQueueCapacity) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorSpanQueueCapacity) Description() string { + return "The maximum number of spans the queue of a given instance of an SDK span processor can hold" +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorSpanQueueCapacity) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. 
+func (SDKProcessorSpanQueueCapacity) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorSpanQueueSize is an instrument used to record metric values +// conforming to the "otel.sdk.processor.span.queue.size" semantic conventions. +// It represents the number of spans in the queue of a given instance of an SDK +// span processor. +type SDKProcessorSpanQueueSize struct { + metric.Int64ObservableUpDownCounter +} + +// NewSDKProcessorSpanQueueSize returns a new SDKProcessorSpanQueueSize +// instrument. +func NewSDKProcessorSpanQueueSize( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (SDKProcessorSpanQueueSize, error) { + // Check if the meter is nil. + if m == nil { + return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, nil + } + + i, err := m.Int64ObservableUpDownCounter( + "otel.sdk.processor.span.queue.size", + append([]metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("The number of spans in the queue of a given instance of an SDK span processor"), + metric.WithUnit("{span}"), + }, opt...)..., + ) + if err != nil { + return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, err + } + return SDKProcessorSpanQueueSize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKProcessorSpanQueueSize) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKProcessorSpanQueueSize) Name() string { + return "otel.sdk.processor.span.queue.size" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKProcessorSpanQueueSize) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKProcessorSpanQueueSize) Description() string { + return "The number of spans in the queue of a given instance of an SDK span processor" +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKProcessorSpanQueueSize) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKProcessorSpanQueueSize) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKSpanLive is an instrument used to record metric values conforming to the +// "otel.sdk.span.live" semantic conventions. It represents the number of created +// spans with `recording=true` for which the end operation has not been called +// yet. +type SDKSpanLive struct { + metric.Int64UpDownCounter +} + +// NewSDKSpanLive returns a new SDKSpanLive instrument. +func NewSDKSpanLive( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKSpanLive, error) { + // Check if the meter is nil. 
+ if m == nil { + return SDKSpanLive{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.span.live", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of created spans with `recording=true` for which the end operation has not been called yet"), + metric.WithUnit("{span}"), + }, opt...)..., + ) + if err != nil { + return SDKSpanLive{noop.Int64UpDownCounter{}}, err + } + return SDKSpanLive{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKSpanLive) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKSpanLive) Name() string { + return "otel.sdk.span.live" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKSpanLive) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKSpanLive) Description() string { + return "The number of created spans with `recording=true` for which the end operation has not been called yet" +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m SDKSpanLive) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+func (m SDKSpanLive) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrSpanSamplingResult returns an optional attribute for the +// "otel.span.sampling_result" semantic convention. It represents the result +// value of the sampler for this span. +func (SDKSpanLive) AttrSpanSamplingResult(val SpanSamplingResultAttr) attribute.KeyValue { + return attribute.String("otel.span.sampling_result", string(val)) +} + +// SDKSpanStarted is an instrument used to record metric values conforming to the +// "otel.sdk.span.started" semantic conventions. It represents the number of +// created spans. +type SDKSpanStarted struct { + metric.Int64Counter +} + +// NewSDKSpanStarted returns a new SDKSpanStarted instrument. +func NewSDKSpanStarted( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKSpanStarted, error) { + // Check if the meter is nil. + if m == nil { + return SDKSpanStarted{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "otel.sdk.span.started", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of created spans"), + metric.WithUnit("{span}"), + }, opt...)..., + ) + if err != nil { + return SDKSpanStarted{noop.Int64Counter{}}, err + } + return SDKSpanStarted{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKSpanStarted) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKSpanStarted) Name() string { + return "otel.sdk.span.started" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKSpanStarted) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKSpanStarted) Description() string { + return "The number of created spans" +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// Implementations MUST record this metric for all spans, even for non-recording +// ones. +func (m SDKSpanStarted) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// Implementations MUST record this metric for all spans, even for non-recording +// ones. +func (m SDKSpanStarted) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrSpanParentOrigin returns an optional attribute for the +// "otel.span.parent.origin" semantic convention. It represents the determines +// whether the span has a parent span, and if so, [whether it is a remote parent] +// . 
+//
+// [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote
+func (SDKSpanStarted) AttrSpanParentOrigin(val SpanParentOriginAttr) attribute.KeyValue {
+	return attribute.String("otel.span.parent.origin", string(val))
+}
+
+// AttrSpanSamplingResult returns an optional attribute for the
+// "otel.span.sampling_result" semantic convention. It represents the result
+// value of the sampler for this span.
+func (SDKSpanStarted) AttrSpanSamplingResult(val SpanSamplingResultAttr) attribute.KeyValue {
+	return attribute.String("otel.span.sampling_result", string(val))
+}
\ No newline at end of file
diff --git a/semconv/v1.36.0/processconv/metric.go b/semconv/v1.36.0/processconv/metric.go
new file mode 100644
index 00000000000..608c6212633
--- /dev/null
+++ b/semconv/v1.36.0/processconv/metric.go
@@ -0,0 +1,1101 @@
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package processconv provides types and functionality for OpenTelemetry semantic
+// conventions in the "process" namespace.
+package processconv
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/noop"
+)
+
+var (
+	addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }}
+	recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }}
+)
+
+// CPUModeAttr is an attribute conforming to the cpu.mode semantic conventions.
+// It represents a process SHOULD be characterized *either* by data points with
+// no `mode` labels, *or only* data points with `mode` labels.
+type CPUModeAttr string
+
+var (
+	// CPUModeUser is the standardized value "user" of CPUModeAttr.
+	CPUModeUser CPUModeAttr = "user"
+	// CPUModeSystem is the standardized value "system" of CPUModeAttr.
+ CPUModeSystem CPUModeAttr = "system" + // CPUModeNice is the standardized value "nice" of CPUModeAttr. + CPUModeNice CPUModeAttr = "nice" + // CPUModeIdle is the standardized value "idle" of CPUModeAttr. + CPUModeIdle CPUModeAttr = "idle" + // CPUModeIOWait is the standardized value "iowait" of CPUModeAttr. + CPUModeIOWait CPUModeAttr = "iowait" + // CPUModeInterrupt is the standardized value "interrupt" of CPUModeAttr. + CPUModeInterrupt CPUModeAttr = "interrupt" + // CPUModeSteal is the standardized value "steal" of CPUModeAttr. + CPUModeSteal CPUModeAttr = "steal" + // CPUModeKernel is the standardized value "kernel" of CPUModeAttr. + CPUModeKernel CPUModeAttr = "kernel" +) + +// DiskIODirectionAttr is an attribute conforming to the disk.io.direction +// semantic conventions. It represents the disk IO operation direction. +type DiskIODirectionAttr string + +var ( + // DiskIODirectionRead is the standardized value "read" of DiskIODirectionAttr. + DiskIODirectionRead DiskIODirectionAttr = "read" + // DiskIODirectionWrite is the standardized value "write" of + // DiskIODirectionAttr. + DiskIODirectionWrite DiskIODirectionAttr = "write" +) + +// NetworkIODirectionAttr is an attribute conforming to the network.io.direction +// semantic conventions. It represents the network IO operation direction. +type NetworkIODirectionAttr string + +var ( + // NetworkIODirectionTransmit is the standardized value "transmit" of + // NetworkIODirectionAttr. + NetworkIODirectionTransmit NetworkIODirectionAttr = "transmit" + // NetworkIODirectionReceive is the standardized value "receive" of + // NetworkIODirectionAttr. + NetworkIODirectionReceive NetworkIODirectionAttr = "receive" +) + +// ContextSwitchTypeAttr is an attribute conforming to the +// process.context_switch_type semantic conventions. It represents the specifies +// whether the context switches for this data point were voluntary or +// involuntary. 
+type ContextSwitchTypeAttr string + +var ( + // ContextSwitchTypeVoluntary is the standardized value "voluntary" of + // ContextSwitchTypeAttr. + ContextSwitchTypeVoluntary ContextSwitchTypeAttr = "voluntary" + // ContextSwitchTypeInvoluntary is the standardized value "involuntary" of + // ContextSwitchTypeAttr. + ContextSwitchTypeInvoluntary ContextSwitchTypeAttr = "involuntary" +) + +// PagingFaultTypeAttr is an attribute conforming to the +// process.paging.fault_type semantic conventions. It represents the type of page +// fault for this data point. Type `major` is for major/hard page faults, and +// `minor` is for minor/soft page faults. +type PagingFaultTypeAttr string + +var ( + // PagingFaultTypeMajor is the standardized value "major" of + // PagingFaultTypeAttr. + PagingFaultTypeMajor PagingFaultTypeAttr = "major" + // PagingFaultTypeMinor is the standardized value "minor" of + // PagingFaultTypeAttr. + PagingFaultTypeMinor PagingFaultTypeAttr = "minor" +) + +// ContextSwitches is an instrument used to record metric values conforming to +// the "process.context_switches" semantic conventions. It represents the number +// of times the process has been context switched. +type ContextSwitches struct { + metric.Int64Counter +} + +// NewContextSwitches returns a new ContextSwitches instrument. +func NewContextSwitches( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (ContextSwitches, error) { + // Check if the meter is nil. + if m == nil { + return ContextSwitches{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "process.context_switches", + append([]metric.Int64CounterOption{ + metric.WithDescription("Number of times the process has been context switched."), + metric.WithUnit("{context_switch}"), + }, opt...)..., + ) + if err != nil { + return ContextSwitches{noop.Int64Counter{}}, err + } + return ContextSwitches{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m ContextSwitches) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (ContextSwitches) Name() string { + return "process.context_switches" +} + +// Unit returns the semantic convention unit of the instrument +func (ContextSwitches) Unit() string { + return "{context_switch}" +} + +// Description returns the semantic convention description of the instrument +func (ContextSwitches) Description() string { + return "Number of times the process has been context switched." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m ContextSwitches) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m ContextSwitches) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrContextSwitchType returns an optional attribute for the +// "process.context_switch_type" semantic convention. It represents the specifies +// whether the context switches for this data point were voluntary or +// involuntary. 
+func (ContextSwitches) AttrContextSwitchType(val ContextSwitchTypeAttr) attribute.KeyValue { + return attribute.String("process.context_switch_type", string(val)) +} + +// CPUTime is an instrument used to record metric values conforming to the +// "process.cpu.time" semantic conventions. It represents the total CPU seconds +// broken down by different states. +type CPUTime struct { + metric.Float64ObservableCounter +} + +// NewCPUTime returns a new CPUTime instrument. +func NewCPUTime( + m metric.Meter, + opt ...metric.Float64ObservableCounterOption, +) (CPUTime, error) { + // Check if the meter is nil. + if m == nil { + return CPUTime{noop.Float64ObservableCounter{}}, nil + } + + i, err := m.Float64ObservableCounter( + "process.cpu.time", + append([]metric.Float64ObservableCounterOption{ + metric.WithDescription("Total CPU seconds broken down by different states."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return CPUTime{noop.Float64ObservableCounter{}}, err + } + return CPUTime{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m CPUTime) Inst() metric.Float64ObservableCounter { + return m.Float64ObservableCounter +} + +// Name returns the semantic convention name of the instrument. +func (CPUTime) Name() string { + return "process.cpu.time" +} + +// Unit returns the semantic convention unit of the instrument +func (CPUTime) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (CPUTime) Description() string { + return "Total CPU seconds broken down by different states." +} + +// AttrCPUMode returns an optional attribute for the "cpu.mode" semantic +// convention. It represents a process SHOULD be characterized *either* by data +// points with no `mode` labels, *or only* data points with `mode` labels. 
+func (CPUTime) AttrCPUMode(val CPUModeAttr) attribute.KeyValue { + return attribute.String("cpu.mode", string(val)) +} + +// CPUUtilization is an instrument used to record metric values conforming to the +// "process.cpu.utilization" semantic conventions. It represents the difference +// in process.cpu.time since the last measurement, divided by the elapsed time +// and number of CPUs available to the process. +type CPUUtilization struct { + metric.Int64Gauge +} + +// NewCPUUtilization returns a new CPUUtilization instrument. +func NewCPUUtilization( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (CPUUtilization, error) { + // Check if the meter is nil. + if m == nil { + return CPUUtilization{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "process.cpu.utilization", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process."), + metric.WithUnit("1"), + }, opt...)..., + ) + if err != nil { + return CPUUtilization{noop.Int64Gauge{}}, err + } + return CPUUtilization{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m CPUUtilization) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (CPUUtilization) Name() string { + return "process.cpu.utilization" +} + +// Unit returns the semantic convention unit of the instrument +func (CPUUtilization) Unit() string { + return "1" +} + +// Description returns the semantic convention description of the instrument +func (CPUUtilization) Description() string { + return "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process." +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. 
+func (m CPUUtilization) Record( + ctx context.Context, + val int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +func (m CPUUtilization) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// AttrCPUMode returns an optional attribute for the "cpu.mode" semantic +// convention. It represents a process SHOULD be characterized *either* by data +// points with no `mode` labels, *or only* data points with `mode` labels. +func (CPUUtilization) AttrCPUMode(val CPUModeAttr) attribute.KeyValue { + return attribute.String("cpu.mode", string(val)) +} + +// DiskIO is an instrument used to record metric values conforming to the +// "process.disk.io" semantic conventions. It represents the disk bytes +// transferred. +type DiskIO struct { + metric.Int64Counter +} + +// NewDiskIO returns a new DiskIO instrument. +func NewDiskIO( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (DiskIO, error) { + // Check if the meter is nil. + if m == nil { + return DiskIO{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "process.disk.io", + append([]metric.Int64CounterOption{ + metric.WithDescription("Disk bytes transferred."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return DiskIO{noop.Int64Counter{}}, err + } + return DiskIO{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m DiskIO) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (DiskIO) Name() string { + return "process.disk.io" +} + +// Unit returns the semantic convention unit of the instrument +func (DiskIO) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (DiskIO) Description() string { + return "Disk bytes transferred." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m DiskIO) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m DiskIO) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrDiskIODirection returns an optional attribute for the "disk.io.direction" +// semantic convention. It represents the disk IO operation direction. +func (DiskIO) AttrDiskIODirection(val DiskIODirectionAttr) attribute.KeyValue { + return attribute.String("disk.io.direction", string(val)) +} + +// MemoryUsage is an instrument used to record metric values conforming to the +// "process.memory.usage" semantic conventions. It represents the amount of +// physical memory in use. +type MemoryUsage struct { + metric.Int64UpDownCounter +} + +// NewMemoryUsage returns a new MemoryUsage instrument. 
+func NewMemoryUsage( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (MemoryUsage, error) { + // Check if the meter is nil. + if m == nil { + return MemoryUsage{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "process.memory.usage", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The amount of physical memory in use."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return MemoryUsage{noop.Int64UpDownCounter{}}, err + } + return MemoryUsage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m MemoryUsage) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (MemoryUsage) Name() string { + return "process.memory.usage" +} + +// Unit returns the semantic convention unit of the instrument +func (MemoryUsage) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (MemoryUsage) Description() string { + return "The amount of physical memory in use." +} + +// Add adds incr to the existing count for attrs. +func (m MemoryUsage) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m MemoryUsage) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) 
+} + +// MemoryVirtual is an instrument used to record metric values conforming to the +// "process.memory.virtual" semantic conventions. It represents the amount of +// committed virtual memory. +type MemoryVirtual struct { + metric.Int64UpDownCounter +} + +// NewMemoryVirtual returns a new MemoryVirtual instrument. +func NewMemoryVirtual( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (MemoryVirtual, error) { + // Check if the meter is nil. + if m == nil { + return MemoryVirtual{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "process.memory.virtual", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The amount of committed virtual memory."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return MemoryVirtual{noop.Int64UpDownCounter{}}, err + } + return MemoryVirtual{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m MemoryVirtual) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (MemoryVirtual) Name() string { + return "process.memory.virtual" +} + +// Unit returns the semantic convention unit of the instrument +func (MemoryVirtual) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (MemoryVirtual) Description() string { + return "The amount of committed virtual memory." +} + +// Add adds incr to the existing count for attrs. +func (m MemoryVirtual) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+func (m MemoryVirtual) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// NetworkIO is an instrument used to record metric values conforming to the +// "process.network.io" semantic conventions. It represents the network bytes +// transferred. +type NetworkIO struct { + metric.Int64Counter +} + +// NewNetworkIO returns a new NetworkIO instrument. +func NewNetworkIO( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (NetworkIO, error) { + // Check if the meter is nil. + if m == nil { + return NetworkIO{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "process.network.io", + append([]metric.Int64CounterOption{ + metric.WithDescription("Network bytes transferred."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return NetworkIO{noop.Int64Counter{}}, err + } + return NetworkIO{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NetworkIO) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (NetworkIO) Name() string { + return "process.network.io" +} + +// Unit returns the semantic convention unit of the instrument +func (NetworkIO) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (NetworkIO) Description() string { + return "Network bytes transferred." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. 
+func (m NetworkIO) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m NetworkIO) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrNetworkIODirection returns an optional attribute for the +// "network.io.direction" semantic convention. It represents the network IO +// operation direction. +func (NetworkIO) AttrNetworkIODirection(val NetworkIODirectionAttr) attribute.KeyValue { + return attribute.String("network.io.direction", string(val)) +} + +// OpenFileDescriptorCount is an instrument used to record metric values +// conforming to the "process.open_file_descriptor.count" semantic conventions. +// It represents the number of file descriptors in use by the process. +type OpenFileDescriptorCount struct { + metric.Int64UpDownCounter +} + +// NewOpenFileDescriptorCount returns a new OpenFileDescriptorCount instrument. +func NewOpenFileDescriptorCount( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (OpenFileDescriptorCount, error) { + // Check if the meter is nil. 
+ if m == nil { + return OpenFileDescriptorCount{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "process.open_file_descriptor.count", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of file descriptors in use by the process."), + metric.WithUnit("{file_descriptor}"), + }, opt...)..., + ) + if err != nil { + return OpenFileDescriptorCount{noop.Int64UpDownCounter{}}, err + } + return OpenFileDescriptorCount{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m OpenFileDescriptorCount) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (OpenFileDescriptorCount) Name() string { + return "process.open_file_descriptor.count" +} + +// Unit returns the semantic convention unit of the instrument +func (OpenFileDescriptorCount) Unit() string { + return "{file_descriptor}" +} + +// Description returns the semantic convention description of the instrument +func (OpenFileDescriptorCount) Description() string { + return "Number of file descriptors in use by the process." +} + +// Add adds incr to the existing count for attrs. +func (m OpenFileDescriptorCount) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+func (m OpenFileDescriptorCount) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// PagingFaults is an instrument used to record metric values conforming to the +// "process.paging.faults" semantic conventions. It represents the number of page +// faults the process has made. +type PagingFaults struct { + metric.Int64Counter +} + +// NewPagingFaults returns a new PagingFaults instrument. +func NewPagingFaults( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (PagingFaults, error) { + // Check if the meter is nil. + if m == nil { + return PagingFaults{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "process.paging.faults", + append([]metric.Int64CounterOption{ + metric.WithDescription("Number of page faults the process has made."), + metric.WithUnit("{fault}"), + }, opt...)..., + ) + if err != nil { + return PagingFaults{noop.Int64Counter{}}, err + } + return PagingFaults{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m PagingFaults) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (PagingFaults) Name() string { + return "process.paging.faults" +} + +// Unit returns the semantic convention unit of the instrument +func (PagingFaults) Unit() string { + return "{fault}" +} + +// Description returns the semantic convention description of the instrument +func (PagingFaults) Description() string { + return "Number of page faults the process has made." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. 
+func (m PagingFaults) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m PagingFaults) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrPagingFaultType returns an optional attribute for the +// "process.paging.fault_type" semantic convention. It represents the type of +// page fault for this data point. Type `major` is for major/hard page faults, +// and `minor` is for minor/soft page faults. +func (PagingFaults) AttrPagingFaultType(val PagingFaultTypeAttr) attribute.KeyValue { + return attribute.String("process.paging.fault_type", string(val)) +} + +// ThreadCount is an instrument used to record metric values conforming to the +// "process.thread.count" semantic conventions. It represents the process threads +// count. +type ThreadCount struct { + metric.Int64UpDownCounter +} + +// NewThreadCount returns a new ThreadCount instrument. +func NewThreadCount( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ThreadCount, error) { + // Check if the meter is nil. 
+ if m == nil { + return ThreadCount{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "process.thread.count", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Process threads count."), + metric.WithUnit("{thread}"), + }, opt...)..., + ) + if err != nil { + return ThreadCount{noop.Int64UpDownCounter{}}, err + } + return ThreadCount{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ThreadCount) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ThreadCount) Name() string { + return "process.thread.count" +} + +// Unit returns the semantic convention unit of the instrument +func (ThreadCount) Unit() string { + return "{thread}" +} + +// Description returns the semantic convention description of the instrument +func (ThreadCount) Description() string { + return "Process threads count." +} + +// Add adds incr to the existing count for attrs. +func (m ThreadCount) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m ThreadCount) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// Uptime is an instrument used to record metric values conforming to the +// "process.uptime" semantic conventions. It represents the time the process has +// been running. 
+type Uptime struct { + metric.Float64Gauge +} + +// NewUptime returns a new Uptime instrument. +func NewUptime( + m metric.Meter, + opt ...metric.Float64GaugeOption, +) (Uptime, error) { + // Check if the meter is nil. + if m == nil { + return Uptime{noop.Float64Gauge{}}, nil + } + + i, err := m.Float64Gauge( + "process.uptime", + append([]metric.Float64GaugeOption{ + metric.WithDescription("The time the process has been running."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return Uptime{noop.Float64Gauge{}}, err + } + return Uptime{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m Uptime) Inst() metric.Float64Gauge { + return m.Float64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (Uptime) Name() string { + return "process.uptime" +} + +// Unit returns the semantic convention unit of the instrument +func (Uptime) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (Uptime) Description() string { + return "The time the process has been running." +} + +// Record records val to the current distribution for attrs. +// +// Instrumentations SHOULD use a gauge with type `double` and measure uptime in +// seconds as a floating point number with the highest precision available. +// The actual accuracy would depend on the instrumentation and operating system. +func (m Uptime) Record(ctx context.Context, val float64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Float64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Float64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+// +// Instrumentations SHOULD use a gauge with type `double` and measure uptime in +// seconds as a floating point number with the highest precision available. +// The actual accuracy would depend on the instrumentation and operating system. +func (m Uptime) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Gauge.Record(ctx, val, *o...) +} \ No newline at end of file diff --git a/semconv/v1.36.0/rpcconv/metric.go b/semconv/v1.36.0/rpcconv/metric.go new file mode 100644 index 00000000000..146b7eda62c --- /dev/null +++ b/semconv/v1.36.0/rpcconv/metric.go @@ -0,0 +1,920 @@ +// Code generated from semantic convention specification. DO NOT EDIT. + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package httpconv provides types and functionality for OpenTelemetry semantic +// conventions in the "rpc" namespace. +package rpcconv + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" +) + +var ( + addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }} + recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }} +) + +// ClientDuration is an instrument used to record metric values conforming to the +// "rpc.client.duration" semantic conventions. It represents the measures the +// duration of outbound RPC. +type ClientDuration struct { + metric.Float64Histogram +} + +// NewClientDuration returns a new ClientDuration instrument. +func NewClientDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ClientDuration, error) { + // Check if the meter is nil. 
+ if m == nil { + return ClientDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "rpc.client.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("Measures the duration of outbound RPC."), + metric.WithUnit("ms"), + }, opt...)..., + ) + if err != nil { + return ClientDuration{noop.Float64Histogram{}}, err + } + return ClientDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientDuration) Name() string { + return "rpc.client.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientDuration) Unit() string { + return "ms" +} + +// Description returns the semantic convention description of the instrument +func (ClientDuration) Description() string { + return "Measures the duration of outbound RPC." +} + +// Record records val to the current distribution for attrs. +// +// While streaming RPCs may record this metric as start-of-batch +// to end-of-batch, it's hard to interpret in practice. +// +// **Streaming**: N/A. +func (m ClientDuration) Record(ctx context.Context, val float64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// While streaming RPCs may record this metric as start-of-batch +// to end-of-batch, it's hard to interpret in practice. +// +// **Streaming**: N/A. 
+func (m ClientDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// ClientRequestSize is an instrument used to record metric values conforming to +// the "rpc.client.request.size" semantic conventions. It represents the measures +// the size of RPC request messages (uncompressed). +type ClientRequestSize struct { + metric.Int64Histogram +} + +// NewClientRequestSize returns a new ClientRequestSize instrument. +func NewClientRequestSize( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ClientRequestSize, error) { + // Check if the meter is nil. + if m == nil { + return ClientRequestSize{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "rpc.client.request.size", + append([]metric.Int64HistogramOption{ + metric.WithDescription("Measures the size of RPC request messages (uncompressed)."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ClientRequestSize{noop.Int64Histogram{}}, err + } + return ClientRequestSize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientRequestSize) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientRequestSize) Name() string { + return "rpc.client.request.size" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientRequestSize) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ClientRequestSize) Description() string { + return "Measures the size of RPC request messages (uncompressed)." +} + +// Record records val to the current distribution for attrs. 
+// +// **Streaming**: Recorded per message in a streaming batch +func (m ClientRequestSize) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// **Streaming**: Recorded per message in a streaming batch +func (m ClientRequestSize) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// ClientRequestsPerRPC is an instrument used to record metric values conforming +// to the "rpc.client.requests_per_rpc" semantic conventions. It represents the +// measures the number of messages received per RPC. +type ClientRequestsPerRPC struct { + metric.Int64Histogram +} + +// NewClientRequestsPerRPC returns a new ClientRequestsPerRPC instrument. +func NewClientRequestsPerRPC( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ClientRequestsPerRPC, error) { + // Check if the meter is nil. + if m == nil { + return ClientRequestsPerRPC{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "rpc.client.requests_per_rpc", + append([]metric.Int64HistogramOption{ + metric.WithDescription("Measures the number of messages received per RPC."), + metric.WithUnit("{count}"), + }, opt...)..., + ) + if err != nil { + return ClientRequestsPerRPC{noop.Int64Histogram{}}, err + } + return ClientRequestsPerRPC{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m ClientRequestsPerRPC) Inst() metric.Int64Histogram {
+	return m.Int64Histogram
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ClientRequestsPerRPC) Name() string {
+	return "rpc.client.requests_per_rpc"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ClientRequestsPerRPC) Unit() string {
+	return "{count}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ClientRequestsPerRPC) Description() string {
+	return "Measures the number of messages received per RPC."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// Should be 1 for all non-streaming RPCs.
+//
+// **Streaming**: This metric is required for server and client streaming RPCs
+func (m ClientRequestsPerRPC) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) {
+	if len(attrs) == 0 {
+		m.Int64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributes(attrs...))
+	m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// Should be 1 for all non-streaming RPCs.
+//
+// **Streaming**: This metric is required for server and client streaming RPCs
+func (m ClientRequestsPerRPC) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Histogram.Record(ctx, val)
+		// Return early so the measurement is not recorded a second time
+		// below (mirrors the AddSet implementations).
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// ClientResponseSize is an instrument used to record metric values conforming to
+// the "rpc.client.response.size" semantic conventions. It represents the
+// measures the size of RPC response messages (uncompressed).
+type ClientResponseSize struct { + metric.Int64Histogram +} + +// NewClientResponseSize returns a new ClientResponseSize instrument. +func NewClientResponseSize( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ClientResponseSize, error) { + // Check if the meter is nil. + if m == nil { + return ClientResponseSize{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "rpc.client.response.size", + append([]metric.Int64HistogramOption{ + metric.WithDescription("Measures the size of RPC response messages (uncompressed)."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ClientResponseSize{noop.Int64Histogram{}}, err + } + return ClientResponseSize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientResponseSize) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientResponseSize) Name() string { + return "rpc.client.response.size" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientResponseSize) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ClientResponseSize) Description() string { + return "Measures the size of RPC response messages (uncompressed)." +} + +// Record records val to the current distribution for attrs. +// +// **Streaming**: Recorded per response in a streaming batch +func (m ClientResponseSize) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+// +// **Streaming**: Recorded per response in a streaming batch +func (m ClientResponseSize) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// ClientResponsesPerRPC is an instrument used to record metric values conforming +// to the "rpc.client.responses_per_rpc" semantic conventions. It represents the +// measures the number of messages sent per RPC. +type ClientResponsesPerRPC struct { + metric.Int64Histogram +} + +// NewClientResponsesPerRPC returns a new ClientResponsesPerRPC instrument. +func NewClientResponsesPerRPC( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ClientResponsesPerRPC, error) { + // Check if the meter is nil. + if m == nil { + return ClientResponsesPerRPC{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "rpc.client.responses_per_rpc", + append([]metric.Int64HistogramOption{ + metric.WithDescription("Measures the number of messages sent per RPC."), + metric.WithUnit("{count}"), + }, opt...)..., + ) + if err != nil { + return ClientResponsesPerRPC{noop.Int64Histogram{}}, err + } + return ClientResponsesPerRPC{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientResponsesPerRPC) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. 
+func (ClientResponsesPerRPC) Name() string { + return "rpc.client.responses_per_rpc" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientResponsesPerRPC) Unit() string { + return "{count}" +} + +// Description returns the semantic convention description of the instrument +func (ClientResponsesPerRPC) Description() string { + return "Measures the number of messages sent per RPC." +} + +// Record records val to the current distribution for attrs. +// +// Should be 1 for all non-streaming RPCs. +// +// **Streaming**: This metric is required for server and client streaming RPCs +func (m ClientResponsesPerRPC) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// Should be 1 for all non-streaming RPCs. +// +// **Streaming**: This metric is required for server and client streaming RPCs +func (m ClientResponsesPerRPC) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// ServerDuration is an instrument used to record metric values conforming to the +// "rpc.server.duration" semantic conventions. It represents the measures the +// duration of inbound RPC. +type ServerDuration struct { + metric.Float64Histogram +} + +// NewServerDuration returns a new ServerDuration instrument. 
+func NewServerDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ServerDuration, error) { + // Check if the meter is nil. + if m == nil { + return ServerDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "rpc.server.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("Measures the duration of inbound RPC."), + metric.WithUnit("ms"), + }, opt...)..., + ) + if err != nil { + return ServerDuration{noop.Float64Histogram{}}, err + } + return ServerDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ServerDuration) Name() string { + return "rpc.server.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerDuration) Unit() string { + return "ms" +} + +// Description returns the semantic convention description of the instrument +func (ServerDuration) Description() string { + return "Measures the duration of inbound RPC." +} + +// Record records val to the current distribution for attrs. +// +// While streaming RPCs may record this metric as start-of-batch +// to end-of-batch, it's hard to interpret in practice. +// +// **Streaming**: N/A. +func (m ServerDuration) Record(ctx context.Context, val float64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// While streaming RPCs may record this metric as start-of-batch +// to end-of-batch, it's hard to interpret in practice. +// +// **Streaming**: N/A. 
+func (m ServerDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// ServerRequestSize is an instrument used to record metric values conforming to +// the "rpc.server.request.size" semantic conventions. It represents the measures +// the size of RPC request messages (uncompressed). +type ServerRequestSize struct { + metric.Int64Histogram +} + +// NewServerRequestSize returns a new ServerRequestSize instrument. +func NewServerRequestSize( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ServerRequestSize, error) { + // Check if the meter is nil. + if m == nil { + return ServerRequestSize{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "rpc.server.request.size", + append([]metric.Int64HistogramOption{ + metric.WithDescription("Measures the size of RPC request messages (uncompressed)."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ServerRequestSize{noop.Int64Histogram{}}, err + } + return ServerRequestSize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerRequestSize) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ServerRequestSize) Name() string { + return "rpc.server.request.size" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerRequestSize) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ServerRequestSize) Description() string { + return "Measures the size of RPC request messages (uncompressed)." +} + +// Record records val to the current distribution for attrs. 
+// +// **Streaming**: Recorded per message in a streaming batch +func (m ServerRequestSize) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// **Streaming**: Recorded per message in a streaming batch +func (m ServerRequestSize) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// ServerRequestsPerRPC is an instrument used to record metric values conforming +// to the "rpc.server.requests_per_rpc" semantic conventions. It represents the +// measures the number of messages received per RPC. +type ServerRequestsPerRPC struct { + metric.Int64Histogram +} + +// NewServerRequestsPerRPC returns a new ServerRequestsPerRPC instrument. +func NewServerRequestsPerRPC( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ServerRequestsPerRPC, error) { + // Check if the meter is nil. + if m == nil { + return ServerRequestsPerRPC{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "rpc.server.requests_per_rpc", + append([]metric.Int64HistogramOption{ + metric.WithDescription("Measures the number of messages received per RPC."), + metric.WithUnit("{count}"), + }, opt...)..., + ) + if err != nil { + return ServerRequestsPerRPC{noop.Int64Histogram{}}, err + } + return ServerRequestsPerRPC{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m ServerRequestsPerRPC) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ServerRequestsPerRPC) Name() string { + return "rpc.server.requests_per_rpc" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerRequestsPerRPC) Unit() string { + return "{count}" +} + +// Description returns the semantic convention description of the instrument +func (ServerRequestsPerRPC) Description() string { + return "Measures the number of messages received per RPC." +} + +// Record records val to the current distribution for attrs. +// +// Should be 1 for all non-streaming RPCs. +// +// **Streaming** : This metric is required for server and client streaming RPCs +func (m ServerRequestsPerRPC) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// Should be 1 for all non-streaming RPCs. +// +// **Streaming** : This metric is required for server and client streaming RPCs +func (m ServerRequestsPerRPC) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// ServerResponseSize is an instrument used to record metric values conforming to +// the "rpc.server.response.size" semantic conventions. It represents the +// measures the size of RPC response messages (uncompressed). 
+type ServerResponseSize struct { + metric.Int64Histogram +} + +// NewServerResponseSize returns a new ServerResponseSize instrument. +func NewServerResponseSize( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ServerResponseSize, error) { + // Check if the meter is nil. + if m == nil { + return ServerResponseSize{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "rpc.server.response.size", + append([]metric.Int64HistogramOption{ + metric.WithDescription("Measures the size of RPC response messages (uncompressed)."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ServerResponseSize{noop.Int64Histogram{}}, err + } + return ServerResponseSize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerResponseSize) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ServerResponseSize) Name() string { + return "rpc.server.response.size" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerResponseSize) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ServerResponseSize) Description() string { + return "Measures the size of RPC response messages (uncompressed)." +} + +// Record records val to the current distribution for attrs. +// +// **Streaming**: Recorded per response in a streaming batch +func (m ServerResponseSize) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+// +// **Streaming**: Recorded per response in a streaming batch +func (m ServerResponseSize) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// ServerResponsesPerRPC is an instrument used to record metric values conforming +// to the "rpc.server.responses_per_rpc" semantic conventions. It represents the +// measures the number of messages sent per RPC. +type ServerResponsesPerRPC struct { + metric.Int64Histogram +} + +// NewServerResponsesPerRPC returns a new ServerResponsesPerRPC instrument. +func NewServerResponsesPerRPC( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ServerResponsesPerRPC, error) { + // Check if the meter is nil. + if m == nil { + return ServerResponsesPerRPC{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "rpc.server.responses_per_rpc", + append([]metric.Int64HistogramOption{ + metric.WithDescription("Measures the number of messages sent per RPC."), + metric.WithUnit("{count}"), + }, opt...)..., + ) + if err != nil { + return ServerResponsesPerRPC{noop.Int64Histogram{}}, err + } + return ServerResponsesPerRPC{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerResponsesPerRPC) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. 
+func (ServerResponsesPerRPC) Name() string { + return "rpc.server.responses_per_rpc" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerResponsesPerRPC) Unit() string { + return "{count}" +} + +// Description returns the semantic convention description of the instrument +func (ServerResponsesPerRPC) Description() string { + return "Measures the number of messages sent per RPC." +} + +// Record records val to the current distribution for attrs. +// +// Should be 1 for all non-streaming RPCs. +// +// **Streaming**: This metric is required for server and client streaming RPCs +func (m ServerResponsesPerRPC) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// Should be 1 for all non-streaming RPCs. +// +// **Streaming**: This metric is required for server and client streaming RPCs +func (m ServerResponsesPerRPC) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) 
+}
\ No newline at end of file
diff --git a/semconv/v1.36.0/schema.go b/semconv/v1.36.0/schema.go
new file mode 100644
index 00000000000..379becbd4e7
--- /dev/null
+++ b/semconv/v1.36.0/schema.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.36.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/
+const SchemaURL = "https://opentelemetry.io/schemas/1.36.0"
diff --git a/semconv/v1.36.0/signalrconv/metric.go b/semconv/v1.36.0/signalrconv/metric.go
new file mode 100644
index 00000000000..57fb9528645
--- /dev/null
+++ b/semconv/v1.36.0/signalrconv/metric.go
@@ -0,0 +1,285 @@
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package signalrconv provides types and functionality for OpenTelemetry semantic
+// conventions in the "signalr" namespace.
+package signalrconv
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/noop"
+)
+
+var (
+	addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }}
+	recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }}
+)
+
+// ConnectionStatusAttr is an attribute conforming to the
+// signalr.connection.status semantic conventions. It represents the signalR HTTP
+// connection closure status.
+type ConnectionStatusAttr string
+
+var (
+	// ConnectionStatusNormalClosure is the connection was closed normally.
+	ConnectionStatusNormalClosure ConnectionStatusAttr = "normal_closure"
+	// ConnectionStatusTimeout is the connection was closed due to a timeout.
+	ConnectionStatusTimeout ConnectionStatusAttr = "timeout"
+	// ConnectionStatusAppShutdown is the connection was closed because the app is
+	// shutting down.
+	ConnectionStatusAppShutdown ConnectionStatusAttr = "app_shutdown"
+)
+
+// TransportAttr is an attribute conforming to the signalr.transport semantic
+// conventions. It represents the [SignalR transport type].
+//
+// [SignalR transport type]: https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md
+type TransportAttr string
+
+var (
+	// TransportServerSentEvents is the serverSentEvents protocol.
+	TransportServerSentEvents TransportAttr = "server_sent_events"
+	// TransportLongPolling is the longPolling protocol.
+	TransportLongPolling TransportAttr = "long_polling"
+	// TransportWebSockets is the webSockets protocol.
+	TransportWebSockets TransportAttr = "web_sockets"
+)
+
+// ServerActiveConnections is an instrument used to record metric values
+// conforming to the "signalr.server.active_connections" semantic conventions. It
+// represents the number of connections that are currently active on the server.
+type ServerActiveConnections struct {
+	metric.Int64UpDownCounter
+}
+
+// NewServerActiveConnections returns a new ServerActiveConnections instrument.
+func NewServerActiveConnections(
+	m metric.Meter,
+	opt ...metric.Int64UpDownCounterOption,
+) (ServerActiveConnections, error) {
+	// Check if the meter is nil.
+	if m == nil {
+		return ServerActiveConnections{noop.Int64UpDownCounter{}}, nil
+	}
+
+	i, err := m.Int64UpDownCounter(
+		"signalr.server.active_connections",
+		append([]metric.Int64UpDownCounterOption{
+			metric.WithDescription("Number of connections that are currently active on the server."),
+			metric.WithUnit("{connection}"),
+		}, opt...)...,
+	)
+	if err != nil {
+		return ServerActiveConnections{noop.Int64UpDownCounter{}}, err
+	}
+	return ServerActiveConnections{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ServerActiveConnections) Inst() metric.Int64UpDownCounter {
+	return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ServerActiveConnections) Name() string {
+	return "signalr.server.active_connections"
+}
+
+// Unit returns the semantic convention unit of the instrument.
+func (ServerActiveConnections) Unit() string {
+	return "{connection}"
+}
+
+// Description returns the semantic convention description of the instrument.
+func (ServerActiveConnections) Description() string {
+	return "Number of connections that are currently active on the server."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// Meter name: `Microsoft.AspNetCore.Http.Connections`; Added in: ASP.NET Core
+// 8.0
+func (m ServerActiveConnections) Add(
+	ctx context.Context,
+	incr int64,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64UpDownCounter.Add(ctx, incr)
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			attrs...,
+		),
+	)
+
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+//
+// Meter name: `Microsoft.AspNetCore.Http.Connections`; Added in: ASP.NET Core
+// 8.0
+func (m ServerActiveConnections) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+	// Note: this early return is the correct pattern; the RecordSet methods in
+	// the sibling histogram types are missing it.
+	if set.Len() == 0 {
+		m.Int64UpDownCounter.Add(ctx, incr)
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AttrConnectionStatus returns an optional attribute for the
+// "signalr.connection.status" semantic convention. It represents the signalR
+// HTTP connection closure status.
+func (ServerActiveConnections) AttrConnectionStatus(val ConnectionStatusAttr) attribute.KeyValue {
+	return attribute.String("signalr.connection.status", string(val))
+}
+
+// AttrTransport returns an optional attribute for the "signalr.transport"
+// semantic convention. It represents the [SignalR transport type].
+//
+// [SignalR transport type]: https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md
+func (ServerActiveConnections) AttrTransport(val TransportAttr) attribute.KeyValue {
+	return attribute.String("signalr.transport", string(val))
+}
+
+// ServerConnectionDuration is an instrument used to record metric values
+// conforming to the "signalr.server.connection.duration" semantic conventions.
+// It represents the duration of connections on the server.
+type ServerConnectionDuration struct {
+	metric.Float64Histogram
+}
+
+// NewServerConnectionDuration returns a new ServerConnectionDuration instrument.
+func NewServerConnectionDuration(
+	m metric.Meter,
+	opt ...metric.Float64HistogramOption,
+) (ServerConnectionDuration, error) {
+	// Check if the meter is nil.
+	if m == nil {
+		return ServerConnectionDuration{noop.Float64Histogram{}}, nil
+	}
+
+	i, err := m.Float64Histogram(
+		"signalr.server.connection.duration",
+		append([]metric.Float64HistogramOption{
+			metric.WithDescription("The duration of connections on the server."),
+			metric.WithUnit("s"),
+		}, opt...)...,
+	)
+	if err != nil {
+		return ServerConnectionDuration{noop.Float64Histogram{}}, err
+	}
+	return ServerConnectionDuration{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ServerConnectionDuration) Inst() metric.Float64Histogram {
+	return m.Float64Histogram
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ServerConnectionDuration) Name() string { + return "signalr.server.connection.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerConnectionDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (ServerConnectionDuration) Description() string { + return "The duration of connections on the server." +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// Meter name: `Microsoft.AspNetCore.Http.Connections`; Added in: ASP.NET Core +// 8.0 +func (m ServerConnectionDuration) Record( + ctx context.Context, + val float64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// Meter name: `Microsoft.AspNetCore.Http.Connections`; Added in: ASP.NET Core +// 8.0 +func (m ServerConnectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// AttrConnectionStatus returns an optional attribute for the +// "signalr.connection.status" semantic convention. It represents the signalR +// HTTP connection closure status. 
+func (ServerConnectionDuration) AttrConnectionStatus(val ConnectionStatusAttr) attribute.KeyValue {
+	return attribute.String("signalr.connection.status", string(val))
+}
+
+// AttrTransport returns an optional attribute for the "signalr.transport"
+// semantic convention. It represents the [SignalR transport type].
+//
+// [SignalR transport type]: https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md
+func (ServerConnectionDuration) AttrTransport(val TransportAttr) attribute.KeyValue {
+	return attribute.String("signalr.transport", string(val))
+}
\ No newline at end of file
diff --git a/semconv/v1.36.0/systemconv/metric.go b/semconv/v1.36.0/systemconv/metric.go
new file mode 100644
index 00000000000..30dd2d24be3
--- /dev/null
+++ b/semconv/v1.36.0/systemconv/metric.go
@@ -0,0 +1,3502 @@
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package systemconv provides types and functionality for OpenTelemetry semantic
+// conventions in the "system" namespace.
+package systemconv
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/noop"
+)
+
+var (
+	addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }}
+	recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }}
+)
+
+// CPUModeAttr is an attribute conforming to the cpu.mode semantic conventions.
+// It represents the mode of the CPU.
+type CPUModeAttr string
+
+var (
+	// CPUModeUser is the standardized value "user" of CPUModeAttr.
+	CPUModeUser CPUModeAttr = "user"
+	// CPUModeSystem is the standardized value "system" of CPUModeAttr.
+	CPUModeSystem CPUModeAttr = "system"
+	// CPUModeNice is the standardized value "nice" of CPUModeAttr.
+	CPUModeNice CPUModeAttr = "nice"
+	// CPUModeIdle is the standardized value "idle" of CPUModeAttr.
+	CPUModeIdle CPUModeAttr = "idle"
+	// CPUModeIOWait is the standardized value "iowait" of CPUModeAttr.
+	CPUModeIOWait CPUModeAttr = "iowait"
+	// CPUModeInterrupt is the standardized value "interrupt" of CPUModeAttr.
+	CPUModeInterrupt CPUModeAttr = "interrupt"
+	// CPUModeSteal is the standardized value "steal" of CPUModeAttr.
+	CPUModeSteal CPUModeAttr = "steal"
+	// CPUModeKernel is the standardized value "kernel" of CPUModeAttr.
+	CPUModeKernel CPUModeAttr = "kernel"
+)
+
+// DiskIODirectionAttr is an attribute conforming to the disk.io.direction
+// semantic conventions. It represents the disk IO operation direction.
+type DiskIODirectionAttr string
+
+var (
+	// DiskIODirectionRead is the standardized value "read" of DiskIODirectionAttr.
+	DiskIODirectionRead DiskIODirectionAttr = "read"
+	// DiskIODirectionWrite is the standardized value "write" of
+	// DiskIODirectionAttr.
+	DiskIODirectionWrite DiskIODirectionAttr = "write"
+)
+
+// LinuxMemorySlabStateAttr is an attribute conforming to the
+// linux.memory.slab.state semantic conventions. It represents the Linux Slab
+// memory state.
+type LinuxMemorySlabStateAttr string
+
+var (
+	// LinuxMemorySlabStateReclaimable is the standardized value "reclaimable" of
+	// LinuxMemorySlabStateAttr.
+	LinuxMemorySlabStateReclaimable LinuxMemorySlabStateAttr = "reclaimable"
+	// LinuxMemorySlabStateUnreclaimable is the standardized value "unreclaimable"
+	// of LinuxMemorySlabStateAttr.
+	LinuxMemorySlabStateUnreclaimable LinuxMemorySlabStateAttr = "unreclaimable"
+)
+
+// NetworkConnectionStateAttr is an attribute conforming to the
+// network.connection.state semantic conventions. It represents the state of
+// network connection.
+type NetworkConnectionStateAttr string
+
+var (
+	// NetworkConnectionStateClosed is the standardized value "closed" of
+	// NetworkConnectionStateAttr.
+	NetworkConnectionStateClosed NetworkConnectionStateAttr = "closed"
+	// NetworkConnectionStateCloseWait is the standardized value "close_wait" of
+	// NetworkConnectionStateAttr.
+	NetworkConnectionStateCloseWait NetworkConnectionStateAttr = "close_wait"
+	// NetworkConnectionStateClosing is the standardized value "closing" of
+	// NetworkConnectionStateAttr.
+	NetworkConnectionStateClosing NetworkConnectionStateAttr = "closing"
+	// NetworkConnectionStateEstablished is the standardized value "established" of
+	// NetworkConnectionStateAttr.
+	NetworkConnectionStateEstablished NetworkConnectionStateAttr = "established"
+	// NetworkConnectionStateFinWait1 is the standardized value "fin_wait_1" of
+	// NetworkConnectionStateAttr.
+	NetworkConnectionStateFinWait1 NetworkConnectionStateAttr = "fin_wait_1"
+	// NetworkConnectionStateFinWait2 is the standardized value "fin_wait_2" of
+	// NetworkConnectionStateAttr.
+	NetworkConnectionStateFinWait2 NetworkConnectionStateAttr = "fin_wait_2"
+	// NetworkConnectionStateLastAck is the standardized value "last_ack" of
+	// NetworkConnectionStateAttr.
+	NetworkConnectionStateLastAck NetworkConnectionStateAttr = "last_ack"
+	// NetworkConnectionStateListen is the standardized value "listen" of
+	// NetworkConnectionStateAttr.
+	NetworkConnectionStateListen NetworkConnectionStateAttr = "listen"
+	// NetworkConnectionStateSynReceived is the standardized value "syn_received" of
+	// NetworkConnectionStateAttr.
+	NetworkConnectionStateSynReceived NetworkConnectionStateAttr = "syn_received"
+	// NetworkConnectionStateSynSent is the standardized value "syn_sent" of
+	// NetworkConnectionStateAttr.
+	NetworkConnectionStateSynSent NetworkConnectionStateAttr = "syn_sent"
+	// NetworkConnectionStateTimeWait is the standardized value "time_wait" of
+	// NetworkConnectionStateAttr.
+	NetworkConnectionStateTimeWait NetworkConnectionStateAttr = "time_wait"
+)
+
+// NetworkIODirectionAttr is an attribute conforming to the network.io.direction
+// semantic conventions. It represents the network IO operation direction.
+type NetworkIODirectionAttr string
+
+var (
+	// NetworkIODirectionTransmit is the standardized value "transmit" of
+	// NetworkIODirectionAttr.
+	NetworkIODirectionTransmit NetworkIODirectionAttr = "transmit"
+	// NetworkIODirectionReceive is the standardized value "receive" of
+	// NetworkIODirectionAttr.
+	NetworkIODirectionReceive NetworkIODirectionAttr = "receive"
+)
+
+// NetworkTransportAttr is an attribute conforming to the network.transport
+// semantic conventions. It represents the [OSI transport layer] or
+// [inter-process communication method].
+//
+// [OSI transport layer]: https://wikipedia.org/wiki/Transport_layer
+// [inter-process communication method]: https://wikipedia.org/wiki/Inter-process_communication
+type NetworkTransportAttr string
+
+var (
+	// NetworkTransportTCP is the TCP.
+	NetworkTransportTCP NetworkTransportAttr = "tcp"
+	// NetworkTransportUDP is the UDP.
+	NetworkTransportUDP NetworkTransportAttr = "udp"
+	// NetworkTransportPipe is the named or anonymous pipe.
+	NetworkTransportPipe NetworkTransportAttr = "pipe"
+	// NetworkTransportUnix is the unix domain socket.
+	NetworkTransportUnix NetworkTransportAttr = "unix"
+	// NetworkTransportQUIC is the QUIC.
+	NetworkTransportQUIC NetworkTransportAttr = "quic"
+)
+
+// FilesystemStateAttr is an attribute conforming to the system.filesystem.state
+// semantic conventions. It represents the filesystem state.
+type FilesystemStateAttr string
+
+var (
+	// FilesystemStateUsed is the standardized value "used" of FilesystemStateAttr.
+	FilesystemStateUsed FilesystemStateAttr = "used"
+	// FilesystemStateFree is the standardized value "free" of FilesystemStateAttr.
+	FilesystemStateFree FilesystemStateAttr = "free"
+	// FilesystemStateReserved is the standardized value "reserved" of
+	// FilesystemStateAttr.
+	FilesystemStateReserved FilesystemStateAttr = "reserved"
+)
+
+// FilesystemTypeAttr is an attribute conforming to the system.filesystem.type
+// semantic conventions. It represents the filesystem type.
+type FilesystemTypeAttr string
+
+var (
+	// FilesystemTypeFat32 is the standardized value "fat32" of FilesystemTypeAttr.
+	FilesystemTypeFat32 FilesystemTypeAttr = "fat32"
+	// FilesystemTypeExfat is the standardized value "exfat" of FilesystemTypeAttr.
+	FilesystemTypeExfat FilesystemTypeAttr = "exfat"
+	// FilesystemTypeNtfs is the standardized value "ntfs" of FilesystemTypeAttr.
+	FilesystemTypeNtfs FilesystemTypeAttr = "ntfs"
+	// FilesystemTypeRefs is the standardized value "refs" of FilesystemTypeAttr.
+	FilesystemTypeRefs FilesystemTypeAttr = "refs"
+	// FilesystemTypeHfsplus is the standardized value "hfsplus" of
+	// FilesystemTypeAttr.
+	FilesystemTypeHfsplus FilesystemTypeAttr = "hfsplus"
+	// FilesystemTypeExt4 is the standardized value "ext4" of FilesystemTypeAttr.
+	FilesystemTypeExt4 FilesystemTypeAttr = "ext4"
+)
+
+// MemoryStateAttr is an attribute conforming to the system.memory.state semantic
+// conventions. It represents the memory state.
+type MemoryStateAttr string
+
+var (
+	// MemoryStateUsed is the standardized value "used" of MemoryStateAttr.
+	MemoryStateUsed MemoryStateAttr = "used"
+	// MemoryStateFree is the standardized value "free" of MemoryStateAttr.
+	MemoryStateFree MemoryStateAttr = "free"
+	// MemoryStateBuffers is the standardized value "buffers" of MemoryStateAttr.
+	MemoryStateBuffers MemoryStateAttr = "buffers"
+	// MemoryStateCached is the standardized value "cached" of MemoryStateAttr.
+	MemoryStateCached MemoryStateAttr = "cached"
+)
+
+// PagingDirectionAttr is an attribute conforming to the system.paging.direction
+// semantic conventions. It represents the paging access direction.
+type PagingDirectionAttr string
+
+var (
+	// PagingDirectionIn is the standardized value "in" of PagingDirectionAttr.
+	PagingDirectionIn PagingDirectionAttr = "in"
+	// PagingDirectionOut is the standardized value "out" of PagingDirectionAttr.
+	PagingDirectionOut PagingDirectionAttr = "out"
+)
+
+// PagingStateAttr is an attribute conforming to the system.paging.state semantic
+// conventions. It represents the memory paging state.
+type PagingStateAttr string
+
+var (
+	// PagingStateUsed is the standardized value "used" of PagingStateAttr.
+	PagingStateUsed PagingStateAttr = "used"
+	// PagingStateFree is the standardized value "free" of PagingStateAttr.
+	PagingStateFree PagingStateAttr = "free"
+)
+
+// PagingTypeAttr is an attribute conforming to the system.paging.type semantic
+// conventions. It represents the memory paging type.
+type PagingTypeAttr string
+
+var (
+	// PagingTypeMajor is the standardized value "major" of PagingTypeAttr.
+	PagingTypeMajor PagingTypeAttr = "major"
+	// PagingTypeMinor is the standardized value "minor" of PagingTypeAttr.
+	PagingTypeMinor PagingTypeAttr = "minor"
+)
+
+// ProcessStatusAttr is an attribute conforming to the system.process.status
+// semantic conventions. It represents the process state, e.g.,
+// [Linux Process State Codes].
+//
+// [Linux Process State Codes]: https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES
+type ProcessStatusAttr string
+
+var (
+	// ProcessStatusRunning is the standardized value "running" of
+	// ProcessStatusAttr.
+	ProcessStatusRunning ProcessStatusAttr = "running"
+	// ProcessStatusSleeping is the standardized value "sleeping" of
+	// ProcessStatusAttr.
+	ProcessStatusSleeping ProcessStatusAttr = "sleeping"
+	// ProcessStatusStopped is the standardized value "stopped" of
+	// ProcessStatusAttr.
+	ProcessStatusStopped ProcessStatusAttr = "stopped"
+	// ProcessStatusDefunct is the standardized value "defunct" of
+	// ProcessStatusAttr.
+	ProcessStatusDefunct ProcessStatusAttr = "defunct"
+)
+
+// CPUFrequency is an instrument used to record metric values conforming to the
+// "system.cpu.frequency" semantic conventions. It represents the operating
+// frequency of the logical CPU in Hertz.
+type CPUFrequency struct {
+	metric.Int64Gauge
+}
+
+// NewCPUFrequency returns a new CPUFrequency instrument.
+func NewCPUFrequency(
+	m metric.Meter,
+	opt ...metric.Int64GaugeOption,
+) (CPUFrequency, error) {
+	// Check if the meter is nil.
+	if m == nil {
+		return CPUFrequency{noop.Int64Gauge{}}, nil
+	}
+
+	i, err := m.Int64Gauge(
+		"system.cpu.frequency",
+		append([]metric.Int64GaugeOption{
+			metric.WithDescription("Operating frequency of the logical CPU in Hertz."),
+			metric.WithUnit("Hz"),
+		}, opt...)...,
+	)
+	if err != nil {
+		return CPUFrequency{noop.Int64Gauge{}}, err
+	}
+	return CPUFrequency{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m CPUFrequency) Inst() metric.Int64Gauge {
+	return m.Int64Gauge
+}
+
+// Name returns the semantic convention name of the instrument.
+func (CPUFrequency) Name() string {
+	return "system.cpu.frequency"
+}
+
+// Unit returns the semantic convention unit of the instrument.
+func (CPUFrequency) Unit() string {
+	return "Hz"
+}
+
+// Description returns the semantic convention description of the instrument.
+func (CPUFrequency) Description() string {
+	return "Operating frequency of the logical CPU in Hertz."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// All additional attrs passed are included in the recorded value.
+func (m CPUFrequency) Record( + ctx context.Context, + val int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +func (m CPUFrequency) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// AttrCPULogicalNumber returns an optional attribute for the +// "cpu.logical_number" semantic convention. It represents the logical CPU number +// [0..n-1]. +func (CPUFrequency) AttrCPULogicalNumber(val int) attribute.KeyValue { + return attribute.Int("cpu.logical_number", val) +} + +// CPULogicalCount is an instrument used to record metric values conforming to +// the "system.cpu.logical.count" semantic conventions. It represents the reports +// the number of logical (virtual) processor cores created by the operating +// system to manage multitasking. +type CPULogicalCount struct { + metric.Int64UpDownCounter +} + +// NewCPULogicalCount returns a new CPULogicalCount instrument. +func NewCPULogicalCount( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (CPULogicalCount, error) { + // Check if the meter is nil. 
+ if m == nil { + return CPULogicalCount{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "system.cpu.logical.count", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking"), + metric.WithUnit("{cpu}"), + }, opt...)..., + ) + if err != nil { + return CPULogicalCount{noop.Int64UpDownCounter{}}, err + } + return CPULogicalCount{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m CPULogicalCount) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (CPULogicalCount) Name() string { + return "system.cpu.logical.count" +} + +// Unit returns the semantic convention unit of the instrument +func (CPULogicalCount) Unit() string { + return "{cpu}" +} + +// Description returns the semantic convention description of the instrument +func (CPULogicalCount) Description() string { + return "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking" +} + +// Add adds incr to the existing count for attrs. +// +// Calculated by multiplying the number of sockets by the number of cores per +// socket, and then by the number of threads per core +func (m CPULogicalCount) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+// +// Calculated by multiplying the number of sockets by the number of cores per +// socket, and then by the number of threads per core +func (m CPULogicalCount) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// CPUPhysicalCount is an instrument used to record metric values conforming to +// the "system.cpu.physical.count" semantic conventions. It represents the +// reports the number of actual physical processor cores on the hardware. +type CPUPhysicalCount struct { + metric.Int64UpDownCounter +} + +// NewCPUPhysicalCount returns a new CPUPhysicalCount instrument. +func NewCPUPhysicalCount( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (CPUPhysicalCount, error) { + // Check if the meter is nil. + if m == nil { + return CPUPhysicalCount{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "system.cpu.physical.count", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Reports the number of actual physical processor cores on the hardware"), + metric.WithUnit("{cpu}"), + }, opt...)..., + ) + if err != nil { + return CPUPhysicalCount{noop.Int64UpDownCounter{}}, err + } + return CPUPhysicalCount{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m CPUPhysicalCount) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
func (CPUPhysicalCount) Name() string {
	return "system.cpu.physical.count"
}

// Unit returns the semantic convention unit of the instrument.
func (CPUPhysicalCount) Unit() string {
	return "{cpu}"
}

// Description returns the semantic convention description of the instrument.
func (CPUPhysicalCount) Description() string {
	return "Reports the number of actual physical processor cores on the hardware"
}

// Add adds incr to the existing count for attrs.
//
// Calculated by multiplying the number of sockets by the number of cores per
// socket.
func (m CPUPhysicalCount) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) {
	if len(attrs) == 0 {
		m.Int64UpDownCounter.Add(ctx, incr)
		return
	}

	// Reuse a pooled option slice to avoid allocating on every measurement.
	o := addOptPool.Get().(*[]metric.AddOption)
	defer func() {
		*o = (*o)[:0]
		addOptPool.Put(o)
	}()

	*o = append(*o, metric.WithAttributes(attrs...))
	m.Int64UpDownCounter.Add(ctx, incr, *o...)
}

// AddSet adds incr to the existing count for set.
//
// Calculated by multiplying the number of sockets by the number of cores per
// socket.
func (m CPUPhysicalCount) AddSet(ctx context.Context, incr int64, set attribute.Set) {
	if set.Len() == 0 {
		m.Int64UpDownCounter.Add(ctx, incr)
		return
	}

	o := addOptPool.Get().(*[]metric.AddOption)
	defer func() {
		*o = (*o)[:0]
		addOptPool.Put(o)
	}()

	*o = append(*o, metric.WithAttributeSet(set))
	m.Int64UpDownCounter.Add(ctx, incr, *o...)
}

// CPUTime is an instrument used to record metric values conforming to the
// "system.cpu.time" semantic conventions. It represents the seconds each logical
// CPU spent on each mode.
type CPUTime struct {
	metric.Float64ObservableCounter
}

// NewCPUTime returns a new CPUTime instrument.
func NewCPUTime(
	m metric.Meter,
	opt ...metric.Float64ObservableCounterOption,
) (CPUTime, error) {
	// Check if the meter is nil.
	if m == nil {
		return CPUTime{noop.Float64ObservableCounter{}}, nil
	}

	i, err := m.Float64ObservableCounter(
		"system.cpu.time",
		append([]metric.Float64ObservableCounterOption{
			metric.WithDescription("Seconds each logical CPU spent on each mode"),
			metric.WithUnit("s"),
		}, opt...)...,
	)
	if err != nil {
		return CPUTime{noop.Float64ObservableCounter{}}, err
	}
	return CPUTime{i}, nil
}

// Inst returns the underlying metric instrument.
func (m CPUTime) Inst() metric.Float64ObservableCounter {
	return m.Float64ObservableCounter
}

// Name returns the semantic convention name of the instrument.
func (CPUTime) Name() string {
	return "system.cpu.time"
}

// Unit returns the semantic convention unit of the instrument.
func (CPUTime) Unit() string {
	return "s"
}

// Description returns the semantic convention description of the instrument.
func (CPUTime) Description() string {
	return "Seconds each logical CPU spent on each mode"
}

// AttrCPULogicalNumber returns an optional attribute for the
// "cpu.logical_number" semantic convention. It represents the logical CPU number
// [0..n-1].
func (CPUTime) AttrCPULogicalNumber(val int) attribute.KeyValue {
	return attribute.Int("cpu.logical_number", val)
}

// AttrCPUMode returns an optional attribute for the "cpu.mode" semantic
// convention. It represents the mode of the CPU.
func (CPUTime) AttrCPUMode(val CPUModeAttr) attribute.KeyValue {
	return attribute.String("cpu.mode", string(val))
}

// CPUUtilization is an instrument used to record metric values conforming to the
// "system.cpu.utilization" semantic conventions. It represents, for each
// logical CPU, the utilization calculated as the change in cumulative CPU
// time (cpu.time) over a measurement interval, divided by the elapsed time.
type CPUUtilization struct {
	metric.Int64Gauge
}

// NewCPUUtilization returns a new CPUUtilization instrument.
+func NewCPUUtilization( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (CPUUtilization, error) { + // Check if the meter is nil. + if m == nil { + return CPUUtilization{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "system.cpu.utilization", + append([]metric.Int64GaugeOption{ + metric.WithDescription("For each logical CPU, the utilization is calculated as the change in cumulative CPU time (cpu.time) over a measurement interval, divided by the elapsed time."), + metric.WithUnit("1"), + }, opt...)..., + ) + if err != nil { + return CPUUtilization{noop.Int64Gauge{}}, err + } + return CPUUtilization{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m CPUUtilization) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (CPUUtilization) Name() string { + return "system.cpu.utilization" +} + +// Unit returns the semantic convention unit of the instrument +func (CPUUtilization) Unit() string { + return "1" +} + +// Description returns the semantic convention description of the instrument +func (CPUUtilization) Description() string { + return "For each logical CPU, the utilization is calculated as the change in cumulative CPU time (cpu.time) over a measurement interval, divided by the elapsed time." +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m CPUUtilization) Record( + ctx context.Context, + val int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+func (m CPUUtilization) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// AttrCPULogicalNumber returns an optional attribute for the +// "cpu.logical_number" semantic convention. It represents the logical CPU number +// [0..n-1]. +func (CPUUtilization) AttrCPULogicalNumber(val int) attribute.KeyValue { + return attribute.Int("cpu.logical_number", val) +} + +// AttrCPUMode returns an optional attribute for the "cpu.mode" semantic +// convention. It represents the mode of the CPU. +func (CPUUtilization) AttrCPUMode(val CPUModeAttr) attribute.KeyValue { + return attribute.String("cpu.mode", string(val)) +} + +// DiskIO is an instrument used to record metric values conforming to the +// "system.disk.io" semantic conventions. +type DiskIO struct { + metric.Int64Counter +} + +// NewDiskIO returns a new DiskIO instrument. +func NewDiskIO( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (DiskIO, error) { + // Check if the meter is nil. + if m == nil { + return DiskIO{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "system.disk.io", + append([]metric.Int64CounterOption{ + metric.WithDescription(""), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return DiskIO{noop.Int64Counter{}}, err + } + return DiskIO{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m DiskIO) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (DiskIO) Name() string { + return "system.disk.io" +} + +// Unit returns the semantic convention unit of the instrument +func (DiskIO) Unit() string { + return "By" +} + +// Add adds incr to the existing count for attrs. 
+// +// All additional attrs passed are included in the recorded value. +func (m DiskIO) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m DiskIO) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrDiskIODirection returns an optional attribute for the "disk.io.direction" +// semantic convention. It represents the disk IO operation direction. +func (DiskIO) AttrDiskIODirection(val DiskIODirectionAttr) attribute.KeyValue { + return attribute.String("disk.io.direction", string(val)) +} + +// AttrDevice returns an optional attribute for the "system.device" semantic +// convention. It represents the device identifier. +func (DiskIO) AttrDevice(val string) attribute.KeyValue { + return attribute.String("system.device", val) +} + +// DiskIOTime is an instrument used to record metric values conforming to the +// "system.disk.io_time" semantic conventions. It represents the time disk spent +// activated. +type DiskIOTime struct { + metric.Float64Counter +} + +// NewDiskIOTime returns a new DiskIOTime instrument. +func NewDiskIOTime( + m metric.Meter, + opt ...metric.Float64CounterOption, +) (DiskIOTime, error) { + // Check if the meter is nil. 
+ if m == nil { + return DiskIOTime{noop.Float64Counter{}}, nil + } + + i, err := m.Float64Counter( + "system.disk.io_time", + append([]metric.Float64CounterOption{ + metric.WithDescription("Time disk spent activated"), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return DiskIOTime{noop.Float64Counter{}}, err + } + return DiskIOTime{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m DiskIOTime) Inst() metric.Float64Counter { + return m.Float64Counter +} + +// Name returns the semantic convention name of the instrument. +func (DiskIOTime) Name() string { + return "system.disk.io_time" +} + +// Unit returns the semantic convention unit of the instrument +func (DiskIOTime) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (DiskIOTime) Description() string { + return "Time disk spent activated" +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// The real elapsed time ("wall clock") used in the I/O path (time from +// operations running in parallel are not counted). 
Measured as: +// +// - Linux: Field 13 from [procfs-diskstats] +// - Windows: The complement of +// ["Disk% Idle Time"] +// performance counter: `uptime * (100 - "Disk\% Idle Time") / 100` +// +// +// [procfs-diskstats]: https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats +// ["Disk% Idle Time"]: https://learn.microsoft.com/archive/blogs/askcore/windows-performance-monitor-disk-counters-explained#windows-performance-monitor-disk-counters-explained +func (m DiskIOTime) Add( + ctx context.Context, + incr float64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Float64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// The real elapsed time ("wall clock") used in the I/O path (time from +// operations running in parallel are not counted). Measured as: +// +// - Linux: Field 13 from [procfs-diskstats] +// - Windows: The complement of +// ["Disk% Idle Time"] +// performance counter: `uptime * (100 - "Disk\% Idle Time") / 100` +// +// +// [procfs-diskstats]: https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats +// ["Disk% Idle Time"]: https://learn.microsoft.com/archive/blogs/askcore/windows-performance-monitor-disk-counters-explained#windows-performance-monitor-disk-counters-explained +func (m DiskIOTime) AddSet(ctx context.Context, incr float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Counter.Add(ctx, incr, *o...) +} + +// AttrDevice returns an optional attribute for the "system.device" semantic +// convention. 
It represents the device identifier. +func (DiskIOTime) AttrDevice(val string) attribute.KeyValue { + return attribute.String("system.device", val) +} + +// DiskLimit is an instrument used to record metric values conforming to the +// "system.disk.limit" semantic conventions. It represents the total storage +// capacity of the disk. +type DiskLimit struct { + metric.Int64UpDownCounter +} + +// NewDiskLimit returns a new DiskLimit instrument. +func NewDiskLimit( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (DiskLimit, error) { + // Check if the meter is nil. + if m == nil { + return DiskLimit{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "system.disk.limit", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The total storage capacity of the disk"), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return DiskLimit{noop.Int64UpDownCounter{}}, err + } + return DiskLimit{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m DiskLimit) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (DiskLimit) Name() string { + return "system.disk.limit" +} + +// Unit returns the semantic convention unit of the instrument +func (DiskLimit) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (DiskLimit) Description() string { + return "The total storage capacity of the disk" +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. 
+func (m DiskLimit) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m DiskLimit) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrDevice returns an optional attribute for the "system.device" semantic +// convention. It represents the device identifier. +func (DiskLimit) AttrDevice(val string) attribute.KeyValue { + return attribute.String("system.device", val) +} + +// DiskMerged is an instrument used to record metric values conforming to the +// "system.disk.merged" semantic conventions. +type DiskMerged struct { + metric.Int64Counter +} + +// NewDiskMerged returns a new DiskMerged instrument. +func NewDiskMerged( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (DiskMerged, error) { + // Check if the meter is nil. + if m == nil { + return DiskMerged{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "system.disk.merged", + append([]metric.Int64CounterOption{ + metric.WithDescription(""), + metric.WithUnit("{operation}"), + }, opt...)..., + ) + if err != nil { + return DiskMerged{noop.Int64Counter{}}, err + } + return DiskMerged{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m DiskMerged) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. 
+func (DiskMerged) Name() string { + return "system.disk.merged" +} + +// Unit returns the semantic convention unit of the instrument +func (DiskMerged) Unit() string { + return "{operation}" +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m DiskMerged) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m DiskMerged) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrDiskIODirection returns an optional attribute for the "disk.io.direction" +// semantic convention. It represents the disk IO operation direction. +func (DiskMerged) AttrDiskIODirection(val DiskIODirectionAttr) attribute.KeyValue { + return attribute.String("disk.io.direction", string(val)) +} + +// AttrDevice returns an optional attribute for the "system.device" semantic +// convention. It represents the device identifier. +func (DiskMerged) AttrDevice(val string) attribute.KeyValue { + return attribute.String("system.device", val) +} + +// DiskOperationTime is an instrument used to record metric values conforming to +// the "system.disk.operation_time" semantic conventions. It represents the sum +// of the time each operation took to complete. 
type DiskOperationTime struct {
	metric.Float64Counter
}

// NewDiskOperationTime returns a new DiskOperationTime instrument.
func NewDiskOperationTime(
	m metric.Meter,
	opt ...metric.Float64CounterOption,
) (DiskOperationTime, error) {
	// Check if the meter is nil.
	if m == nil {
		return DiskOperationTime{noop.Float64Counter{}}, nil
	}

	i, err := m.Float64Counter(
		"system.disk.operation_time",
		append([]metric.Float64CounterOption{
			metric.WithDescription("Sum of the time each operation took to complete"),
			metric.WithUnit("s"),
		}, opt...)...,
	)
	if err != nil {
		return DiskOperationTime{noop.Float64Counter{}}, err
	}
	return DiskOperationTime{i}, nil
}

// Inst returns the underlying metric instrument.
func (m DiskOperationTime) Inst() metric.Float64Counter {
	return m.Float64Counter
}

// Name returns the semantic convention name of the instrument.
func (DiskOperationTime) Name() string {
	return "system.disk.operation_time"
}

// Unit returns the semantic convention unit of the instrument.
func (DiskOperationTime) Unit() string {
	return "s"
}

// Description returns the semantic convention description of the instrument.
func (DiskOperationTime) Description() string {
	return "Sum of the time each operation took to complete"
}

// Add adds incr to the existing count for attrs.
//
// All additional attrs passed are included in the recorded value.
//
// Because it is the sum of time each request took, parallel-issued requests each
// contribute to make the count grow. Measured as:
//
// - Linux: Fields 7 & 11 from [procfs-diskstats]
// - Windows: "Avg. Disk sec/Read" perf counter multiplied by "Disk Reads/sec"
//   perf counter (similar for Writes)
//
//
// [procfs-diskstats]: https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats
func (m DiskOperationTime) Add(
	ctx context.Context,
	incr float64,
	attrs ...attribute.KeyValue,
) {
	// Fast path: no attributes to attach.
	if len(attrs) == 0 {
		m.Float64Counter.Add(ctx, incr)
		return
	}

	// Reuse a pooled option slice to avoid allocating on every measurement.
	o := addOptPool.Get().(*[]metric.AddOption)
	defer func() {
		*o = (*o)[:0]
		addOptPool.Put(o)
	}()

	*o = append(
		*o,
		metric.WithAttributes(
			attrs...,
		),
	)

	m.Float64Counter.Add(ctx, incr, *o...)
}

// AddSet adds incr to the existing count for set.
//
// Because it is the sum of time each request took, parallel-issued requests each
// contribute to make the count grow. Measured as:
//
// - Linux: Fields 7 & 11 from [procfs-diskstats]
// - Windows: "Avg. Disk sec/Read" perf counter multiplied by "Disk Reads/sec"
//   perf counter (similar for Writes)
//
//
// [procfs-diskstats]: https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats
func (m DiskOperationTime) AddSet(ctx context.Context, incr float64, set attribute.Set) {
	if set.Len() == 0 {
		m.Float64Counter.Add(ctx, incr)
		return
	}

	o := addOptPool.Get().(*[]metric.AddOption)
	defer func() {
		*o = (*o)[:0]
		addOptPool.Put(o)
	}()

	*o = append(*o, metric.WithAttributeSet(set))
	m.Float64Counter.Add(ctx, incr, *o...)
}

// AttrDiskIODirection returns an optional attribute for the "disk.io.direction"
// semantic convention. It represents the disk IO operation direction.
func (DiskOperationTime) AttrDiskIODirection(val DiskIODirectionAttr) attribute.KeyValue {
	return attribute.String("disk.io.direction", string(val))
}

// AttrDevice returns an optional attribute for the "system.device" semantic
// convention. It represents the device identifier.
+func (DiskOperationTime) AttrDevice(val string) attribute.KeyValue { + return attribute.String("system.device", val) +} + +// DiskOperations is an instrument used to record metric values conforming to the +// "system.disk.operations" semantic conventions. +type DiskOperations struct { + metric.Int64Counter +} + +// NewDiskOperations returns a new DiskOperations instrument. +func NewDiskOperations( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (DiskOperations, error) { + // Check if the meter is nil. + if m == nil { + return DiskOperations{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "system.disk.operations", + append([]metric.Int64CounterOption{ + metric.WithDescription(""), + metric.WithUnit("{operation}"), + }, opt...)..., + ) + if err != nil { + return DiskOperations{noop.Int64Counter{}}, err + } + return DiskOperations{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m DiskOperations) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (DiskOperations) Name() string { + return "system.disk.operations" +} + +// Unit returns the semantic convention unit of the instrument +func (DiskOperations) Unit() string { + return "{operation}" +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m DiskOperations) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+func (m DiskOperations) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrDiskIODirection returns an optional attribute for the "disk.io.direction" +// semantic convention. It represents the disk IO operation direction. +func (DiskOperations) AttrDiskIODirection(val DiskIODirectionAttr) attribute.KeyValue { + return attribute.String("disk.io.direction", string(val)) +} + +// AttrDevice returns an optional attribute for the "system.device" semantic +// convention. It represents the device identifier. +func (DiskOperations) AttrDevice(val string) attribute.KeyValue { + return attribute.String("system.device", val) +} + +// FilesystemLimit is an instrument used to record metric values conforming to +// the "system.filesystem.limit" semantic conventions. It represents the total +// storage capacity of the filesystem. +type FilesystemLimit struct { + metric.Int64UpDownCounter +} + +// NewFilesystemLimit returns a new FilesystemLimit instrument. +func NewFilesystemLimit( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (FilesystemLimit, error) { + // Check if the meter is nil. + if m == nil { + return FilesystemLimit{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "system.filesystem.limit", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The total storage capacity of the filesystem"), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return FilesystemLimit{noop.Int64UpDownCounter{}}, err + } + return FilesystemLimit{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m FilesystemLimit) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (FilesystemLimit) Name() string { + return "system.filesystem.limit" +} + +// Unit returns the semantic convention unit of the instrument +func (FilesystemLimit) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (FilesystemLimit) Description() string { + return "The total storage capacity of the filesystem" +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m FilesystemLimit) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m FilesystemLimit) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrDevice returns an optional attribute for the "system.device" semantic +// convention. It represents the identifier for the device where the filesystem +// resides. +func (FilesystemLimit) AttrDevice(val string) attribute.KeyValue { + return attribute.String("system.device", val) +} + +// AttrFilesystemMode returns an optional attribute for the +// "system.filesystem.mode" semantic convention. It represents the filesystem +// mode. 
+func (FilesystemLimit) AttrFilesystemMode(val string) attribute.KeyValue { + return attribute.String("system.filesystem.mode", val) +} + +// AttrFilesystemMountpoint returns an optional attribute for the +// "system.filesystem.mountpoint" semantic convention. It represents the +// filesystem mount path. +func (FilesystemLimit) AttrFilesystemMountpoint(val string) attribute.KeyValue { + return attribute.String("system.filesystem.mountpoint", val) +} + +// AttrFilesystemType returns an optional attribute for the +// "system.filesystem.type" semantic convention. It represents the filesystem +// type. +func (FilesystemLimit) AttrFilesystemType(val FilesystemTypeAttr) attribute.KeyValue { + return attribute.String("system.filesystem.type", string(val)) +} + +// FilesystemUsage is an instrument used to record metric values conforming to +// the "system.filesystem.usage" semantic conventions. It represents the reports +// a filesystem's space usage across different states. +type FilesystemUsage struct { + metric.Int64UpDownCounter +} + +// NewFilesystemUsage returns a new FilesystemUsage instrument. +func NewFilesystemUsage( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (FilesystemUsage, error) { + // Check if the meter is nil. + if m == nil { + return FilesystemUsage{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "system.filesystem.usage", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Reports a filesystem's space usage across different states."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return FilesystemUsage{noop.Int64UpDownCounter{}}, err + } + return FilesystemUsage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m FilesystemUsage) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (FilesystemUsage) Name() string { + return "system.filesystem.usage" +} + +// Unit returns the semantic convention unit of the instrument +func (FilesystemUsage) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (FilesystemUsage) Description() string { + return "Reports a filesystem's space usage across different states." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// The sum of all `system.filesystem.usage` values over the different +// `system.filesystem.state` attributes +// SHOULD equal the total storage capacity of the filesystem, that is +// `system.filesystem.limit`. +func (m FilesystemUsage) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// The sum of all `system.filesystem.usage` values over the different +// `system.filesystem.state` attributes +// SHOULD equal the total storage capacity of the filesystem, that is +// `system.filesystem.limit`. +func (m FilesystemUsage) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrDevice returns an optional attribute for the "system.device" semantic +// convention. It represents the identifier for the device where the filesystem +// resides. 
+func (FilesystemUsage) AttrDevice(val string) attribute.KeyValue { + return attribute.String("system.device", val) +} + +// AttrFilesystemMode returns an optional attribute for the +// "system.filesystem.mode" semantic convention. It represents the filesystem +// mode. +func (FilesystemUsage) AttrFilesystemMode(val string) attribute.KeyValue { + return attribute.String("system.filesystem.mode", val) +} + +// AttrFilesystemMountpoint returns an optional attribute for the +// "system.filesystem.mountpoint" semantic convention. It represents the +// filesystem mount path. +func (FilesystemUsage) AttrFilesystemMountpoint(val string) attribute.KeyValue { + return attribute.String("system.filesystem.mountpoint", val) +} + +// AttrFilesystemState returns an optional attribute for the +// "system.filesystem.state" semantic convention. It represents the filesystem +// state. +func (FilesystemUsage) AttrFilesystemState(val FilesystemStateAttr) attribute.KeyValue { + return attribute.String("system.filesystem.state", string(val)) +} + +// AttrFilesystemType returns an optional attribute for the +// "system.filesystem.type" semantic convention. It represents the filesystem +// type. +func (FilesystemUsage) AttrFilesystemType(val FilesystemTypeAttr) attribute.KeyValue { + return attribute.String("system.filesystem.type", string(val)) +} + +// FilesystemUtilization is an instrument used to record metric values conforming +// to the "system.filesystem.utilization" semantic conventions. +type FilesystemUtilization struct { + metric.Int64Gauge +} + +// NewFilesystemUtilization returns a new FilesystemUtilization instrument. +func NewFilesystemUtilization( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (FilesystemUtilization, error) { + // Check if the meter is nil. 
+ if m == nil { + return FilesystemUtilization{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "system.filesystem.utilization", + append([]metric.Int64GaugeOption{ + metric.WithDescription(""), + metric.WithUnit("1"), + }, opt...)..., + ) + if err != nil { + return FilesystemUtilization{noop.Int64Gauge{}}, err + } + return FilesystemUtilization{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m FilesystemUtilization) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (FilesystemUtilization) Name() string { + return "system.filesystem.utilization" +} + +// Unit returns the semantic convention unit of the instrument +func (FilesystemUtilization) Unit() string { + return "1" +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m FilesystemUtilization) Record( + ctx context.Context, + val int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +func (m FilesystemUtilization) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// AttrDevice returns an optional attribute for the "system.device" semantic +// convention. It represents the identifier for the device where the filesystem +// resides.
+func (FilesystemUtilization) AttrDevice(val string) attribute.KeyValue { + return attribute.String("system.device", val) +} + +// AttrFilesystemMode returns an optional attribute for the +// "system.filesystem.mode" semantic convention. It represents the filesystem +// mode. +func (FilesystemUtilization) AttrFilesystemMode(val string) attribute.KeyValue { + return attribute.String("system.filesystem.mode", val) +} + +// AttrFilesystemMountpoint returns an optional attribute for the +// "system.filesystem.mountpoint" semantic convention. It represents the +// filesystem mount path. +func (FilesystemUtilization) AttrFilesystemMountpoint(val string) attribute.KeyValue { + return attribute.String("system.filesystem.mountpoint", val) +} + +// AttrFilesystemState returns an optional attribute for the +// "system.filesystem.state" semantic convention. It represents the filesystem +// state. +func (FilesystemUtilization) AttrFilesystemState(val FilesystemStateAttr) attribute.KeyValue { + return attribute.String("system.filesystem.state", string(val)) +} + +// AttrFilesystemType returns an optional attribute for the +// "system.filesystem.type" semantic convention. It represents the filesystem +// type. +func (FilesystemUtilization) AttrFilesystemType(val FilesystemTypeAttr) attribute.KeyValue { + return attribute.String("system.filesystem.type", string(val)) +} + +// LinuxMemoryAvailable is an instrument used to record metric values conforming +// to the "system.linux.memory.available" semantic conventions. It represents an +// estimate of how much memory is available for starting new applications, +// without causing swapping. +type LinuxMemoryAvailable struct { + metric.Int64UpDownCounter +} + +// NewLinuxMemoryAvailable returns a new LinuxMemoryAvailable instrument. +func NewLinuxMemoryAvailable( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (LinuxMemoryAvailable, error) { + // Check if the meter is nil. 
+ if m == nil { + return LinuxMemoryAvailable{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "system.linux.memory.available", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("An estimate of how much memory is available for starting new applications, without causing swapping"), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return LinuxMemoryAvailable{noop.Int64UpDownCounter{}}, err + } + return LinuxMemoryAvailable{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m LinuxMemoryAvailable) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (LinuxMemoryAvailable) Name() string { + return "system.linux.memory.available" +} + +// Unit returns the semantic convention unit of the instrument +func (LinuxMemoryAvailable) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (LinuxMemoryAvailable) Description() string { + return "An estimate of how much memory is available for starting new applications, without causing swapping" +} + +// Add adds incr to the existing count for attrs. +// +// This is an alternative to `system.memory.usage` metric with `state=free`. +// Linux starting from 3.14 exports "available" memory. It takes "free" memory as +// a baseline, and then factors in kernel-specific values. +// This is supposed to be more accurate than just "free" memory. +// For reference, see the calculations [here]. +// See also `MemAvailable` in [/proc/meminfo]. 
+// +// [here]: https://superuser.com/a/980821 +// [/proc/meminfo]: https://man7.org/linux/man-pages/man5/proc.5.html +func (m LinuxMemoryAvailable) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This is an alternative to `system.memory.usage` metric with `state=free`. +// Linux starting from 3.14 exports "available" memory. It takes "free" memory as +// a baseline, and then factors in kernel-specific values. +// This is supposed to be more accurate than just "free" memory. +// For reference, see the calculations [here]. +// See also `MemAvailable` in [/proc/meminfo]. +// +// [here]: https://superuser.com/a/980821 +// [/proc/meminfo]: https://man7.org/linux/man-pages/man5/proc.5.html +func (m LinuxMemoryAvailable) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// LinuxMemorySlabUsage is an instrument used to record metric values conforming +// to the "system.linux.memory.slab.usage" semantic conventions. It represents +// the reports the memory used by the Linux kernel for managing caches of +// frequently used objects. +type LinuxMemorySlabUsage struct { + metric.Int64UpDownCounter +} + +// NewLinuxMemorySlabUsage returns a new LinuxMemorySlabUsage instrument. +func NewLinuxMemorySlabUsage( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (LinuxMemorySlabUsage, error) { + // Check if the meter is nil. 
+ if m == nil { + return LinuxMemorySlabUsage{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "system.linux.memory.slab.usage", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Reports the memory used by the Linux kernel for managing caches of frequently used objects."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return LinuxMemorySlabUsage{noop.Int64UpDownCounter{}}, err + } + return LinuxMemorySlabUsage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m LinuxMemorySlabUsage) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (LinuxMemorySlabUsage) Name() string { + return "system.linux.memory.slab.usage" +} + +// Unit returns the semantic convention unit of the instrument +func (LinuxMemorySlabUsage) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (LinuxMemorySlabUsage) Description() string { + return "Reports the memory used by the Linux kernel for managing caches of frequently used objects." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// The sum over the `reclaimable` and `unreclaimable` state values in +// `linux.memory.slab.usage` SHOULD be equal to the total slab memory available +// on the system. +// Note that the total slab memory is not constant and may vary over time. +// See also the [Slab allocator] and `Slab` in [/proc/meminfo]. 
+// +// [Slab allocator]: https://blogs.oracle.com/linux/post/understanding-linux-kernel-memory-statistics +// [/proc/meminfo]: https://man7.org/linux/man-pages/man5/proc.5.html +func (m LinuxMemorySlabUsage) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// The sum over the `reclaimable` and `unreclaimable` state values in +// `linux.memory.slab.usage` SHOULD be equal to the total slab memory available +// on the system. +// Note that the total slab memory is not constant and may vary over time. +// See also the [Slab allocator] and `Slab` in [/proc/meminfo]. +// +// [Slab allocator]: https://blogs.oracle.com/linux/post/understanding-linux-kernel-memory-statistics +// [/proc/meminfo]: https://man7.org/linux/man-pages/man5/proc.5.html +func (m LinuxMemorySlabUsage) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrLinuxMemorySlabState returns an optional attribute for the +// "linux.memory.slab.state" semantic convention. It represents the Linux Slab +// memory state. +func (LinuxMemorySlabUsage) AttrLinuxMemorySlabState(val LinuxMemorySlabStateAttr) attribute.KeyValue { + return attribute.String("linux.memory.slab.state", string(val)) +} + +// MemoryLimit is an instrument used to record metric values conforming to the +// "system.memory.limit" semantic conventions. 
It represents the total memory +// available in the system. +type MemoryLimit struct { + metric.Int64UpDownCounter +} + +// NewMemoryLimit returns a new MemoryLimit instrument. +func NewMemoryLimit( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (MemoryLimit, error) { + // Check if the meter is nil. + if m == nil { + return MemoryLimit{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "system.memory.limit", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Total memory available in the system."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return MemoryLimit{noop.Int64UpDownCounter{}}, err + } + return MemoryLimit{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m MemoryLimit) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (MemoryLimit) Name() string { + return "system.memory.limit" +} + +// Unit returns the semantic convention unit of the instrument +func (MemoryLimit) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (MemoryLimit) Description() string { + return "Total memory available in the system." +} + +// Add adds incr to the existing count for attrs. +// +// Its value SHOULD equal the sum of `system.memory.state` over all states. +func (m MemoryLimit) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// Its value SHOULD equal the sum of `system.memory.state` over all states. 
+func (m MemoryLimit) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// MemoryShared is an instrument used to record metric values conforming to the +// "system.memory.shared" semantic conventions. It represents the shared memory +// used (mostly by tmpfs). +type MemoryShared struct { + metric.Int64UpDownCounter +} + +// NewMemoryShared returns a new MemoryShared instrument. +func NewMemoryShared( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (MemoryShared, error) { + // Check if the meter is nil. + if m == nil { + return MemoryShared{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "system.memory.shared", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Shared memory used (mostly by tmpfs)."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return MemoryShared{noop.Int64UpDownCounter{}}, err + } + return MemoryShared{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m MemoryShared) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (MemoryShared) Name() string { + return "system.memory.shared" +} + +// Unit returns the semantic convention unit of the instrument +func (MemoryShared) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (MemoryShared) Description() string { + return "Shared memory used (mostly by tmpfs)." +} + +// Add adds incr to the existing count for attrs. 
+// +// Equivalent of `shared` from [`free` command] or +// `Shmem` from [`/proc/meminfo`]" +// +// [`free` command]: https://man7.org/linux/man-pages/man1/free.1.html +// [`/proc/meminfo`]: https://man7.org/linux/man-pages/man5/proc.5.html +func (m MemoryShared) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// Equivalent of `shared` from [`free` command] or +// `Shmem` from [`/proc/meminfo`]" +// +// [`free` command]: https://man7.org/linux/man-pages/man1/free.1.html +// [`/proc/meminfo`]: https://man7.org/linux/man-pages/man5/proc.5.html +func (m MemoryShared) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// MemoryUsage is an instrument used to record metric values conforming to the +// "system.memory.usage" semantic conventions. It represents the reports memory +// in use by state. +type MemoryUsage struct { + metric.Int64ObservableUpDownCounter +} + +// NewMemoryUsage returns a new MemoryUsage instrument. +func NewMemoryUsage( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (MemoryUsage, error) { + // Check if the meter is nil. 
+ if m == nil { + return MemoryUsage{noop.Int64ObservableUpDownCounter{}}, nil + } + + i, err := m.Int64ObservableUpDownCounter( + "system.memory.usage", + append([]metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("Reports memory in use by state."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return MemoryUsage{noop.Int64ObservableUpDownCounter{}}, err + } + return MemoryUsage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m MemoryUsage) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (MemoryUsage) Name() string { + return "system.memory.usage" +} + +// Unit returns the semantic convention unit of the instrument +func (MemoryUsage) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (MemoryUsage) Description() string { + return "Reports memory in use by state." +} + +// AttrMemoryState returns an optional attribute for the "system.memory.state" +// semantic convention. It represents the memory state. +func (MemoryUsage) AttrMemoryState(val MemoryStateAttr) attribute.KeyValue { + return attribute.String("system.memory.state", string(val)) +} + +// MemoryUtilization is an instrument used to record metric values conforming to +// the "system.memory.utilization" semantic conventions. +type MemoryUtilization struct { + metric.Float64ObservableGauge +} + +// NewMemoryUtilization returns a new MemoryUtilization instrument. +func NewMemoryUtilization( + m metric.Meter, + opt ...metric.Float64ObservableGaugeOption, +) (MemoryUtilization, error) { + // Check if the meter is nil. 
+ if m == nil { + return MemoryUtilization{noop.Float64ObservableGauge{}}, nil + } + + i, err := m.Float64ObservableGauge( + "system.memory.utilization", + append([]metric.Float64ObservableGaugeOption{ + metric.WithDescription(""), + metric.WithUnit("1"), + }, opt...)..., + ) + if err != nil { + return MemoryUtilization{noop.Float64ObservableGauge{}}, err + } + return MemoryUtilization{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m MemoryUtilization) Inst() metric.Float64ObservableGauge { + return m.Float64ObservableGauge +} + +// Name returns the semantic convention name of the instrument. +func (MemoryUtilization) Name() string { + return "system.memory.utilization" +} + +// Unit returns the semantic convention unit of the instrument +func (MemoryUtilization) Unit() string { + return "1" +} + +// AttrMemoryState returns an optional attribute for the "system.memory.state" +// semantic convention. It represents the memory state. +func (MemoryUtilization) AttrMemoryState(val MemoryStateAttr) attribute.KeyValue { + return attribute.String("system.memory.state", string(val)) +} + +// NetworkConnectionCount is an instrument used to record metric values +// conforming to the "system.network.connection.count" semantic conventions. +type NetworkConnectionCount struct { + metric.Int64UpDownCounter +} + +// NewNetworkConnectionCount returns a new NetworkConnectionCount instrument. +func NewNetworkConnectionCount( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (NetworkConnectionCount, error) { + // Check if the meter is nil. 
+ if m == nil { + return NetworkConnectionCount{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "system.network.connection.count", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription(""), + metric.WithUnit("{connection}"), + }, opt...)..., + ) + if err != nil { + return NetworkConnectionCount{noop.Int64UpDownCounter{}}, err + } + return NetworkConnectionCount{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NetworkConnectionCount) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (NetworkConnectionCount) Name() string { + return "system.network.connection.count" +} + +// Unit returns the semantic convention unit of the instrument +func (NetworkConnectionCount) Unit() string { + return "{connection}" +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m NetworkConnectionCount) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m NetworkConnectionCount) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrNetworkConnectionState returns an optional attribute for the +// "network.connection.state" semantic convention. 
It represents the state of +// network connection. +func (NetworkConnectionCount) AttrNetworkConnectionState(val NetworkConnectionStateAttr) attribute.KeyValue { + return attribute.String("network.connection.state", string(val)) +} + +// AttrNetworkInterfaceName returns an optional attribute for the +// "network.interface.name" semantic convention. It represents the network +// interface name. +func (NetworkConnectionCount) AttrNetworkInterfaceName(val string) attribute.KeyValue { + return attribute.String("network.interface.name", val) +} + +// AttrNetworkTransport returns an optional attribute for the "network.transport" +// semantic convention. It represents the [OSI transport layer] or +// [inter-process communication method]. +// +// [OSI transport layer]: https://wikipedia.org/wiki/Transport_layer +// [inter-process communication method]: https://wikipedia.org/wiki/Inter-process_communication +func (NetworkConnectionCount) AttrNetworkTransport(val NetworkTransportAttr) attribute.KeyValue { + return attribute.String("network.transport", string(val)) +} + +// NetworkDropped is an instrument used to record metric values conforming to the +// "system.network.dropped" semantic conventions. It represents the count of +// packets that are dropped or discarded even though there was no error. +type NetworkDropped struct { + metric.Int64Counter +} + +// NewNetworkDropped returns a new NetworkDropped instrument. +func NewNetworkDropped( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (NetworkDropped, error) { + // Check if the meter is nil. 
+ if m == nil { + return NetworkDropped{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "system.network.dropped", + append([]metric.Int64CounterOption{ + metric.WithDescription("Count of packets that are dropped or discarded even though there was no error"), + metric.WithUnit("{packet}"), + }, opt...)..., + ) + if err != nil { + return NetworkDropped{noop.Int64Counter{}}, err + } + return NetworkDropped{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NetworkDropped) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (NetworkDropped) Name() string { + return "system.network.dropped" +} + +// Unit returns the semantic convention unit of the instrument +func (NetworkDropped) Unit() string { + return "{packet}" +} + +// Description returns the semantic convention description of the instrument +func (NetworkDropped) Description() string { + return "Count of packets that are dropped or discarded even though there was no error" +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. 
+// +// Measured as: +// +// - Linux: the `drop` column in `/proc/dev/net` ([source]) +// - Windows: [`InDiscards`/`OutDiscards`] +// from [`GetIfEntry2`] +// +// +// [source]: https://web.archive.org/web/20180321091318/http://www.onlamp.com/pub/a/linux/2000/11/16/LinuxAdmin.html +// [`InDiscards`/`OutDiscards`]: https://docs.microsoft.com/windows/win32/api/netioapi/ns-netioapi-mib_if_row2 +// [`GetIfEntry2`]: https://docs.microsoft.com/windows/win32/api/netioapi/nf-netioapi-getifentry2 +func (m NetworkDropped) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// Measured as: +// +// - Linux: the `drop` column in `/proc/dev/net` ([source]) +// - Windows: [`InDiscards`/`OutDiscards`] +// from [`GetIfEntry2`] +// +// +// [source]: https://web.archive.org/web/20180321091318/http://www.onlamp.com/pub/a/linux/2000/11/16/LinuxAdmin.html +// [`InDiscards`/`OutDiscards`]: https://docs.microsoft.com/windows/win32/api/netioapi/ns-netioapi-mib_if_row2 +// [`GetIfEntry2`]: https://docs.microsoft.com/windows/win32/api/netioapi/nf-netioapi-getifentry2 +func (m NetworkDropped) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrNetworkInterfaceName returns an optional attribute for the +// "network.interface.name" semantic convention. It represents the network +// interface name. 
+func (NetworkDropped) AttrNetworkInterfaceName(val string) attribute.KeyValue { + return attribute.String("network.interface.name", val) +} + +// AttrNetworkIODirection returns an optional attribute for the +// "network.io.direction" semantic convention. It represents the network IO +// operation direction. +func (NetworkDropped) AttrNetworkIODirection(val NetworkIODirectionAttr) attribute.KeyValue { + return attribute.String("network.io.direction", string(val)) +} + +// NetworkErrors is an instrument used to record metric values conforming to the +// "system.network.errors" semantic conventions. It represents the count of +// network errors detected. +type NetworkErrors struct { + metric.Int64Counter +} + +// NewNetworkErrors returns a new NetworkErrors instrument. +func NewNetworkErrors( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (NetworkErrors, error) { + // Check if the meter is nil. + if m == nil { + return NetworkErrors{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "system.network.errors", + append([]metric.Int64CounterOption{ + metric.WithDescription("Count of network errors detected"), + metric.WithUnit("{error}"), + }, opt...)..., + ) + if err != nil { + return NetworkErrors{noop.Int64Counter{}}, err + } + return NetworkErrors{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NetworkErrors) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (NetworkErrors) Name() string { + return "system.network.errors" +} + +// Unit returns the semantic convention unit of the instrument +func (NetworkErrors) Unit() string { + return "{error}" +} + +// Description returns the semantic convention description of the instrument +func (NetworkErrors) Description() string { + return "Count of network errors detected" +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. 
+// +// Measured as: +// +// - Linux: the `errs` column in `/proc/dev/net` ([source]). +// - Windows: [`InErrors`/`OutErrors`] +// from [`GetIfEntry2`]. +// +// +// [source]: https://web.archive.org/web/20180321091318/http://www.onlamp.com/pub/a/linux/2000/11/16/LinuxAdmin.html +// [`InErrors`/`OutErrors`]: https://docs.microsoft.com/windows/win32/api/netioapi/ns-netioapi-mib_if_row2 +// [`GetIfEntry2`]: https://docs.microsoft.com/windows/win32/api/netioapi/nf-netioapi-getifentry2 +func (m NetworkErrors) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// Measured as: +// +// - Linux: the `errs` column in `/proc/dev/net` ([source]). +// - Windows: [`InErrors`/`OutErrors`] +// from [`GetIfEntry2`]. +// +// +// [source]: https://web.archive.org/web/20180321091318/http://www.onlamp.com/pub/a/linux/2000/11/16/LinuxAdmin.html +// [`InErrors`/`OutErrors`]: https://docs.microsoft.com/windows/win32/api/netioapi/ns-netioapi-mib_if_row2 +// [`GetIfEntry2`]: https://docs.microsoft.com/windows/win32/api/netioapi/nf-netioapi-getifentry2 +func (m NetworkErrors) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrNetworkInterfaceName returns an optional attribute for the +// "network.interface.name" semantic convention. It represents the network +// interface name. 
+func (NetworkErrors) AttrNetworkInterfaceName(val string) attribute.KeyValue { + return attribute.String("network.interface.name", val) +} + +// AttrNetworkIODirection returns an optional attribute for the +// "network.io.direction" semantic convention. It represents the network IO +// operation direction. +func (NetworkErrors) AttrNetworkIODirection(val NetworkIODirectionAttr) attribute.KeyValue { + return attribute.String("network.io.direction", string(val)) +} + +// NetworkIO is an instrument used to record metric values conforming to the +// "system.network.io" semantic conventions. +type NetworkIO struct { + metric.Int64ObservableCounter +} + +// NewNetworkIO returns a new NetworkIO instrument. +func NewNetworkIO( + m metric.Meter, + opt ...metric.Int64ObservableCounterOption, +) (NetworkIO, error) { + // Check if the meter is nil. + if m == nil { + return NetworkIO{noop.Int64ObservableCounter{}}, nil + } + + i, err := m.Int64ObservableCounter( + "system.network.io", + append([]metric.Int64ObservableCounterOption{ + metric.WithDescription(""), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return NetworkIO{noop.Int64ObservableCounter{}}, err + } + return NetworkIO{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NetworkIO) Inst() metric.Int64ObservableCounter { + return m.Int64ObservableCounter +} + +// Name returns the semantic convention name of the instrument. +func (NetworkIO) Name() string { + return "system.network.io" +} + +// Unit returns the semantic convention unit of the instrument +func (NetworkIO) Unit() string { + return "By" +} + +// AttrNetworkInterfaceName returns an optional attribute for the +// "network.interface.name" semantic convention. It represents the network +// interface name. 
+func (NetworkIO) AttrNetworkInterfaceName(val string) attribute.KeyValue { + return attribute.String("network.interface.name", val) +} + +// AttrNetworkIODirection returns an optional attribute for the +// "network.io.direction" semantic convention. It represents the network IO +// operation direction. +func (NetworkIO) AttrNetworkIODirection(val NetworkIODirectionAttr) attribute.KeyValue { + return attribute.String("network.io.direction", string(val)) +} + +// NetworkPackets is an instrument used to record metric values conforming to the +// "system.network.packets" semantic conventions. +type NetworkPackets struct { + metric.Int64Counter +} + +// NewNetworkPackets returns a new NetworkPackets instrument. +func NewNetworkPackets( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (NetworkPackets, error) { + // Check if the meter is nil. + if m == nil { + return NetworkPackets{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "system.network.packets", + append([]metric.Int64CounterOption{ + metric.WithDescription(""), + metric.WithUnit("{packet}"), + }, opt...)..., + ) + if err != nil { + return NetworkPackets{noop.Int64Counter{}}, err + } + return NetworkPackets{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NetworkPackets) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (NetworkPackets) Name() string { + return "system.network.packets" +} + +// Unit returns the semantic convention unit of the instrument +func (NetworkPackets) Unit() string { + return "{packet}" +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. 
+func (m NetworkPackets) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m NetworkPackets) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrNetworkIODirection returns an optional attribute for the +// "network.io.direction" semantic convention. It represents the network IO +// operation direction. +func (NetworkPackets) AttrNetworkIODirection(val NetworkIODirectionAttr) attribute.KeyValue { + return attribute.String("network.io.direction", string(val)) +} + +// AttrDevice returns an optional attribute for the "system.device" semantic +// convention. It represents the device identifier. +func (NetworkPackets) AttrDevice(val string) attribute.KeyValue { + return attribute.String("system.device", val) +} + +// PagingFaults is an instrument used to record metric values conforming to the +// "system.paging.faults" semantic conventions. +type PagingFaults struct { + metric.Int64Counter +} + +// NewPagingFaults returns a new PagingFaults instrument. +func NewPagingFaults( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (PagingFaults, error) { + // Check if the meter is nil. 
+ if m == nil { + return PagingFaults{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "system.paging.faults", + append([]metric.Int64CounterOption{ + metric.WithDescription(""), + metric.WithUnit("{fault}"), + }, opt...)..., + ) + if err != nil { + return PagingFaults{noop.Int64Counter{}}, err + } + return PagingFaults{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m PagingFaults) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (PagingFaults) Name() string { + return "system.paging.faults" +} + +// Unit returns the semantic convention unit of the instrument +func (PagingFaults) Unit() string { + return "{fault}" +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m PagingFaults) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m PagingFaults) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrPagingType returns an optional attribute for the "system.paging.type" +// semantic convention. It represents the memory paging type. 
+func (PagingFaults) AttrPagingType(val PagingTypeAttr) attribute.KeyValue { + return attribute.String("system.paging.type", string(val)) +} + +// PagingOperations is an instrument used to record metric values conforming to +// the "system.paging.operations" semantic conventions. +type PagingOperations struct { + metric.Int64Counter +} + +// NewPagingOperations returns a new PagingOperations instrument. +func NewPagingOperations( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (PagingOperations, error) { + // Check if the meter is nil. + if m == nil { + return PagingOperations{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "system.paging.operations", + append([]metric.Int64CounterOption{ + metric.WithDescription(""), + metric.WithUnit("{operation}"), + }, opt...)..., + ) + if err != nil { + return PagingOperations{noop.Int64Counter{}}, err + } + return PagingOperations{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m PagingOperations) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (PagingOperations) Name() string { + return "system.paging.operations" +} + +// Unit returns the semantic convention unit of the instrument +func (PagingOperations) Unit() string { + return "{operation}" +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m PagingOperations) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+func (m PagingOperations) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrPagingDirection returns an optional attribute for the +// "system.paging.direction" semantic convention. It represents the paging access +// direction. +func (PagingOperations) AttrPagingDirection(val PagingDirectionAttr) attribute.KeyValue { + return attribute.String("system.paging.direction", string(val)) +} + +// AttrPagingType returns an optional attribute for the "system.paging.type" +// semantic convention. It represents the memory paging type. +func (PagingOperations) AttrPagingType(val PagingTypeAttr) attribute.KeyValue { + return attribute.String("system.paging.type", string(val)) +} + +// PagingUsage is an instrument used to record metric values conforming to the +// "system.paging.usage" semantic conventions. It represents the unix swap or +// windows pagefile usage. +type PagingUsage struct { + metric.Int64UpDownCounter +} + +// NewPagingUsage returns a new PagingUsage instrument. +func NewPagingUsage( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (PagingUsage, error) { + // Check if the meter is nil. + if m == nil { + return PagingUsage{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "system.paging.usage", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Unix swap or windows pagefile usage"), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return PagingUsage{noop.Int64UpDownCounter{}}, err + } + return PagingUsage{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m PagingUsage) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (PagingUsage) Name() string { + return "system.paging.usage" +} + +// Unit returns the semantic convention unit of the instrument +func (PagingUsage) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (PagingUsage) Description() string { + return "Unix swap or windows pagefile usage" +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m PagingUsage) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m PagingUsage) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrDevice returns an optional attribute for the "system.device" semantic +// convention. It represents the unique identifier for the device responsible for +// managing paging operations. +func (PagingUsage) AttrDevice(val string) attribute.KeyValue { + return attribute.String("system.device", val) +} + +// AttrPagingState returns an optional attribute for the "system.paging.state" +// semantic convention. It represents the memory paging state. 
+func (PagingUsage) AttrPagingState(val PagingStateAttr) attribute.KeyValue { + return attribute.String("system.paging.state", string(val)) +} + +// PagingUtilization is an instrument used to record metric values conforming to +// the "system.paging.utilization" semantic conventions. +type PagingUtilization struct { + metric.Int64Gauge +} + +// NewPagingUtilization returns a new PagingUtilization instrument. +func NewPagingUtilization( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (PagingUtilization, error) { + // Check if the meter is nil. + if m == nil { + return PagingUtilization{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "system.paging.utilization", + append([]metric.Int64GaugeOption{ + metric.WithDescription(""), + metric.WithUnit("1"), + }, opt...)..., + ) + if err != nil { + return PagingUtilization{noop.Int64Gauge{}}, err + } + return PagingUtilization{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m PagingUtilization) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (PagingUtilization) Name() string { + return "system.paging.utilization" +} + +// Unit returns the semantic convention unit of the instrument +func (PagingUtilization) Unit() string { + return "1" +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m PagingUtilization) Record( + ctx context.Context, + val int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+func (m PagingUtilization) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// AttrDevice returns an optional attribute for the "system.device" semantic +// convention. It represents the unique identifier for the device responsible for +// managing paging operations. +func (PagingUtilization) AttrDevice(val string) attribute.KeyValue { + return attribute.String("system.device", val) +} + +// AttrPagingState returns an optional attribute for the "system.paging.state" +// semantic convention. It represents the memory paging state. +func (PagingUtilization) AttrPagingState(val PagingStateAttr) attribute.KeyValue { + return attribute.String("system.paging.state", string(val)) +} + +// ProcessCount is an instrument used to record metric values conforming to the +// "system.process.count" semantic conventions. It represents the total number of +// processes in each state. +type ProcessCount struct { + metric.Int64UpDownCounter +} + +// NewProcessCount returns a new ProcessCount instrument. +func NewProcessCount( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ProcessCount, error) { + // Check if the meter is nil. + if m == nil { + return ProcessCount{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "system.process.count", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Total number of processes in each state"), + metric.WithUnit("{process}"), + }, opt...)..., + ) + if err != nil { + return ProcessCount{noop.Int64UpDownCounter{}}, err + } + return ProcessCount{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m ProcessCount) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ProcessCount) Name() string { + return "system.process.count" +} + +// Unit returns the semantic convention unit of the instrument +func (ProcessCount) Unit() string { + return "{process}" +} + +// Description returns the semantic convention description of the instrument +func (ProcessCount) Description() string { + return "Total number of processes in each state" +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m ProcessCount) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m ProcessCount) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrProcessStatus returns an optional attribute for the +// "system.process.status" semantic convention. It represents the process state, +// e.g., [Linux Process State Codes]. 
+// +// [Linux Process State Codes]: https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES +func (ProcessCount) AttrProcessStatus(val ProcessStatusAttr) attribute.KeyValue { + return attribute.String("system.process.status", string(val)) +} + +// ProcessCreated is an instrument used to record metric values conforming to the +// "system.process.created" semantic conventions. It represents the total number +// of processes created over uptime of the host. +type ProcessCreated struct { + metric.Int64Counter +} + +// NewProcessCreated returns a new ProcessCreated instrument. +func NewProcessCreated( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (ProcessCreated, error) { + // Check if the meter is nil. + if m == nil { + return ProcessCreated{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "system.process.created", + append([]metric.Int64CounterOption{ + metric.WithDescription("Total number of processes created over uptime of the host"), + metric.WithUnit("{process}"), + }, opt...)..., + ) + if err != nil { + return ProcessCreated{noop.Int64Counter{}}, err + } + return ProcessCreated{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ProcessCreated) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (ProcessCreated) Name() string { + return "system.process.created" +} + +// Unit returns the semantic convention unit of the instrument +func (ProcessCreated) Unit() string { + return "{process}" +} + +// Description returns the semantic convention description of the instrument +func (ProcessCreated) Description() string { + return "Total number of processes created over uptime of the host" +} + +// Add adds incr to the existing count for attrs. 
+func (m ProcessCreated) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m ProcessCreated) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// Uptime is an instrument used to record metric values conforming to the +// "system.uptime" semantic conventions. It represents the time the system has +// been running. +type Uptime struct { + metric.Float64Gauge +} + +// NewUptime returns a new Uptime instrument. +func NewUptime( + m metric.Meter, + opt ...metric.Float64GaugeOption, +) (Uptime, error) { + // Check if the meter is nil. + if m == nil { + return Uptime{noop.Float64Gauge{}}, nil + } + + i, err := m.Float64Gauge( + "system.uptime", + append([]metric.Float64GaugeOption{ + metric.WithDescription("The time the system has been running"), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return Uptime{noop.Float64Gauge{}}, err + } + return Uptime{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m Uptime) Inst() metric.Float64Gauge { + return m.Float64Gauge +} + +// Name returns the semantic convention name of the instrument. 
+func (Uptime) Name() string { + return "system.uptime" +} + +// Unit returns the semantic convention unit of the instrument +func (Uptime) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (Uptime) Description() string { + return "The time the system has been running" +} + +// Record records val to the current distribution for attrs. +// +// Instrumentations SHOULD use a gauge with type `double` and measure uptime in +// seconds as a floating point number with the highest precision available. +// The actual accuracy would depend on the instrumentation and operating system. +func (m Uptime) Record(ctx context.Context, val float64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Float64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Float64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// Instrumentations SHOULD use a gauge with type `double` and measure uptime in +// seconds as a floating point number with the highest precision available. +// The actual accuracy would depend on the instrumentation and operating system. +func (m Uptime) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Gauge.Record(ctx, val, *o...) +} \ No newline at end of file diff --git a/semconv/v1.36.0/vcsconv/metric.go b/semconv/v1.36.0/vcsconv/metric.go new file mode 100644 index 00000000000..5f1974cd36f --- /dev/null +++ b/semconv/v1.36.0/vcsconv/metric.go @@ -0,0 +1,1592 @@ +// Code generated from semantic convention specification. DO NOT EDIT. 
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package vcsconv provides types and functionality for OpenTelemetry semantic
+// conventions in the "vcs" namespace.
+package vcsconv
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/noop"
+)
+
+var (
+	addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }}
+	recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }}
+)
+
+// ChangeStateAttr is an attribute conforming to the vcs.change.state semantic
+// conventions. It represents the state of the change (pull request/merge
+// request/changelist).
+type ChangeStateAttr string
+
+var (
+	// ChangeStateOpen is the open means the change is currently active and under
+	// review. It hasn't been merged into the target branch yet, and it's still
+	// possible to make changes or add comments.
+	ChangeStateOpen ChangeStateAttr = "open"
+	// ChangeStateWip is the WIP (work-in-progress, draft) means the change is still
+	// in progress and not yet ready for a full review. It might still undergo
+	// significant changes.
+	ChangeStateWip ChangeStateAttr = "wip"
+	// ChangeStateClosed is the closed means the merge request has been closed
+	// without merging. This can happen for various reasons, such as the changes
+	// being deemed unnecessary, the issue being resolved in another way, or the
+	// author deciding to withdraw the request.
+	ChangeStateClosed ChangeStateAttr = "closed"
+	// ChangeStateMerged is the merged indicates that the change has been
+	// successfully integrated into the target codebase.
+	ChangeStateMerged ChangeStateAttr = "merged"
+)
+
+// LineChangeTypeAttr is an attribute conforming to the vcs.line_change.type
+// semantic conventions. It represents the type of line change being measured on
+// a branch or change.
+type LineChangeTypeAttr string + +var ( + // LineChangeTypeAdded is the how many lines were added. + LineChangeTypeAdded LineChangeTypeAttr = "added" + // LineChangeTypeRemoved is the how many lines were removed. + LineChangeTypeRemoved LineChangeTypeAttr = "removed" +) + +// ProviderNameAttr is an attribute conforming to the vcs.provider.name semantic +// conventions. It represents the name of the version control system provider. +type ProviderNameAttr string + +var ( + // ProviderNameGithub is the [GitHub]. + // + // [GitHub]: https://github.com + ProviderNameGithub ProviderNameAttr = "github" + // ProviderNameGitlab is the [GitLab]. + // + // [GitLab]: https://gitlab.com + ProviderNameGitlab ProviderNameAttr = "gitlab" + // ProviderNameGitea is the [Gitea]. + // + // [Gitea]: https://gitea.io + ProviderNameGitea ProviderNameAttr = "gitea" + // ProviderNameBitbucket is the [Bitbucket]. + // + // [Bitbucket]: https://bitbucket.org + ProviderNameBitbucket ProviderNameAttr = "bitbucket" +) + +// RefBaseTypeAttr is an attribute conforming to the vcs.ref.base.type semantic +// conventions. It represents the type of the [reference] in the repository. +// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +type RefBaseTypeAttr string + +var ( + // RefBaseTypeBranch is the [branch]. + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + RefBaseTypeBranch RefBaseTypeAttr = "branch" + // RefBaseTypeTag is the [tag]. + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + RefBaseTypeTag RefBaseTypeAttr = "tag" +) + +// RefHeadTypeAttr is an attribute conforming to the vcs.ref.head.type semantic +// conventions. It represents the type of the [reference] in the repository. +// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +type RefHeadTypeAttr string + +var ( + // RefHeadTypeBranch is the [branch]. 
+ // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + RefHeadTypeBranch RefHeadTypeAttr = "branch" + // RefHeadTypeTag is the [tag]. + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + RefHeadTypeTag RefHeadTypeAttr = "tag" +) + +// RefTypeAttr is an attribute conforming to the vcs.ref.type semantic +// conventions. It represents the type of the [reference] in the repository. +// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +type RefTypeAttr string + +var ( + // RefTypeBranch is the [branch]. + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + RefTypeBranch RefTypeAttr = "branch" + // RefTypeTag is the [tag]. + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + RefTypeTag RefTypeAttr = "tag" +) + +// RevisionDeltaDirectionAttr is an attribute conforming to the +// vcs.revision_delta.direction semantic conventions. It represents the type of +// revision comparison. +type RevisionDeltaDirectionAttr string + +var ( + // RevisionDeltaDirectionBehind is the how many revisions the change is behind + // the target ref. + RevisionDeltaDirectionBehind RevisionDeltaDirectionAttr = "behind" + // RevisionDeltaDirectionAhead is the how many revisions the change is ahead of + // the target ref. + RevisionDeltaDirectionAhead RevisionDeltaDirectionAttr = "ahead" +) + +// ChangeCount is an instrument used to record metric values conforming to the +// "vcs.change.count" semantic conventions. It represents the number of changes +// (pull requests/merge requests/changelists) in a repository, categorized by +// their state (e.g. open or merged). +type ChangeCount struct { + metric.Int64UpDownCounter +} + +// NewChangeCount returns a new ChangeCount instrument. 
+func NewChangeCount( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ChangeCount, error) { + // Check if the meter is nil. + if m == nil { + return ChangeCount{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "vcs.change.count", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of changes (pull requests/merge requests/changelists) in a repository, categorized by their state (e.g. open or merged)"), + metric.WithUnit("{change}"), + }, opt...)..., + ) + if err != nil { + return ChangeCount{noop.Int64UpDownCounter{}}, err + } + return ChangeCount{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ChangeCount) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ChangeCount) Name() string { + return "vcs.change.count" +} + +// Unit returns the semantic convention unit of the instrument +func (ChangeCount) Unit() string { + return "{change}" +} + +// Description returns the semantic convention description of the instrument +func (ChangeCount) Description() string { + return "The number of changes (pull requests/merge requests/changelists) in a repository, categorized by their state (e.g. open or merged)" +} + +// Add adds incr to the existing count for attrs. +// +// The changeState is the the state of the change (pull request/merge +// request/changelist). +// +// The repositoryUrlFull is the the [canonical URL] of the repository providing +// the complete HTTP(S) address in order to locate and identify the repository +// through a browser. +// +// All additional attrs passed are included in the recorded value. +// +// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. 
+func (m ChangeCount) Add( + ctx context.Context, + incr int64, + changeState ChangeStateAttr, + repositoryUrlFull string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("vcs.change.state", string(changeState)), + attribute.String("vcs.repository.url.full", repositoryUrlFull), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m ChangeCount) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrOwnerName returns an optional attribute for the "vcs.owner.name" semantic +// convention. It represents the group owner within the version control system. +func (ChangeCount) AttrOwnerName(val string) attribute.KeyValue { + return attribute.String("vcs.owner.name", val) +} + +// AttrRepositoryName returns an optional attribute for the "vcs.repository.name" +// semantic convention. It represents the human readable name of the repository. +// It SHOULD NOT include any additional identifier like Group/SubGroup in GitLab +// or organization in GitHub. +func (ChangeCount) AttrRepositoryName(val string) attribute.KeyValue { + return attribute.String("vcs.repository.name", val) +} + +// AttrProviderName returns an optional attribute for the "vcs.provider.name" +// semantic convention. It represents the name of the version control system +// provider. 
+func (ChangeCount) AttrProviderName(val ProviderNameAttr) attribute.KeyValue { + return attribute.String("vcs.provider.name", string(val)) +} + +// ChangeDuration is an instrument used to record metric values conforming to the +// "vcs.change.duration" semantic conventions. It represents the time duration a +// change (pull request/merge request/changelist) has been in a given state. +type ChangeDuration struct { + metric.Float64Gauge +} + +// NewChangeDuration returns a new ChangeDuration instrument. +func NewChangeDuration( + m metric.Meter, + opt ...metric.Float64GaugeOption, +) (ChangeDuration, error) { + // Check if the meter is nil. + if m == nil { + return ChangeDuration{noop.Float64Gauge{}}, nil + } + + i, err := m.Float64Gauge( + "vcs.change.duration", + append([]metric.Float64GaugeOption{ + metric.WithDescription("The time duration a change (pull request/merge request/changelist) has been in a given state."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ChangeDuration{noop.Float64Gauge{}}, err + } + return ChangeDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ChangeDuration) Inst() metric.Float64Gauge { + return m.Float64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (ChangeDuration) Name() string { + return "vcs.change.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (ChangeDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (ChangeDuration) Description() string { + return "The time duration a change (pull request/merge request/changelist) has been in a given state." +} + +// Record records val to the current distribution for attrs. +// +// The changeState is the the state of the change (pull request/merge +// request/changelist). +// +// The refHeadName is the the name of the [reference] such as **branch** or +// **tag** in the repository. 
+//
+// The repositoryUrlFull is the [canonical URL] of the repository providing
+// the complete HTTP(S) address in order to locate and identify the repository
+// through a browser.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical.
+func (m ChangeDuration) Record(
+	ctx context.Context,
+	val float64,
+	changeState ChangeStateAttr,
+	refHeadName string,
+	repositoryUrlFull string,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Float64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("vcs.change.state", string(changeState)),
+				attribute.String("vcs.ref.head.name", refHeadName),
+				attribute.String("vcs.repository.url.full", repositoryUrlFull),
+			)...,
+		),
+	)
+
+	m.Float64Gauge.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m ChangeDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Gauge.Record(ctx, val)
+		return
+	}
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Gauge.Record(ctx, val, *o...)
+}
+
+// AttrOwnerName returns an optional attribute for the "vcs.owner.name" semantic
+// convention. It represents the group owner within the version control system.
+func (ChangeDuration) AttrOwnerName(val string) attribute.KeyValue {
+	return attribute.String("vcs.owner.name", val)
+}
+
+// AttrRepositoryName returns an optional attribute for the "vcs.repository.name"
+// semantic convention. 
It represents the human readable name of the repository. +// It SHOULD NOT include any additional identifier like Group/SubGroup in GitLab +// or organization in GitHub. +func (ChangeDuration) AttrRepositoryName(val string) attribute.KeyValue { + return attribute.String("vcs.repository.name", val) +} + +// AttrProviderName returns an optional attribute for the "vcs.provider.name" +// semantic convention. It represents the name of the version control system +// provider. +func (ChangeDuration) AttrProviderName(val ProviderNameAttr) attribute.KeyValue { + return attribute.String("vcs.provider.name", string(val)) +} + +// ChangeTimeToApproval is an instrument used to record metric values conforming +// to the "vcs.change.time_to_approval" semantic conventions. It represents the +// amount of time since its creation it took a change (pull request/merge +// request/changelist) to get the first approval. +type ChangeTimeToApproval struct { + metric.Float64Gauge +} + +// NewChangeTimeToApproval returns a new ChangeTimeToApproval instrument. +func NewChangeTimeToApproval( + m metric.Meter, + opt ...metric.Float64GaugeOption, +) (ChangeTimeToApproval, error) { + // Check if the meter is nil. + if m == nil { + return ChangeTimeToApproval{noop.Float64Gauge{}}, nil + } + + i, err := m.Float64Gauge( + "vcs.change.time_to_approval", + append([]metric.Float64GaugeOption{ + metric.WithDescription("The amount of time since its creation it took a change (pull request/merge request/changelist) to get the first approval."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ChangeTimeToApproval{noop.Float64Gauge{}}, err + } + return ChangeTimeToApproval{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ChangeTimeToApproval) Inst() metric.Float64Gauge { + return m.Float64Gauge +} + +// Name returns the semantic convention name of the instrument. 
+func (ChangeTimeToApproval) Name() string { + return "vcs.change.time_to_approval" +} + +// Unit returns the semantic convention unit of the instrument +func (ChangeTimeToApproval) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (ChangeTimeToApproval) Description() string { + return "The amount of time since its creation it took a change (pull request/merge request/changelist) to get the first approval." +} + +// Record records val to the current distribution for attrs. +// +// The refHeadName is the the name of the [reference] such as **branch** or +// **tag** in the repository. +// +// The repositoryUrlFull is the the [canonical URL] of the repository providing +// the complete HTTP(S) address in order to locate and identify the repository +// through a browser. +// +// All additional attrs passed are included in the recorded value. +// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. +func (m ChangeTimeToApproval) Record( + ctx context.Context, + val float64, + refHeadName string, + repositoryUrlFull string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("vcs.ref.head.name", refHeadName), + attribute.String("vcs.repository.url.full", repositoryUrlFull), + )..., + ), + ) + + m.Float64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+func (m ChangeTimeToApproval) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Gauge.Record(ctx, val)
+		return
+	}
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Gauge.Record(ctx, val, *o...)
+}
+
+// AttrOwnerName returns an optional attribute for the "vcs.owner.name" semantic
+// convention. It represents the group owner within the version control system.
+func (ChangeTimeToApproval) AttrOwnerName(val string) attribute.KeyValue {
+	return attribute.String("vcs.owner.name", val)
+}
+
+// AttrRefBaseName returns an optional attribute for the "vcs.ref.base.name"
+// semantic convention. It represents the name of the [reference] such as
+// **branch** or **tag** in the repository.
+//
+// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+func (ChangeTimeToApproval) AttrRefBaseName(val string) attribute.KeyValue {
+	return attribute.String("vcs.ref.base.name", val)
+}
+
+// AttrRepositoryName returns an optional attribute for the "vcs.repository.name"
+// semantic convention. It represents the human readable name of the repository.
+// It SHOULD NOT include any additional identifier like Group/SubGroup in GitLab
+// or organization in GitHub.
+func (ChangeTimeToApproval) AttrRepositoryName(val string) attribute.KeyValue {
+	return attribute.String("vcs.repository.name", val)
+}
+
+// AttrProviderName returns an optional attribute for the "vcs.provider.name"
+// semantic convention. It represents the name of the version control system
+// provider.
+func (ChangeTimeToApproval) AttrProviderName(val ProviderNameAttr) attribute.KeyValue {
+	return attribute.String("vcs.provider.name", string(val))
+}
+
+// AttrRefBaseRevision returns an optional attribute for the
+// "vcs.ref.base.revision" semantic convention. 
It represents the revision, +// literally [revised version], The revision most often refers to a commit object +// in Git, or a revision number in SVN. +// +// [revised version]: https://www.merriam-webster.com/dictionary/revision +func (ChangeTimeToApproval) AttrRefBaseRevision(val string) attribute.KeyValue { + return attribute.String("vcs.ref.base.revision", val) +} + +// AttrRefHeadRevision returns an optional attribute for the +// "vcs.ref.head.revision" semantic convention. It represents the revision, +// literally [revised version], The revision most often refers to a commit object +// in Git, or a revision number in SVN. +// +// [revised version]: https://www.merriam-webster.com/dictionary/revision +func (ChangeTimeToApproval) AttrRefHeadRevision(val string) attribute.KeyValue { + return attribute.String("vcs.ref.head.revision", val) +} + +// ChangeTimeToMerge is an instrument used to record metric values conforming to +// the "vcs.change.time_to_merge" semantic conventions. It represents the amount +// of time since its creation it took a change (pull request/merge +// request/changelist) to get merged into the target(base) ref. +type ChangeTimeToMerge struct { + metric.Float64Gauge +} + +// NewChangeTimeToMerge returns a new ChangeTimeToMerge instrument. +func NewChangeTimeToMerge( + m metric.Meter, + opt ...metric.Float64GaugeOption, +) (ChangeTimeToMerge, error) { + // Check if the meter is nil. + if m == nil { + return ChangeTimeToMerge{noop.Float64Gauge{}}, nil + } + + i, err := m.Float64Gauge( + "vcs.change.time_to_merge", + append([]metric.Float64GaugeOption{ + metric.WithDescription("The amount of time since its creation it took a change (pull request/merge request/changelist) to get merged into the target(base) ref."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ChangeTimeToMerge{noop.Float64Gauge{}}, err + } + return ChangeTimeToMerge{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m ChangeTimeToMerge) Inst() metric.Float64Gauge { + return m.Float64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (ChangeTimeToMerge) Name() string { + return "vcs.change.time_to_merge" +} + +// Unit returns the semantic convention unit of the instrument +func (ChangeTimeToMerge) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (ChangeTimeToMerge) Description() string { + return "The amount of time since its creation it took a change (pull request/merge request/changelist) to get merged into the target(base) ref." +} + +// Record records val to the current distribution for attrs. +// +// The refHeadName is the the name of the [reference] such as **branch** or +// **tag** in the repository. +// +// The repositoryUrlFull is the the [canonical URL] of the repository providing +// the complete HTTP(S) address in order to locate and identify the repository +// through a browser. +// +// All additional attrs passed are included in the recorded value. +// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. +func (m ChangeTimeToMerge) Record( + ctx context.Context, + val float64, + refHeadName string, + repositoryUrlFull string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("vcs.ref.head.name", refHeadName), + attribute.String("vcs.repository.url.full", repositoryUrlFull), + )..., + ), + ) + + m.Float64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+func (m ChangeTimeToMerge) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Gauge.Record(ctx, val)
+		return
+	}
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Gauge.Record(ctx, val, *o...)
+}
+
+// AttrOwnerName returns an optional attribute for the "vcs.owner.name" semantic
+// convention. It represents the group owner within the version control system.
+func (ChangeTimeToMerge) AttrOwnerName(val string) attribute.KeyValue {
+	return attribute.String("vcs.owner.name", val)
+}
+
+// AttrRefBaseName returns an optional attribute for the "vcs.ref.base.name"
+// semantic convention. It represents the name of the [reference] such as
+// **branch** or **tag** in the repository.
+//
+// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+func (ChangeTimeToMerge) AttrRefBaseName(val string) attribute.KeyValue {
+	return attribute.String("vcs.ref.base.name", val)
+}
+
+// AttrRepositoryName returns an optional attribute for the "vcs.repository.name"
+// semantic convention. It represents the human readable name of the repository.
+// It SHOULD NOT include any additional identifier like Group/SubGroup in GitLab
+// or organization in GitHub.
+func (ChangeTimeToMerge) AttrRepositoryName(val string) attribute.KeyValue {
+	return attribute.String("vcs.repository.name", val)
+}
+
+// AttrProviderName returns an optional attribute for the "vcs.provider.name"
+// semantic convention. It represents the name of the version control system
+// provider.
+func (ChangeTimeToMerge) AttrProviderName(val ProviderNameAttr) attribute.KeyValue {
+	return attribute.String("vcs.provider.name", string(val))
+}
+
+// AttrRefBaseRevision returns an optional attribute for the
+// "vcs.ref.base.revision" semantic convention. 
It represents the revision, +// literally [revised version], The revision most often refers to a commit object +// in Git, or a revision number in SVN. +// +// [revised version]: https://www.merriam-webster.com/dictionary/revision +func (ChangeTimeToMerge) AttrRefBaseRevision(val string) attribute.KeyValue { + return attribute.String("vcs.ref.base.revision", val) +} + +// AttrRefHeadRevision returns an optional attribute for the +// "vcs.ref.head.revision" semantic convention. It represents the revision, +// literally [revised version], The revision most often refers to a commit object +// in Git, or a revision number in SVN. +// +// [revised version]: https://www.merriam-webster.com/dictionary/revision +func (ChangeTimeToMerge) AttrRefHeadRevision(val string) attribute.KeyValue { + return attribute.String("vcs.ref.head.revision", val) +} + +// ContributorCount is an instrument used to record metric values conforming to +// the "vcs.contributor.count" semantic conventions. It represents the number of +// unique contributors to a repository. +type ContributorCount struct { + metric.Int64Gauge +} + +// NewContributorCount returns a new ContributorCount instrument. +func NewContributorCount( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (ContributorCount, error) { + // Check if the meter is nil. + if m == nil { + return ContributorCount{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "vcs.contributor.count", + append([]metric.Int64GaugeOption{ + metric.WithDescription("The number of unique contributors to a repository"), + metric.WithUnit("{contributor}"), + }, opt...)..., + ) + if err != nil { + return ContributorCount{noop.Int64Gauge{}}, err + } + return ContributorCount{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ContributorCount) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. 
+func (ContributorCount) Name() string {
+	return "vcs.contributor.count"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ContributorCount) Unit() string {
+	return "{contributor}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ContributorCount) Description() string {
+	return "The number of unique contributors to a repository"
+}
+
+// Record records val to the current distribution for attrs.
+//
+// The repositoryUrlFull is the [canonical URL] of the repository providing
+// the complete HTTP(S) address in order to locate and identify the repository
+// through a browser.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical.
+func (m ContributorCount) Record(
+	ctx context.Context,
+	val int64,
+	repositoryUrlFull string,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("vcs.repository.url.full", repositoryUrlFull),
+			)...,
+		),
+	)
+
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m ContributorCount) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		return
+	}
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// AttrOwnerName returns an optional attribute for the "vcs.owner.name" semantic
+// convention. It represents the group owner within the version control system. 
+func (ContributorCount) AttrOwnerName(val string) attribute.KeyValue { + return attribute.String("vcs.owner.name", val) +} + +// AttrRepositoryName returns an optional attribute for the "vcs.repository.name" +// semantic convention. It represents the human readable name of the repository. +// It SHOULD NOT include any additional identifier like Group/SubGroup in GitLab +// or organization in GitHub. +func (ContributorCount) AttrRepositoryName(val string) attribute.KeyValue { + return attribute.String("vcs.repository.name", val) +} + +// AttrProviderName returns an optional attribute for the "vcs.provider.name" +// semantic convention. It represents the name of the version control system +// provider. +func (ContributorCount) AttrProviderName(val ProviderNameAttr) attribute.KeyValue { + return attribute.String("vcs.provider.name", string(val)) +} + +// RefCount is an instrument used to record metric values conforming to the +// "vcs.ref.count" semantic conventions. It represents the number of refs of type +// branch or tag in a repository. +type RefCount struct { + metric.Int64UpDownCounter +} + +// NewRefCount returns a new RefCount instrument. +func NewRefCount( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (RefCount, error) { + // Check if the meter is nil. + if m == nil { + return RefCount{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "vcs.ref.count", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of refs of type branch or tag in a repository."), + metric.WithUnit("{ref}"), + }, opt...)..., + ) + if err != nil { + return RefCount{noop.Int64UpDownCounter{}}, err + } + return RefCount{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m RefCount) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (RefCount) Name() string { + return "vcs.ref.count" +} + +// Unit returns the semantic convention unit of the instrument +func (RefCount) Unit() string { + return "{ref}" +} + +// Description returns the semantic convention description of the instrument +func (RefCount) Description() string { + return "The number of refs of type branch or tag in a repository." +} + +// Add adds incr to the existing count for attrs. +// +// The refType is the the type of the [reference] in the repository. +// +// The repositoryUrlFull is the the [canonical URL] of the repository providing +// the complete HTTP(S) address in order to locate and identify the repository +// through a browser. +// +// All additional attrs passed are included in the recorded value. +// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. +func (m RefCount) Add( + ctx context.Context, + incr int64, + refType RefTypeAttr, + repositoryUrlFull string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("vcs.ref.type", string(refType)), + attribute.String("vcs.repository.url.full", repositoryUrlFull), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m RefCount) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) 
+} + +// AttrOwnerName returns an optional attribute for the "vcs.owner.name" semantic +// convention. It represents the group owner within the version control system. +func (RefCount) AttrOwnerName(val string) attribute.KeyValue { + return attribute.String("vcs.owner.name", val) +} + +// AttrRepositoryName returns an optional attribute for the "vcs.repository.name" +// semantic convention. It represents the human readable name of the repository. +// It SHOULD NOT include any additional identifier like Group/SubGroup in GitLab +// or organization in GitHub. +func (RefCount) AttrRepositoryName(val string) attribute.KeyValue { + return attribute.String("vcs.repository.name", val) +} + +// AttrProviderName returns an optional attribute for the "vcs.provider.name" +// semantic convention. It represents the name of the version control system +// provider. +func (RefCount) AttrProviderName(val ProviderNameAttr) attribute.KeyValue { + return attribute.String("vcs.provider.name", string(val)) +} + +// RefLinesDelta is an instrument used to record metric values conforming to the +// "vcs.ref.lines_delta" semantic conventions. It represents the number of lines +// added/removed in a ref (branch) relative to the ref from the +// `vcs.ref.base.name` attribute. +type RefLinesDelta struct { + metric.Int64Gauge +} + +// NewRefLinesDelta returns a new RefLinesDelta instrument. +func NewRefLinesDelta( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (RefLinesDelta, error) { + // Check if the meter is nil. 
+ if m == nil { + return RefLinesDelta{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "vcs.ref.lines_delta", + append([]metric.Int64GaugeOption{ + metric.WithDescription("The number of lines added/removed in a ref (branch) relative to the ref from the `vcs.ref.base.name` attribute."), + metric.WithUnit("{line}"), + }, opt...)..., + ) + if err != nil { + return RefLinesDelta{noop.Int64Gauge{}}, err + } + return RefLinesDelta{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m RefLinesDelta) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (RefLinesDelta) Name() string { + return "vcs.ref.lines_delta" +} + +// Unit returns the semantic convention unit of the instrument +func (RefLinesDelta) Unit() string { + return "{line}" +} + +// Description returns the semantic convention description of the instrument +func (RefLinesDelta) Description() string { + return "The number of lines added/removed in a ref (branch) relative to the ref from the `vcs.ref.base.name` attribute." +} + +// Record records val to the current distribution for attrs. +// +// The lineChangeType is the the type of line change being measured on a branch +// or change. +// +// The refBaseName is the the name of the [reference] such as **branch** or +// **tag** in the repository. +// +// The refBaseType is the the type of the [reference] in the repository. +// +// The refHeadName is the the name of the [reference] such as **branch** or +// **tag** in the repository. +// +// The refHeadType is the the type of the [reference] in the repository. +// +// The repositoryUrlFull is the the [canonical URL] of the repository providing +// the complete HTTP(S) address in order to locate and identify the repository +// through a browser. +// +// All additional attrs passed are included in the recorded value. 
+// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. +// +// This metric should be reported for each `vcs.line_change.type` value. For +// example if a ref added 3 lines and removed 2 lines, +// instrumentation SHOULD report two measurements: 3 and 2 (both positive +// numbers). +// If number of lines added/removed should be calculated from the start of time, +// then `vcs.ref.base.name` SHOULD be set to an empty string. +func (m RefLinesDelta) Record( + ctx context.Context, + val int64, + lineChangeType LineChangeTypeAttr, + refBaseName string, + refBaseType RefBaseTypeAttr, + refHeadName string, + refHeadType RefHeadTypeAttr, + repositoryUrlFull string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("vcs.line_change.type", string(lineChangeType)), + attribute.String("vcs.ref.base.name", refBaseName), + attribute.String("vcs.ref.base.type", string(refBaseType)), + attribute.String("vcs.ref.head.name", refHeadName), + attribute.String("vcs.ref.head.type", string(refHeadType)), + attribute.String("vcs.repository.url.full", repositoryUrlFull), + )..., + ), + ) + + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// This metric should be reported for each `vcs.line_change.type` value. 
For
+// example if a ref added 3 lines and removed 2 lines,
+// instrumentation SHOULD report two measurements: 3 and 2 (both positive
+// numbers).
+// If number of lines added/removed should be calculated from the start of time,
+// then `vcs.ref.base.name` SHOULD be set to an empty string.
+func (m RefLinesDelta) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		// Return here: without it the value was recorded a second time below
+		// with an empty attribute set (see AddSet on the counter instruments,
+		// which already return after the fast path).
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// AttrChangeID returns an optional attribute for the "vcs.change.id" semantic
+// convention. It represents the ID of the change (pull request/merge
+// request/changelist) if applicable. This is usually a unique (within
+// repository) identifier generated by the VCS system.
+func (RefLinesDelta) AttrChangeID(val string) attribute.KeyValue {
+	return attribute.String("vcs.change.id", val)
+}
+
+// AttrOwnerName returns an optional attribute for the "vcs.owner.name" semantic
+// convention. It represents the group owner within the version control system.
+func (RefLinesDelta) AttrOwnerName(val string) attribute.KeyValue {
+	return attribute.String("vcs.owner.name", val)
+}
+
+// AttrRepositoryName returns an optional attribute for the "vcs.repository.name"
+// semantic convention. It represents the human readable name of the repository.
+// It SHOULD NOT include any additional identifier like Group/SubGroup in GitLab
+// or organization in GitHub.
+func (RefLinesDelta) AttrRepositoryName(val string) attribute.KeyValue {
+	return attribute.String("vcs.repository.name", val)
+}
+
+// AttrProviderName returns an optional attribute for the "vcs.provider.name"
+// semantic convention. It represents the name of the version control system
+// provider.
+func (RefLinesDelta) AttrProviderName(val ProviderNameAttr) attribute.KeyValue { + return attribute.String("vcs.provider.name", string(val)) +} + +// RefRevisionsDelta is an instrument used to record metric values conforming to +// the "vcs.ref.revisions_delta" semantic conventions. It represents the number +// of revisions (commits) a ref (branch) is ahead/behind the branch from the +// `vcs.ref.base.name` attribute. +type RefRevisionsDelta struct { + metric.Int64Gauge +} + +// NewRefRevisionsDelta returns a new RefRevisionsDelta instrument. +func NewRefRevisionsDelta( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (RefRevisionsDelta, error) { + // Check if the meter is nil. + if m == nil { + return RefRevisionsDelta{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "vcs.ref.revisions_delta", + append([]metric.Int64GaugeOption{ + metric.WithDescription("The number of revisions (commits) a ref (branch) is ahead/behind the branch from the `vcs.ref.base.name` attribute"), + metric.WithUnit("{revision}"), + }, opt...)..., + ) + if err != nil { + return RefRevisionsDelta{noop.Int64Gauge{}}, err + } + return RefRevisionsDelta{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m RefRevisionsDelta) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (RefRevisionsDelta) Name() string { + return "vcs.ref.revisions_delta" +} + +// Unit returns the semantic convention unit of the instrument +func (RefRevisionsDelta) Unit() string { + return "{revision}" +} + +// Description returns the semantic convention description of the instrument +func (RefRevisionsDelta) Description() string { + return "The number of revisions (commits) a ref (branch) is ahead/behind the branch from the `vcs.ref.base.name` attribute" +} + +// Record records val to the current distribution for attrs. 
+//
+// The refBaseName is the name of the [reference] such as **branch** or
+// **tag** in the repository.
+//
+// The refBaseType is the type of the [reference] in the repository.
+//
+// The refHeadName is the name of the [reference] such as **branch** or
+// **tag** in the repository.
+//
+// The refHeadType is the type of the [reference] in the repository.
+//
+// The repositoryUrlFull is the [canonical URL] of the repository providing
+// the complete HTTP(S) address in order to locate and identify the repository
+// through a browser.
+//
+// The revisionDeltaDirection is the type of revision comparison.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical.
+//
+// This metric should be reported for each `vcs.revision_delta.direction` value.
+// For example if branch `a` is 3 commits behind and 2 commits ahead of `trunk`,
+// instrumentation SHOULD report two measurements: 3 and 2 (both positive
+// numbers) and `vcs.ref.base.name` is set to `trunk`.
+func (m RefRevisionsDelta) Record(
+	ctx context.Context,
+	val int64,
+	refBaseName string,
+	refBaseType RefBaseTypeAttr,
+	refHeadName string,
+	refHeadType RefHeadTypeAttr,
+	repositoryUrlFull string,
+	revisionDeltaDirection RevisionDeltaDirectionAttr,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("vcs.ref.base.name", refBaseName),
+				attribute.String("vcs.ref.base.type", string(refBaseType)),
+				attribute.String("vcs.ref.head.name", refHeadName),
+				attribute.String("vcs.ref.head.type", string(refHeadType)),
+				attribute.String("vcs.repository.url.full", repositoryUrlFull),
+				attribute.String("vcs.revision_delta.direction", string(revisionDeltaDirection)),
+			)...,
+		),
+	)
+
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// This metric should be reported for each `vcs.revision_delta.direction` value.
+// For example if branch `a` is 3 commits behind and 2 commits ahead of `trunk`,
+// instrumentation SHOULD report two measurements: 3 and 2 (both positive
+// numbers) and `vcs.ref.base.name` is set to `trunk`.
+func (m RefRevisionsDelta) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		// Return here: without it the value was recorded a second time below
+		// with an empty attribute set (see AddSet on the counter instruments,
+		// which already return after the fast path).
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// AttrChangeID returns an optional attribute for the "vcs.change.id" semantic
+// convention. It represents the ID of the change (pull request/merge
+// request/changelist) if applicable. This is usually a unique (within
+// repository) identifier generated by the VCS system.
+func (RefRevisionsDelta) AttrChangeID(val string) attribute.KeyValue { + return attribute.String("vcs.change.id", val) +} + +// AttrOwnerName returns an optional attribute for the "vcs.owner.name" semantic +// convention. It represents the group owner within the version control system. +func (RefRevisionsDelta) AttrOwnerName(val string) attribute.KeyValue { + return attribute.String("vcs.owner.name", val) +} + +// AttrRepositoryName returns an optional attribute for the "vcs.repository.name" +// semantic convention. It represents the human readable name of the repository. +// It SHOULD NOT include any additional identifier like Group/SubGroup in GitLab +// or organization in GitHub. +func (RefRevisionsDelta) AttrRepositoryName(val string) attribute.KeyValue { + return attribute.String("vcs.repository.name", val) +} + +// AttrProviderName returns an optional attribute for the "vcs.provider.name" +// semantic convention. It represents the name of the version control system +// provider. +func (RefRevisionsDelta) AttrProviderName(val ProviderNameAttr) attribute.KeyValue { + return attribute.String("vcs.provider.name", string(val)) +} + +// RefTime is an instrument used to record metric values conforming to the +// "vcs.ref.time" semantic conventions. It represents the time a ref (branch) +// created from the default branch (trunk) has existed. The `ref.type` attribute +// will always be `branch`. +type RefTime struct { + metric.Float64Gauge +} + +// NewRefTime returns a new RefTime instrument. +func NewRefTime( + m metric.Meter, + opt ...metric.Float64GaugeOption, +) (RefTime, error) { + // Check if the meter is nil. + if m == nil { + return RefTime{noop.Float64Gauge{}}, nil + } + + i, err := m.Float64Gauge( + "vcs.ref.time", + append([]metric.Float64GaugeOption{ + metric.WithDescription("Time a ref (branch) created from the default branch (trunk) has existed. 
The `ref.type` attribute will always be `branch`"), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return RefTime{noop.Float64Gauge{}}, err + } + return RefTime{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m RefTime) Inst() metric.Float64Gauge { + return m.Float64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (RefTime) Name() string { + return "vcs.ref.time" +} + +// Unit returns the semantic convention unit of the instrument +func (RefTime) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (RefTime) Description() string { + return "Time a ref (branch) created from the default branch (trunk) has existed. The `ref.type` attribute will always be `branch`" +} + +// Record records val to the current distribution for attrs. +// +// The refHeadName is the the name of the [reference] such as **branch** or +// **tag** in the repository. +// +// The refHeadType is the the type of the [reference] in the repository. +// +// The repositoryUrlFull is the the [canonical URL] of the repository providing +// the complete HTTP(S) address in order to locate and identify the repository +// through a browser. +// +// All additional attrs passed are included in the recorded value. +// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. 
+func (m RefTime) Record(
+	ctx context.Context,
+	val float64,
+	refHeadName string,
+	refHeadType RefHeadTypeAttr,
+	repositoryUrlFull string,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Float64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("vcs.ref.head.name", refHeadName),
+				attribute.String("vcs.ref.head.type", string(refHeadType)),
+				attribute.String("vcs.repository.url.full", repositoryUrlFull),
+			)...,
+		),
+	)
+
+	m.Float64Gauge.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m RefTime) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Gauge.Record(ctx, val)
+		// Return here: without it the value was recorded a second time below
+		// with an empty attribute set (see AddSet on the counter instruments,
+		// which already return after the fast path).
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Gauge.Record(ctx, val, *o...)
+}
+
+// AttrOwnerName returns an optional attribute for the "vcs.owner.name" semantic
+// convention. It represents the group owner within the version control system.
+func (RefTime) AttrOwnerName(val string) attribute.KeyValue {
+	return attribute.String("vcs.owner.name", val)
+}
+
+// AttrRepositoryName returns an optional attribute for the "vcs.repository.name"
+// semantic convention. It represents the human readable name of the repository.
+// It SHOULD NOT include any additional identifier like Group/SubGroup in GitLab
+// or organization in GitHub.
+func (RefTime) AttrRepositoryName(val string) attribute.KeyValue {
+	return attribute.String("vcs.repository.name", val)
+}
+
+// AttrProviderName returns an optional attribute for the "vcs.provider.name"
+// semantic convention. It represents the name of the version control system
+// provider.
+func (RefTime) AttrProviderName(val ProviderNameAttr) attribute.KeyValue { + return attribute.String("vcs.provider.name", string(val)) +} + +// RepositoryCount is an instrument used to record metric values conforming to +// the "vcs.repository.count" semantic conventions. It represents the number of +// repositories in an organization. +type RepositoryCount struct { + metric.Int64UpDownCounter +} + +// NewRepositoryCount returns a new RepositoryCount instrument. +func NewRepositoryCount( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (RepositoryCount, error) { + // Check if the meter is nil. + if m == nil { + return RepositoryCount{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "vcs.repository.count", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of repositories in an organization."), + metric.WithUnit("{repository}"), + }, opt...)..., + ) + if err != nil { + return RepositoryCount{noop.Int64UpDownCounter{}}, err + } + return RepositoryCount{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m RepositoryCount) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (RepositoryCount) Name() string { + return "vcs.repository.count" +} + +// Unit returns the semantic convention unit of the instrument +func (RepositoryCount) Unit() string { + return "{repository}" +} + +// Description returns the semantic convention description of the instrument +func (RepositoryCount) Description() string { + return "The number of repositories in an organization." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. 
+func (m RepositoryCount) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m RepositoryCount) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrOwnerName returns an optional attribute for the "vcs.owner.name" semantic +// convention. It represents the group owner within the version control system. +func (RepositoryCount) AttrOwnerName(val string) attribute.KeyValue { + return attribute.String("vcs.owner.name", val) +} + +// AttrProviderName returns an optional attribute for the "vcs.provider.name" +// semantic convention. It represents the name of the version control system +// provider. +func (RepositoryCount) AttrProviderName(val ProviderNameAttr) attribute.KeyValue { + return attribute.String("vcs.provider.name", string(val)) +} \ No newline at end of file diff --git a/semconv/v1.37.0/MIGRATION.md b/semconv/v1.37.0/MIGRATION.md new file mode 100644 index 00000000000..24805478952 --- /dev/null +++ b/semconv/v1.37.0/MIGRATION.md @@ -0,0 +1,41 @@ + +# Migration from v1.36.0 to v1.37.0 + +The `go.opentelemetry.io/otel/semconv/v1.37.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.36.0` with the following exceptions. + +## Removed + +The following declarations have been removed. 
+Refer to the [OpenTelemetry Semantic Conventions documentation] for deprecation instructions. + +If the type is not listed in the documentation as deprecated, it has been removed in this version due to lack of applicability or use. +If you use any of these non-deprecated declarations in your Go application, please [open an issue] describing your use-case. + +- `ContainerRuntime` +- `ContainerRuntimeKey` +- `GenAIOpenAIRequestServiceTierAuto` +- `GenAIOpenAIRequestServiceTierDefault` +- `GenAIOpenAIRequestServiceTierKey` +- `GenAIOpenAIResponseServiceTier` +- `GenAIOpenAIResponseServiceTierKey` +- `GenAIOpenAIResponseSystemFingerprint` +- `GenAIOpenAIResponseSystemFingerprintKey` +- `GenAISystemAWSBedrock` +- `GenAISystemAnthropic` +- `GenAISystemAzureAIInference` +- `GenAISystemAzureAIOpenAI` +- `GenAISystemCohere` +- `GenAISystemDeepseek` +- `GenAISystemGCPGemini` +- `GenAISystemGCPGenAI` +- `GenAISystemGCPVertexAI` +- `GenAISystemGroq` +- `GenAISystemIBMWatsonxAI` +- `GenAISystemKey` +- `GenAISystemMistralAI` +- `GenAISystemOpenAI` +- `GenAISystemPerplexity` +- `GenAISystemXai` + +[OpenTelemetry Semantic Conventions documentation]: https://github.com/open-telemetry/semantic-conventions +[open an issue]: https://github.com/open-telemetry/opentelemetry-go/issues/new?template=Blank+issue diff --git a/semconv/v1.37.0/README.md b/semconv/v1.37.0/README.md new file mode 100644 index 00000000000..d795247f326 --- /dev/null +++ b/semconv/v1.37.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.37.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.37.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.37.0) diff --git a/semconv/v1.37.0/attribute_group.go b/semconv/v1.37.0/attribute_group.go new file mode 100644 index 00000000000..b6b27498f2b --- /dev/null +++ b/semconv/v1.37.0/attribute_group.go @@ -0,0 +1,15193 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention 
specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" + +import "go.opentelemetry.io/otel/attribute" + +// Namespace: android +const ( + // AndroidAppStateKey is the attribute Key conforming to the "android.app.state" + // semantic conventions. It represents the this attribute represents the state + // of the application. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "created" + // Note: The Android lifecycle states are defined in + // [Activity lifecycle callbacks], and from which the `OS identifiers` are + // derived. + // + // [Activity lifecycle callbacks]: https://developer.android.com/guide/components/activities/activity-lifecycle#lc + AndroidAppStateKey = attribute.Key("android.app.state") + + // AndroidOSAPILevelKey is the attribute Key conforming to the + // "android.os.api_level" semantic conventions. It represents the uniquely + // identifies the framework API revision offered by a version (`os.version`) of + // the android operating system. More information can be found in the + // [Android API levels documentation]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "33", "32" + // + // [Android API levels documentation]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels + AndroidOSAPILevelKey = attribute.Key("android.os.api_level") +) + +// AndroidOSAPILevel returns an attribute KeyValue conforming to the +// "android.os.api_level" semantic conventions. It represents the uniquely +// identifies the framework API revision offered by a version (`os.version`) of +// the android operating system. More information can be found in the +// [Android API levels documentation]. 
+// +// [Android API levels documentation]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels +func AndroidOSAPILevel(val string) attribute.KeyValue { + return AndroidOSAPILevelKey.String(val) +} + +// Enum values for android.app.state +var ( + // Any time before Activity.onResume() or, if the app has no Activity, + // Context.startService() has been called in the app for the first time. + // + // Stability: development + AndroidAppStateCreated = AndroidAppStateKey.String("created") + // Any time after Activity.onPause() or, if the app has no Activity, + // Context.stopService() has been called when the app was in the foreground + // state. + // + // Stability: development + AndroidAppStateBackground = AndroidAppStateKey.String("background") + // Any time after Activity.onResume() or, if the app has no Activity, + // Context.startService() has been called when the app was in either the created + // or background states. + // + // Stability: development + AndroidAppStateForeground = AndroidAppStateKey.String("foreground") +) + +// Namespace: app +const ( + // AppBuildIDKey is the attribute Key conforming to the "app.build_id" semantic + // conventions. It represents the unique identifier for a particular build or + // compilation of the application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "6cff0a7e-cefc-4668-96f5-1273d8b334d0", + // "9f2b833506aa6973a92fde9733e6271f", "my-app-1.0.0-code-123" + AppBuildIDKey = attribute.Key("app.build_id") + + // AppInstallationIDKey is the attribute Key conforming to the + // "app.installation.id" semantic conventions. It represents a unique identifier + // representing the installation of an application on a specific device. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2ab2916d-a51f-4ac8-80ee-45ac31a28092" + // Note: Its value SHOULD persist across launches of the same application + // installation, including through application upgrades. + // It SHOULD change if the application is uninstalled or if all applications of + // the vendor are uninstalled. + // Additionally, users might be able to reset this value (e.g. by clearing + // application data). + // If an app is installed multiple times on the same device (e.g. in different + // accounts on Android), each `app.installation.id` SHOULD have a different + // value. + // If multiple OpenTelemetry SDKs are used within the same application, they + // SHOULD use the same value for `app.installation.id`. + // Hardware IDs (e.g. serial number, IMEI, MAC address) MUST NOT be used as the + // `app.installation.id`. + // + // For iOS, this value SHOULD be equal to the [vendor identifier]. + // + // For Android, examples of `app.installation.id` implementations include: + // + // - [Firebase Installation ID]. + // - A globally unique UUID which is persisted across sessions in your + // application. + // - [App set ID]. + // - [`Settings.getString(Settings.Secure.ANDROID_ID)`]. + // + // More information about Android identifier best practices can be found in the + // [Android user data IDs guide]. 
+ // + // [vendor identifier]: https://developer.apple.com/documentation/uikit/uidevice/identifierforvendor + // [Firebase Installation ID]: https://firebase.google.com/docs/projects/manage-installations + // [App set ID]: https://developer.android.com/identity/app-set-id + // [`Settings.getString(Settings.Secure.ANDROID_ID)`]: https://developer.android.com/reference/android/provider/Settings.Secure#ANDROID_ID + // [Android user data IDs guide]: https://developer.android.com/training/articles/user-data-ids + AppInstallationIDKey = attribute.Key("app.installation.id") + + // AppJankFrameCountKey is the attribute Key conforming to the + // "app.jank.frame_count" semantic conventions. It represents a number of frame + // renders that experienced jank. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 9, 42 + // Note: Depending on platform limitations, the value provided MAY be + // approximation. + AppJankFrameCountKey = attribute.Key("app.jank.frame_count") + + // AppJankPeriodKey is the attribute Key conforming to the "app.jank.period" + // semantic conventions. It represents the time period, in seconds, for which + // this jank is being reported. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0, 5.0, 10.24 + AppJankPeriodKey = attribute.Key("app.jank.period") + + // AppJankThresholdKey is the attribute Key conforming to the + // "app.jank.threshold" semantic conventions. It represents the minimum + // rendering threshold for this jank, in seconds. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0.016, 0.7, 1.024 + AppJankThresholdKey = attribute.Key("app.jank.threshold") + + // AppScreenCoordinateXKey is the attribute Key conforming to the + // "app.screen.coordinate.x" semantic conventions. It represents the x + // (horizontal) coordinate of a screen coordinate, in screen pixels. 
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0, 131 + AppScreenCoordinateXKey = attribute.Key("app.screen.coordinate.x") + + // AppScreenCoordinateYKey is the attribute Key conforming to the + // "app.screen.coordinate.y" semantic conventions. It represents the y + // (vertical) component of a screen coordinate, in screen pixels. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 12, 99 + AppScreenCoordinateYKey = attribute.Key("app.screen.coordinate.y") + + // AppWidgetIDKey is the attribute Key conforming to the "app.widget.id" + // semantic conventions. It represents an identifier that uniquely + // differentiates this widget from other widgets in the same application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "f9bc787d-ff05-48ad-90e1-fca1d46130b3", "submit_order_1829" + // Note: A widget is an application component, typically an on-screen visual GUI + // element. + AppWidgetIDKey = attribute.Key("app.widget.id") + + // AppWidgetNameKey is the attribute Key conforming to the "app.widget.name" + // semantic conventions. It represents the name of an application widget. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "submit", "attack", "Clear Cart" + // Note: A widget is an application component, typically an on-screen visual GUI + // element. + AppWidgetNameKey = attribute.Key("app.widget.name") +) + +// AppBuildID returns an attribute KeyValue conforming to the "app.build_id" +// semantic conventions. It represents the unique identifier for a particular +// build or compilation of the application. +func AppBuildID(val string) attribute.KeyValue { + return AppBuildIDKey.String(val) +} + +// AppInstallationID returns an attribute KeyValue conforming to the +// "app.installation.id" semantic conventions. 
It represents a unique identifier +// representing the installation of an application on a specific device. +func AppInstallationID(val string) attribute.KeyValue { + return AppInstallationIDKey.String(val) +} + +// AppJankFrameCount returns an attribute KeyValue conforming to the +// "app.jank.frame_count" semantic conventions. It represents a number of frame +// renders that experienced jank. +func AppJankFrameCount(val int) attribute.KeyValue { + return AppJankFrameCountKey.Int(val) +} + +// AppJankPeriod returns an attribute KeyValue conforming to the +// "app.jank.period" semantic conventions. It represents the time period, in +// seconds, for which this jank is being reported. +func AppJankPeriod(val float64) attribute.KeyValue { + return AppJankPeriodKey.Float64(val) +} + +// AppJankThreshold returns an attribute KeyValue conforming to the +// "app.jank.threshold" semantic conventions. It represents the minimum rendering +// threshold for this jank, in seconds. +func AppJankThreshold(val float64) attribute.KeyValue { + return AppJankThresholdKey.Float64(val) +} + +// AppScreenCoordinateX returns an attribute KeyValue conforming to the +// "app.screen.coordinate.x" semantic conventions. It represents the x +// (horizontal) coordinate of a screen coordinate, in screen pixels. +func AppScreenCoordinateX(val int) attribute.KeyValue { + return AppScreenCoordinateXKey.Int(val) +} + +// AppScreenCoordinateY returns an attribute KeyValue conforming to the +// "app.screen.coordinate.y" semantic conventions. It represents the y (vertical) +// component of a screen coordinate, in screen pixels. +func AppScreenCoordinateY(val int) attribute.KeyValue { + return AppScreenCoordinateYKey.Int(val) +} + +// AppWidgetID returns an attribute KeyValue conforming to the "app.widget.id" +// semantic conventions. It represents an identifier that uniquely differentiates +// this widget from other widgets in the same application. 
+func AppWidgetID(val string) attribute.KeyValue { + return AppWidgetIDKey.String(val) +} + +// AppWidgetName returns an attribute KeyValue conforming to the +// "app.widget.name" semantic conventions. It represents the name of an +// application widget. +func AppWidgetName(val string) attribute.KeyValue { + return AppWidgetNameKey.String(val) +} + +// Namespace: artifact +const ( + // ArtifactAttestationFilenameKey is the attribute Key conforming to the + // "artifact.attestation.filename" semantic conventions. It represents the + // provenance filename of the built attestation which directly relates to the + // build artifact filename. This filename SHOULD accompany the artifact at + // publish time. See the [SLSA Relationship] specification for more information. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "golang-binary-amd64-v0.1.0.attestation", + // "docker-image-amd64-v0.1.0.intoto.json1", "release-1.tar.gz.attestation", + // "file-name-package.tar.gz.intoto.json1" + // + // [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations + ArtifactAttestationFilenameKey = attribute.Key("artifact.attestation.filename") + + // ArtifactAttestationHashKey is the attribute Key conforming to the + // "artifact.attestation.hash" semantic conventions. It represents the full + // [hash value (see glossary)], of the built attestation. Some envelopes in the + // [software attestation space] also refer to this as the **digest**. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1b31dfcd5b7f9267bf2ff47651df1cfb9147b9e4df1f335accf65b4cda498408" + // + // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + // [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec + ArtifactAttestationHashKey = attribute.Key("artifact.attestation.hash") + + // ArtifactAttestationIDKey is the attribute Key conforming to the + // "artifact.attestation.id" semantic conventions. It represents the id of the + // build [software attestation]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123" + // + // [software attestation]: https://slsa.dev/attestation-model + ArtifactAttestationIDKey = attribute.Key("artifact.attestation.id") + + // ArtifactFilenameKey is the attribute Key conforming to the + // "artifact.filename" semantic conventions. It represents the human readable + // file name of the artifact, typically generated during build and release + // processes. Often includes the package name and version in the file name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "golang-binary-amd64-v0.1.0", "docker-image-amd64-v0.1.0", + // "release-1.tar.gz", "file-name-package.tar.gz" + // Note: This file name can also act as the [Package Name] + // in cases where the package ecosystem maps accordingly. + // Additionally, the artifact [can be published] + // for others, but that is not a guarantee. + // + // [Package Name]: https://slsa.dev/spec/v1.0/terminology#package-model + // [can be published]: https://slsa.dev/spec/v1.0/terminology#software-supply-chain + ArtifactFilenameKey = attribute.Key("artifact.filename") + + // ArtifactHashKey is the attribute Key conforming to the "artifact.hash" + // semantic conventions. 
It represents the full [hash value (see glossary)], + // often found in checksum.txt on a release of the artifact and used to verify + // package integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9ff4c52759e2c4ac70b7d517bc7fcdc1cda631ca0045271ddd1b192544f8a3e9" + // Note: The specific algorithm used to create the cryptographic hash value is + // not defined. In situations where an artifact has multiple + // cryptographic hashes, it is up to the implementer to choose which + // hash value to set here; this should be the most secure hash algorithm + // that is suitable for the situation and consistent with the + // corresponding attestation. The implementer can then provide the other + // hash values through an additional set of attribute extensions as they + // deem necessary. + // + // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + ArtifactHashKey = attribute.Key("artifact.hash") + + // ArtifactPurlKey is the attribute Key conforming to the "artifact.purl" + // semantic conventions. It represents the [Package URL] of the + // [package artifact] provides a standard way to identify and locate the + // packaged artifact. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "pkg:github/package-url/purl-spec@1209109710924", + // "pkg:npm/foo@12.12.3" + // + // [Package URL]: https://github.com/package-url/purl-spec + // [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model + ArtifactPurlKey = attribute.Key("artifact.purl") + + // ArtifactVersionKey is the attribute Key conforming to the "artifact.version" + // semantic conventions. It represents the version of the artifact. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "v0.1.0", "1.2.1", "122691-build" + ArtifactVersionKey = attribute.Key("artifact.version") +) + +// ArtifactAttestationFilename returns an attribute KeyValue conforming to the +// "artifact.attestation.filename" semantic conventions. It represents the +// provenance filename of the built attestation which directly relates to the +// build artifact filename. This filename SHOULD accompany the artifact at +// publish time. See the [SLSA Relationship] specification for more information. +// +// [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations +func ArtifactAttestationFilename(val string) attribute.KeyValue { + return ArtifactAttestationFilenameKey.String(val) +} + +// ArtifactAttestationHash returns an attribute KeyValue conforming to the +// "artifact.attestation.hash" semantic conventions. It represents the full +// [hash value (see glossary)], of the built attestation. Some envelopes in the +// [software attestation space] also refer to this as the **digest**. +// +// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf +// [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec +func ArtifactAttestationHash(val string) attribute.KeyValue { + return ArtifactAttestationHashKey.String(val) +} + +// ArtifactAttestationID returns an attribute KeyValue conforming to the +// "artifact.attestation.id" semantic conventions. It represents the id of the +// build [software attestation]. +// +// [software attestation]: https://slsa.dev/attestation-model +func ArtifactAttestationID(val string) attribute.KeyValue { + return ArtifactAttestationIDKey.String(val) +} + +// ArtifactFilename returns an attribute KeyValue conforming to the +// "artifact.filename" semantic conventions. 
It represents the human readable +// file name of the artifact, typically generated during build and release +// processes. Often includes the package name and version in the file name. +func ArtifactFilename(val string) attribute.KeyValue { + return ArtifactFilenameKey.String(val) +} + +// ArtifactHash returns an attribute KeyValue conforming to the "artifact.hash" +// semantic conventions. It represents the full [hash value (see glossary)], +// often found in checksum.txt on a release of the artifact and used to verify +// package integrity. +// +// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf +func ArtifactHash(val string) attribute.KeyValue { + return ArtifactHashKey.String(val) +} + +// ArtifactPurl returns an attribute KeyValue conforming to the "artifact.purl" +// semantic conventions. It represents the [Package URL] of the +// [package artifact] provides a standard way to identify and locate the packaged +// artifact. +// +// [Package URL]: https://github.com/package-url/purl-spec +// [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model +func ArtifactPurl(val string) attribute.KeyValue { + return ArtifactPurlKey.String(val) +} + +// ArtifactVersion returns an attribute KeyValue conforming to the +// "artifact.version" semantic conventions. It represents the version of the +// artifact. +func ArtifactVersion(val string) attribute.KeyValue { + return ArtifactVersionKey.String(val) +} + +// Namespace: aws +const ( + // AWSBedrockGuardrailIDKey is the attribute Key conforming to the + // "aws.bedrock.guardrail.id" semantic conventions. It represents the unique + // identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and + // prevent unwanted behavior from model responses or user messages. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "sgi5gkybzqak" + // + // [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html + AWSBedrockGuardrailIDKey = attribute.Key("aws.bedrock.guardrail.id") + + // AWSBedrockKnowledgeBaseIDKey is the attribute Key conforming to the + // "aws.bedrock.knowledge_base.id" semantic conventions. It represents the + // unique identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a + // bank of information that can be queried by models to generate more relevant + // responses and augment prompts. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "XFWUPB9PAW" + // + // [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html + AWSBedrockKnowledgeBaseIDKey = attribute.Key("aws.bedrock.knowledge_base.id") + + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to the + // "aws.dynamodb.attribute_definitions" semantic conventions. It represents the + // JSON-serialized value of each item in the `AttributeDefinitions` request + // field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "AttributeName": "string", "AttributeType": "string" }" + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "lives", "id" + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. 
It represents the value + // of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, + // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": + // "string", "WriteCapacityUnits": number }" + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of the + // `Count` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the + // value of the `ExclusiveStartTableName` request parameter. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Users", "CatsTable" + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_index_updates" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }" + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") + + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to the + // "aws.dynamodb.global_secondary_indexes" semantic conventions. It represents + // the JSON-serialized value of each item of the `GlobalSecondaryIndexes` + // request field. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }" + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value of + // the `IndexName` request parameter. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "name_to_group" + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to the + // "aws.dynamodb.item_collection_metrics" semantic conventions. It represents + // the JSON-serialized value of the `ItemCollectionMetrics` response field. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }" + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of the + // `Limit` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to the + // "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents + // the JSON-serialized value of each item of the `LocalSecondaryIndexes` request + // field. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "{ "IndexArn": "string", "IndexName": "string", "IndexSizeBytes": + // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" } }" + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value of + // the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Title", "Title, Price, Color", "Title, Description, RelatedItems, + // ProductReviews" + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.provisioned_read_capacity" semantic conventions. It represents + // the value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.provisioned_write_capacity" semantic conventions. It represents + // the value of the `ProvisionedThroughput.WriteCapacityUnits` request + // parameter. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. 
It represents the value of + // the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the value of + // the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") + + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of the + // `Segment` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of the + // `Select` request parameter. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ALL_ATTRIBUTES", "COUNT" + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the number of + // items in the `TableNames` response parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") + + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys in + // the `RequestItems` object field. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Users", "Cats" + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the value + // of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS cluster]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster" + // + // [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container instance]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9" + // + // [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch type] + // for an ECS task. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [launch type]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the "aws.ecs.task.arn" + // semantic conventions. It represents the ARN of a running [ECS task]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b", + // "arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd" + // + // [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the family name of + // the [ECS task definition] used to create the ECS task. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-family" + // + // [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" + // semantic conventions. It represents the ID of a running ECS task. The ID MUST + // be extracted from `task.arn`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "10838bed-421f-43ef-870a-f43feacbbb5b", + // "23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd" + AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. 
It represents the revision for + // the task definition used to create the ECS task. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "8", "26" + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") + + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS + // cluster. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster" + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") + + // AWSExtendedRequestIDKey is the attribute Key conforming to the + // "aws.extended_request_id" semantic conventions. It represents the AWS + // extended request ID as returned in the response header `x-amz-id-2`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "wzHcyEWfmOGDIE5QOhTAqFDoDWP3y8IUvpNINCwL9N4TEHbUw0/gZJ+VZTmCNCWR7fezEN3eCiQ=" + AWSExtendedRequestIDKey = attribute.Key("aws.extended_request_id") + + // AWSKinesisStreamNameKey is the attribute Key conforming to the + // "aws.kinesis.stream_name" semantic conventions. It represents the name of the + // AWS Kinesis [stream] the request refers to. Corresponds to the + // `--stream-name` parameter of the Kinesis [describe-stream] operation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "some-stream-name" + // + // [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html + // [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html + AWSKinesisStreamNameKey = attribute.Key("aws.kinesis.stream_name") + + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. 
It represents the full invoked + // ARN as provided on the `Context` passed to the function ( + // `Lambda-Runtime-Invoked-Function-Arn` header on the + // `/runtime/invocation/next` applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:lambda:us-east-1:123456:function:myfunction:myalias" + // Note: This may be different from `cloud.resource_id` if an alias is involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") + + // AWSLambdaResourceMappingIDKey is the attribute Key conforming to the + // "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID + // of the [AWS Lambda EventSource Mapping]. An event source is mapped to a lambda + // function. Its contents are read by Lambda and used to trigger a function. + // This isn't available in the lambda execution context or the lambda runtime + // environment. This is going to be populated by the AWS SDK for each language + // when that UUID is present. Some of these operations are + // Create/Delete/Get/List/Update EventSourceMapping. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "587ad24b-03b9-4413-8202-bbd56b36e5b7" + // + // [AWS Lambda EventSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html + AWSLambdaResourceMappingIDKey = attribute.Key("aws.lambda.resource_mapping.id") + + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon Resource + // Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*" + // Note: See the [log group ARN format documentation]. 
+ // + // [log group ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of the + // AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/aws/lambda/my-function", "opentelemetry-service" + // Note: Multiple log groups must be supported for cases like multi-container + // applications, where a single application has sidecar containers, and each + // write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the + // AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b" + // Note: See the [log stream ARN format documentation]. One log group can + // contain several log streams, so these ARNs necessarily identify both a log + // group and a log stream. + // + // [log stream ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) of the + // AWS log stream(s) an application is writing to. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "logs/main/10838bed-421f-43ef-870a-f43feacbbb5b" + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. It represents the AWS request ID as returned in the + // response headers `x-amzn-requestid`, `x-amzn-request-id` or + // `x-amz-request-id`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "79b9da39-b7ae-508a-a6bc-864b2829c622", "C9ER4AJX75574TDJ" + AWSRequestIDKey = attribute.Key("aws.request_id") + + // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" + // semantic conventions. It represents the S3 bucket name the request refers to. + // Corresponds to the `--bucket` parameter of the [S3 API] operations. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "some-bucket-name" + // Note: The `bucket` attribute is applicable to all S3 operations that + // reference a bucket, i.e. that require the bucket name as a mandatory + // parameter. + // This applies to almost all S3 operations except `list-buckets`. + // + // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html + AWSS3BucketKey = attribute.Key("aws.s3.bucket") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source object + // (in the form `bucket`/`key`) for the copy operation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "someFile.yml" + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 API]. 
+ // This applies in particular to the following operations: + // + // - [copy-object] + // - [upload-part-copy] + // + // + // [copy-object operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "Objects=[{Key=string,VersionId=string},{Key=string,VersionId=string}],Quiet=boolean" + // Note: The `delete` attribute is only applicable to the [delete-object] + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 API]. + // + // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html + // [delete-objects operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 API] operations. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "someFile.yml" + // Note: The `key` attribute is applicable to all object-related S3 operations, + // i.e. that require the object key as a mandatory parameter. 
+ // This applies in particular to the following operations: + // + // - [copy-object] + // - [delete-object] + // - [get-object] + // - [head-object] + // - [put-object] + // - [restore-object] + // - [select-object-content] + // - [abort-multipart-upload] + // - [complete-multipart-upload] + // - [create-multipart-upload] + // - [list-parts] + // - [upload-part] + // - [upload-part-copy] + // + // + // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html + // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html + // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html + // [get-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html + // [head-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html + // [put-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html + // [restore-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html + // [select-object-content]: https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html + // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html + // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html + // [create-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html + // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html + // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. 
It represents the part number of + // the part being uploaded in a multipart-upload operation. This is a positive + // integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the [upload-part] + // and [upload-part-copy] operations. + // The `part_number` attribute corresponds to the `--part-number` parameter of + // the + // [upload-part operation within the S3 API]. + // + // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html + // [upload-part operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") + + // AWSS3UploadIDKey is the attribute Key conforming to the "aws.s3.upload_id" + // semantic conventions. It represents the upload ID that identifies the + // multipart upload. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ" + // Note: The `upload_id` attribute applies to S3 multipart-upload operations and + // corresponds to the `--upload-id` parameter + // of the [S3 API] multipart operations. 
+ // This applies in particular to the following operations:
+ //
+ // - [abort-multipart-upload]
+ // - [complete-multipart-upload]
+ // - [list-parts]
+ // - [upload-part]
+ // - [upload-part-copy]
+ //
+ //
+ // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+ // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html
+ // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html
+ // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html
+ // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html
+ // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html
+ AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
+
+ // AWSSecretsmanagerSecretARNKey is the attribute Key conforming to the
+ // "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN
+ // of the Secret stored in the Secrets Manager.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "arn:aws:secretsmanager:us-east-1:123456789012:secret:SecretName-6RandomCharacters"
+ AWSSecretsmanagerSecretARNKey = attribute.Key("aws.secretsmanager.secret.arn")
+
+ // AWSSNSTopicARNKey is the attribute Key conforming to the "aws.sns.topic.arn"
+ // semantic conventions. It represents the ARN of the AWS SNS Topic. An Amazon
+ // SNS [topic] is a logical access point that acts as a communication channel.
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:sns:us-east-1:123456789012:mystack-mytopic-NZJ5JSMVGFIE" + // + // [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html + AWSSNSTopicARNKey = attribute.Key("aws.sns.topic.arn") + + // AWSSQSQueueURLKey is the attribute Key conforming to the "aws.sqs.queue.url" + // semantic conventions. It represents the URL of the AWS SQS Queue. It's a + // unique identifier for a queue in Amazon Simple Queue Service (SQS) and is + // used to access the queue and perform actions on it. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://sqs.us-east-1.amazonaws.com/123456789012/MyQueue" + AWSSQSQueueURLKey = attribute.Key("aws.sqs.queue.url") + + // AWSStepFunctionsActivityARNKey is the attribute Key conforming to the + // "aws.step_functions.activity.arn" semantic conventions. It represents the ARN + // of the AWS Step Functions Activity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:states:us-east-1:123456789012:activity:get-greeting" + AWSStepFunctionsActivityARNKey = attribute.Key("aws.step_functions.activity.arn") + + // AWSStepFunctionsStateMachineARNKey is the attribute Key conforming to the + // "aws.step_functions.state_machine.arn" semantic conventions. It represents + // the ARN of the AWS Step Functions State Machine. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "arn:aws:states:us-east-1:123456789012:stateMachine:myStateMachine:1" + AWSStepFunctionsStateMachineARNKey = attribute.Key("aws.step_functions.state_machine.arn") +) + +// AWSBedrockGuardrailID returns an attribute KeyValue conforming to the +// "aws.bedrock.guardrail.id" semantic conventions. It represents the unique +// identifier of the AWS Bedrock Guardrail. 
A [guardrail] helps safeguard and +// prevent unwanted behavior from model responses or user messages. +// +// [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html +func AWSBedrockGuardrailID(val string) attribute.KeyValue { + return AWSBedrockGuardrailIDKey.String(val) +} + +// AWSBedrockKnowledgeBaseID returns an attribute KeyValue conforming to the +// "aws.bedrock.knowledge_base.id" semantic conventions. It represents the unique +// identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a bank of +// information that can be queried by models to generate more relevant responses +// and augment prompts. +// +// [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html +func AWSBedrockKnowledgeBaseID(val string) attribute.KeyValue { + return AWSBedrockKnowledgeBaseIDKey.String(val) +} + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming to +// the "aws.dynamodb.attribute_definitions" semantic conventions. It represents +// the JSON-serialized value of each item in the `AttributeDefinitions` request +// field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to the +// "aws.dynamodb.attributes_to_get" semantic conventions. It represents the value +// of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. 
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to the +// "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming to the +// "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the +// value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the +// `GlobalSecondaryIndexUpdates` request field. +func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue conforming to +// the "aws.dynamodb.global_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field. 
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of the +// `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming to +// the "aws.dynamodb.item_collection_metrics" semantic conventions. It represents +// the JSON-serialized value of the `ItemCollectionMetrics` response field. +func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming to +// the "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents +// the JSON-serialized value of each item of the `LocalSecondaryIndexes` request +// field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of the +// `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.provisioned_read_capacity" semantic conventions. 
It +// represents the value of the `ProvisionedThroughput.ReadCapacityUnits` request +// parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue conforming +// to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. It +// represents the value of the `ProvisionedThroughput.WriteCapacityUnits` request +// parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value of +// the `ScannedCount` response parameter. +func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. 
It represents the number of +// items in the `TableNames` response parameter. +func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in the +// `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value of +// the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an +// [ECS cluster]. +// +// [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container instance]. +// +// [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running +// [ECS task]. 
+// +// [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the family name of +// the [ECS task definition] used to create the ECS task. +// +// [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskID returns an attribute KeyValue conforming to the "aws.ecs.task.id" +// semantic conventions. It represents the ID of a running ECS task. The ID MUST +// be extracted from `task.arn`. +func AWSECSTaskID(val string) attribute.KeyValue { + return AWSECSTaskIDKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// the task definition used to create the ECS task. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// AWSExtendedRequestID returns an attribute KeyValue conforming to the +// "aws.extended_request_id" semantic conventions. It represents the AWS extended +// request ID as returned in the response header `x-amz-id-2`. +func AWSExtendedRequestID(val string) attribute.KeyValue { + return AWSExtendedRequestIDKey.String(val) +} + +// AWSKinesisStreamName returns an attribute KeyValue conforming to the +// "aws.kinesis.stream_name" semantic conventions. 
It represents the name of the
+ // AWS Kinesis [stream] the request refers to. Corresponds to the `--stream-name`
+ // parameter of the Kinesis [describe-stream] operation.
+ //
+ // [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html
+ // [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html
+func AWSKinesisStreamName(val string) attribute.KeyValue {
+ return AWSKinesisStreamNameKey.String(val)
+}
+
+ // AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+ // "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked
+ // ARN as provided on the `Context` passed to the function (
+ // `Lambda-Runtime-Invoked-Function-Arn` header on the `/runtime/invocation/next`
+ // applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+ return AWSLambdaInvokedARNKey.String(val)
+}
+
+ // AWSLambdaResourceMappingID returns an attribute KeyValue conforming to the
+ // "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID
+ // of the [AWS Lambda EventSource Mapping]. An event source is mapped to a lambda
+ // function. Its contents are read by Lambda and used to trigger a function.
+ // This isn't available in the lambda execution context or the lambda runtime
+ // environment. This is going to be populated by the AWS SDK for each language
+ // when that UUID is present. Some of these operations are
+ // Create/Delete/Get/List/Update EventSourceMapping.
+ //
+ // [AWS Lambda EventSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html
+func AWSLambdaResourceMappingID(val string) attribute.KeyValue {
+ return AWSLambdaResourceMappingIDKey.String(val)
+}
+
+ // AWSLogGroupARNs returns an attribute KeyValue conforming to the
+ // "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+ // Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of the +// AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// AWSRequestID returns an attribute KeyValue conforming to the "aws.request_id" +// semantic conventions. It represents the AWS request ID as returned in the +// response headers `x-amzn-requestid`, `x-amzn-request-id` or `x-amz-request-id` +// . +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// AWSS3Bucket returns an attribute KeyValue conforming to the "aws.s3.bucket" +// semantic conventions. It represents the S3 bucket name the request refers to. +// Corresponds to the `--bucket` parameter of the [S3 API] operations. +// +// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object (in +// the form `bucket`/`key`) for the copy operation. 
+func AWSS3CopySource(val string) attribute.KeyValue {
+ return AWSS3CopySourceKey.String(val)
+}
+
+ // AWSS3Delete returns an attribute KeyValue conforming to the "aws.s3.delete"
+ // semantic conventions. It represents the delete request container that
+ // specifies the objects to be deleted.
+func AWSS3Delete(val string) attribute.KeyValue {
+ return AWSS3DeleteKey.String(val)
+}
+
+ // AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" semantic
+ // conventions. It represents the S3 object key the request refers to.
+ // Corresponds to the `--key` parameter of the [S3 API] operations.
+ //
+ // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+func AWSS3Key(val string) attribute.KeyValue {
+ return AWSS3KeyKey.String(val)
+}
+
+ // AWSS3PartNumber returns an attribute KeyValue conforming to the
+ // "aws.s3.part_number" semantic conventions. It represents the part number of
+ // the part being uploaded in a multipart-upload operation. This is a positive
+ // integer between 1 and 10,000.
+func AWSS3PartNumber(val int) attribute.KeyValue {
+ return AWSS3PartNumberKey.Int(val)
+}
+
+ // AWSS3UploadID returns an attribute KeyValue conforming to the
+ // "aws.s3.upload_id" semantic conventions. It represents the upload ID that
+ // identifies the multipart upload.
+func AWSS3UploadID(val string) attribute.KeyValue {
+ return AWSS3UploadIDKey.String(val)
+}
+
+ // AWSSecretsmanagerSecretARN returns an attribute KeyValue conforming to the
+ // "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN of
+ // the Secret stored in the Secrets Manager.
+func AWSSecretsmanagerSecretARN(val string) attribute.KeyValue {
+ return AWSSecretsmanagerSecretARNKey.String(val)
+}
+
+ // AWSSNSTopicARN returns an attribute KeyValue conforming to the
+ // "aws.sns.topic.arn" semantic conventions. It represents the ARN of the AWS SNS
+ // Topic. An Amazon SNS [topic] is a logical access point that acts as a
+ // communication channel.
+// +// [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html +func AWSSNSTopicARN(val string) attribute.KeyValue { + return AWSSNSTopicARNKey.String(val) +} + +// AWSSQSQueueURL returns an attribute KeyValue conforming to the +// "aws.sqs.queue.url" semantic conventions. It represents the URL of the AWS SQS +// Queue. It's a unique identifier for a queue in Amazon Simple Queue Service +// (SQS) and is used to access the queue and perform actions on it. +func AWSSQSQueueURL(val string) attribute.KeyValue { + return AWSSQSQueueURLKey.String(val) +} + +// AWSStepFunctionsActivityARN returns an attribute KeyValue conforming to the +// "aws.step_functions.activity.arn" semantic conventions. It represents the ARN +// of the AWS Step Functions Activity. +func AWSStepFunctionsActivityARN(val string) attribute.KeyValue { + return AWSStepFunctionsActivityARNKey.String(val) +} + +// AWSStepFunctionsStateMachineARN returns an attribute KeyValue conforming to +// the "aws.step_functions.state_machine.arn" semantic conventions. It represents +// the ARN of the AWS Step Functions State Machine. +func AWSStepFunctionsStateMachineARN(val string) attribute.KeyValue { + return AWSStepFunctionsStateMachineARNKey.String(val) +} + +// Enum values for aws.ecs.launchtype +var ( + // Amazon EC2 + // Stability: development + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // Amazon Fargate + // Stability: development + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// Namespace: azure +const ( + // AzureClientIDKey is the attribute Key conforming to the "azure.client.id" + // semantic conventions. It represents the unique identifier of the client + // instance. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "3ba4827d-4422-483f-b59f-85b74211c11d", "storage-client-1" + AzureClientIDKey = attribute.Key("azure.client.id") + + // AzureCosmosDBConnectionModeKey is the attribute Key conforming to the + // "azure.cosmosdb.connection.mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AzureCosmosDBConnectionModeKey = attribute.Key("azure.cosmosdb.connection.mode") + + // AzureCosmosDBConsistencyLevelKey is the attribute Key conforming to the + // "azure.cosmosdb.consistency.level" semantic conventions. It represents the + // account or request [consistency level]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Eventual", "ConsistentPrefix", "BoundedStaleness", "Strong", + // "Session" + // + // [consistency level]: https://learn.microsoft.com/azure/cosmos-db/consistency-levels + AzureCosmosDBConsistencyLevelKey = attribute.Key("azure.cosmosdb.consistency.level") + + // AzureCosmosDBOperationContactedRegionsKey is the attribute Key conforming to + // the "azure.cosmosdb.operation.contacted_regions" semantic conventions. It + // represents the list of regions contacted during operation in the order that + // they were contacted. If there is more than one region listed, it indicates + // that the operation was performed on multiple regions i.e. cross-regional + // call. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "North Central US", "Australia East", "Australia Southeast" + // Note: Region name matches the format of `displayName` in [Azure Location API] + // + // [Azure Location API]: https://learn.microsoft.com/rest/api/subscription/subscriptions/list-locations?view=rest-subscription-2021-10-01&tabs=HTTP#location + AzureCosmosDBOperationContactedRegionsKey = attribute.Key("azure.cosmosdb.operation.contacted_regions") + + // AzureCosmosDBOperationRequestChargeKey is the attribute Key conforming to the + // "azure.cosmosdb.operation.request_charge" semantic conventions. It represents + // the number of request units consumed by the operation. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 46.18, 1.0 + AzureCosmosDBOperationRequestChargeKey = attribute.Key("azure.cosmosdb.operation.request_charge") + + // AzureCosmosDBRequestBodySizeKey is the attribute Key conforming to the + // "azure.cosmosdb.request.body.size" semantic conventions. It represents the + // request payload size in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + AzureCosmosDBRequestBodySizeKey = attribute.Key("azure.cosmosdb.request.body.size") + + // AzureCosmosDBResponseSubStatusCodeKey is the attribute Key conforming to the + // "azure.cosmosdb.response.sub_status_code" semantic conventions. It represents + // the cosmos DB sub status code. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1000, 1002 + AzureCosmosDBResponseSubStatusCodeKey = attribute.Key("azure.cosmosdb.response.sub_status_code") + + // AzureResourceProviderNamespaceKey is the attribute Key conforming to the + // "azure.resource_provider.namespace" semantic conventions. It represents the + // [Azure Resource Provider Namespace] as recognized by the client. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Microsoft.Storage", "Microsoft.KeyVault", "Microsoft.ServiceBus" + // + // [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers + AzureResourceProviderNamespaceKey = attribute.Key("azure.resource_provider.namespace") + + // AzureServiceRequestIDKey is the attribute Key conforming to the + // "azure.service.request.id" semantic conventions. It represents the unique + // identifier of the service request. It's generated by the Azure service and + // returned with the response. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "00000000-0000-0000-0000-000000000000" + AzureServiceRequestIDKey = attribute.Key("azure.service.request.id") +) + +// AzureClientID returns an attribute KeyValue conforming to the +// "azure.client.id" semantic conventions. It represents the unique identifier of +// the client instance. +func AzureClientID(val string) attribute.KeyValue { + return AzureClientIDKey.String(val) +} + +// AzureCosmosDBOperationContactedRegions returns an attribute KeyValue +// conforming to the "azure.cosmosdb.operation.contacted_regions" semantic +// conventions. It represents the list of regions contacted during operation in +// the order that they were contacted. If there is more than one region listed, +// it indicates that the operation was performed on multiple regions i.e. +// cross-regional call. +func AzureCosmosDBOperationContactedRegions(val ...string) attribute.KeyValue { + return AzureCosmosDBOperationContactedRegionsKey.StringSlice(val) +} + +// AzureCosmosDBOperationRequestCharge returns an attribute KeyValue conforming +// to the "azure.cosmosdb.operation.request_charge" semantic conventions. It +// represents the number of request units consumed by the operation. 
+func AzureCosmosDBOperationRequestCharge(val float64) attribute.KeyValue { + return AzureCosmosDBOperationRequestChargeKey.Float64(val) +} + +// AzureCosmosDBRequestBodySize returns an attribute KeyValue conforming to the +// "azure.cosmosdb.request.body.size" semantic conventions. It represents the +// request payload size in bytes. +func AzureCosmosDBRequestBodySize(val int) attribute.KeyValue { + return AzureCosmosDBRequestBodySizeKey.Int(val) +} + +// AzureCosmosDBResponseSubStatusCode returns an attribute KeyValue conforming to +// the "azure.cosmosdb.response.sub_status_code" semantic conventions. It +// represents the cosmos DB sub status code. +func AzureCosmosDBResponseSubStatusCode(val int) attribute.KeyValue { + return AzureCosmosDBResponseSubStatusCodeKey.Int(val) +} + +// AzureResourceProviderNamespace returns an attribute KeyValue conforming to the +// "azure.resource_provider.namespace" semantic conventions. It represents the +// [Azure Resource Provider Namespace] as recognized by the client. +// +// [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers +func AzureResourceProviderNamespace(val string) attribute.KeyValue { + return AzureResourceProviderNamespaceKey.String(val) +} + +// AzureServiceRequestID returns an attribute KeyValue conforming to the +// "azure.service.request.id" semantic conventions. It represents the unique +// identifier of the service request. It's generated by the Azure service and +// returned with the response. +func AzureServiceRequestID(val string) attribute.KeyValue { + return AzureServiceRequestIDKey.String(val) +} + +// Enum values for azure.cosmosdb.connection.mode +var ( + // Gateway (HTTP) connection. + // Stability: development + AzureCosmosDBConnectionModeGateway = AzureCosmosDBConnectionModeKey.String("gateway") + // Direct connection. 
+ // Stability: development + AzureCosmosDBConnectionModeDirect = AzureCosmosDBConnectionModeKey.String("direct") +) + +// Enum values for azure.cosmosdb.consistency.level +var ( + // Strong + // Stability: development + AzureCosmosDBConsistencyLevelStrong = AzureCosmosDBConsistencyLevelKey.String("Strong") + // Bounded Staleness + // Stability: development + AzureCosmosDBConsistencyLevelBoundedStaleness = AzureCosmosDBConsistencyLevelKey.String("BoundedStaleness") + // Session + // Stability: development + AzureCosmosDBConsistencyLevelSession = AzureCosmosDBConsistencyLevelKey.String("Session") + // Eventual + // Stability: development + AzureCosmosDBConsistencyLevelEventual = AzureCosmosDBConsistencyLevelKey.String("Eventual") + // Consistent Prefix + // Stability: development + AzureCosmosDBConsistencyLevelConsistentPrefix = AzureCosmosDBConsistencyLevelKey.String("ConsistentPrefix") +) + +// Namespace: browser +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: " Not A;Brand 99", "Chromium 99", "Chrome 99" + // Note: This value is intended to be taken from the [UA client hints API] ( + // `navigator.userAgentData.brands`). + // + // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserLanguageKey is the attribute Key conforming to the "browser.language" + // semantic conventions. It represents the preferred language of the user using + // the browser. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "en", "en-US", "fr", "fr-FR" + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. 
+ BrowserLanguageKey = attribute.Key("browser.language") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the browser is + // running on a mobile device. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: This value is intended to be taken from the [UA client hints API] ( + // `navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be + // left unset. + // + // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserPlatformKey is the attribute Key conforming to the "browser.platform" + // semantic conventions. It represents the platform on which the browser is + // running. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Windows", "macOS", "Android" + // Note: This value is intended to be taken from the [UA client hints API] ( + // `navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD + // be left unset in order for the values to be consistent. + // The list of possible values is defined in the + // [W3C User-Agent Client Hints specification]. Note that some (but not all) of + // these values can overlap with values in the + // [`os.type` and `os.name` attributes]. However, for consistency, the values in + // the `browser.platform` attribute should capture the exact value that the user + // agent provides. 
+ // + // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface + // [W3C User-Agent Client Hints specification]: https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform + // [`os.type` and `os.name` attributes]: ./os.md + BrowserPlatformKey = attribute.Key("browser.platform") +) + +// BrowserBrands returns an attribute KeyValue conforming to the "browser.brands" +// semantic conventions. It represents the array of brand name and version +// separated by a space. +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred language +// of the user using the browser. +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the "browser.mobile" +// semantic conventions. It represents a boolean that is true if the browser is +// running on a mobile device. +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running. +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// Namespace: cassandra +const ( + // CassandraConsistencyLevelKey is the attribute Key conforming to the + // "cassandra.consistency.level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from [CQL]. 
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	//
+	// [CQL]: https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html
+	CassandraConsistencyLevelKey = attribute.Key("cassandra.consistency.level")
+
+	// CassandraCoordinatorDCKey is the attribute Key conforming to the
+	// "cassandra.coordinator.dc" semantic conventions. It represents the data
+	// center of the coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: us-west-2
+	CassandraCoordinatorDCKey = attribute.Key("cassandra.coordinator.dc")
+
+	// CassandraCoordinatorIDKey is the attribute Key conforming to the
+	// "cassandra.coordinator.id" semantic conventions. It represents the ID of the
+	// coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: be13faa2-8574-4d71-926d-27f16cf8a7af
+	CassandraCoordinatorIDKey = attribute.Key("cassandra.coordinator.id")
+
+	// CassandraPageSizeKey is the attribute Key conforming to the
+	// "cassandra.page.size" semantic conventions. It represents the fetch size used
+	// for paging, i.e. how many rows will be returned at once.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 5000
+	CassandraPageSizeKey = attribute.Key("cassandra.page.size")
+
+	// CassandraQueryIdempotentKey is the attribute Key conforming to the
+	// "cassandra.query.idempotent" semantic conventions. It represents whether
+	// or not the query is idempotent.
+	//
+	// Type: boolean
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	CassandraQueryIdempotentKey = attribute.Key("cassandra.query.idempotent")
+
+	// CassandraSpeculativeExecutionCountKey is the attribute Key conforming to the
+	// "cassandra.speculative_execution.count" semantic conventions.
It represents
+	// the number of times a query was speculatively executed. Not set or `0` if the
+	// query was not executed speculatively.
+	//
+	// Type: int
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 0, 2
+	CassandraSpeculativeExecutionCountKey = attribute.Key("cassandra.speculative_execution.count")
+)
+
+// CassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "cassandra.coordinator.dc" semantic conventions. It represents the data center
+// of the coordinating node for a query.
+func CassandraCoordinatorDC(val string) attribute.KeyValue {
+	return CassandraCoordinatorDCKey.String(val)
+}
+
+// CassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "cassandra.coordinator.id" semantic conventions. It represents the ID of the
+// coordinating node for a query.
+func CassandraCoordinatorID(val string) attribute.KeyValue {
+	return CassandraCoordinatorIDKey.String(val)
+}
+
+// CassandraPageSize returns an attribute KeyValue conforming to the
+// "cassandra.page.size" semantic conventions. It represents the fetch size used
+// for paging, i.e. how many rows will be returned at once.
+func CassandraPageSize(val int) attribute.KeyValue {
+	return CassandraPageSizeKey.Int(val)
+}
+
+// CassandraQueryIdempotent returns an attribute KeyValue conforming to the
+// "cassandra.query.idempotent" semantic conventions. It represents whether
+// or not the query is idempotent.
+func CassandraQueryIdempotent(val bool) attribute.KeyValue {
+	return CassandraQueryIdempotentKey.Bool(val)
+}
+
+// CassandraSpeculativeExecutionCount returns an attribute KeyValue conforming to
+// the "cassandra.speculative_execution.count" semantic conventions. It
+// represents the number of times a query was speculatively executed. Not set or
+// `0` if the query was not executed speculatively.
+func CassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return CassandraSpeculativeExecutionCountKey.Int(val) +} + +// Enum values for cassandra.consistency.level +var ( + // All + // Stability: development + CassandraConsistencyLevelAll = CassandraConsistencyLevelKey.String("all") + // Each Quorum + // Stability: development + CassandraConsistencyLevelEachQuorum = CassandraConsistencyLevelKey.String("each_quorum") + // Quorum + // Stability: development + CassandraConsistencyLevelQuorum = CassandraConsistencyLevelKey.String("quorum") + // Local Quorum + // Stability: development + CassandraConsistencyLevelLocalQuorum = CassandraConsistencyLevelKey.String("local_quorum") + // One + // Stability: development + CassandraConsistencyLevelOne = CassandraConsistencyLevelKey.String("one") + // Two + // Stability: development + CassandraConsistencyLevelTwo = CassandraConsistencyLevelKey.String("two") + // Three + // Stability: development + CassandraConsistencyLevelThree = CassandraConsistencyLevelKey.String("three") + // Local One + // Stability: development + CassandraConsistencyLevelLocalOne = CassandraConsistencyLevelKey.String("local_one") + // Any + // Stability: development + CassandraConsistencyLevelAny = CassandraConsistencyLevelKey.String("any") + // Serial + // Stability: development + CassandraConsistencyLevelSerial = CassandraConsistencyLevelKey.String("serial") + // Local Serial + // Stability: development + CassandraConsistencyLevelLocalSerial = CassandraConsistencyLevelKey.String("local_serial") +) + +// Namespace: cicd +const ( + // CICDPipelineActionNameKey is the attribute Key conforming to the + // "cicd.pipeline.action.name" semantic conventions. It represents the kind of + // action a pipeline run is performing. 
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "BUILD", "RUN", "SYNC"
+	CICDPipelineActionNameKey = attribute.Key("cicd.pipeline.action.name")
+
+	// CICDPipelineNameKey is the attribute Key conforming to the
+	// "cicd.pipeline.name" semantic conventions. It represents the human readable
+	// name of the pipeline within a CI/CD system.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "Build and Test", "Lint", "Deploy Go Project",
+	// "deploy_to_environment"
+	CICDPipelineNameKey = attribute.Key("cicd.pipeline.name")
+
+	// CICDPipelineResultKey is the attribute Key conforming to the
+	// "cicd.pipeline.result" semantic conventions. It represents the result of a
+	// pipeline run.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "success", "failure", "timeout", "skipped"
+	CICDPipelineResultKey = attribute.Key("cicd.pipeline.result")
+
+	// CICDPipelineRunIDKey is the attribute Key conforming to the
+	// "cicd.pipeline.run.id" semantic conventions. It represents the unique
+	// identifier of a pipeline run within a CI/CD system.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "120912"
+	CICDPipelineRunIDKey = attribute.Key("cicd.pipeline.run.id")
+
+	// CICDPipelineRunStateKey is the attribute Key conforming to the
+	// "cicd.pipeline.run.state" semantic conventions. It represents the states a
+	// pipeline run goes through during its lifecycle.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "pending", "executing", "finalizing"
+	CICDPipelineRunStateKey = attribute.Key("cicd.pipeline.run.state")
+
+	// CICDPipelineRunURLFullKey is the attribute Key conforming to the
+	// "cicd.pipeline.run.url.full" semantic conventions.
It represents the [URL] of + // the pipeline run, providing the complete address in order to locate and + // identify the pipeline run. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763?pr=1075" + // + // [URL]: https://wikipedia.org/wiki/URL + CICDPipelineRunURLFullKey = attribute.Key("cicd.pipeline.run.url.full") + + // CICDPipelineTaskNameKey is the attribute Key conforming to the + // "cicd.pipeline.task.name" semantic conventions. It represents the human + // readable name of a task within a pipeline. Task here most closely aligns with + // a [computing process] in a pipeline. Other terms for tasks include commands, + // steps, and procedures. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Run GoLang Linter", "Go Build", "go-test", "deploy_binary" + // + // [computing process]: https://wikipedia.org/wiki/Pipeline_(computing) + CICDPipelineTaskNameKey = attribute.Key("cicd.pipeline.task.name") + + // CICDPipelineTaskRunIDKey is the attribute Key conforming to the + // "cicd.pipeline.task.run.id" semantic conventions. It represents the unique + // identifier of a task run within a pipeline. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "12097" + CICDPipelineTaskRunIDKey = attribute.Key("cicd.pipeline.task.run.id") + + // CICDPipelineTaskRunResultKey is the attribute Key conforming to the + // "cicd.pipeline.task.run.result" semantic conventions. It represents the + // result of a task run. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "success", "failure", "timeout", "skipped" + CICDPipelineTaskRunResultKey = attribute.Key("cicd.pipeline.task.run.result") + + // CICDPipelineTaskRunURLFullKey is the attribute Key conforming to the + // "cicd.pipeline.task.run.url.full" semantic conventions. It represents the + // [URL] of the pipeline task run, providing the complete address in order to + // locate and identify the pipeline task run. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763/job/26920038674?pr=1075" + // + // [URL]: https://wikipedia.org/wiki/URL + CICDPipelineTaskRunURLFullKey = attribute.Key("cicd.pipeline.task.run.url.full") + + // CICDPipelineTaskTypeKey is the attribute Key conforming to the + // "cicd.pipeline.task.type" semantic conventions. It represents the type of the + // task within a pipeline. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "build", "test", "deploy" + CICDPipelineTaskTypeKey = attribute.Key("cicd.pipeline.task.type") + + // CICDSystemComponentKey is the attribute Key conforming to the + // "cicd.system.component" semantic conventions. It represents the name of a + // component of the CICD system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "controller", "scheduler", "agent" + CICDSystemComponentKey = attribute.Key("cicd.system.component") + + // CICDWorkerIDKey is the attribute Key conforming to the "cicd.worker.id" + // semantic conventions. It represents the unique identifier of a worker within + // a CICD system. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "abc123", "10.0.1.2", "controller" + CICDWorkerIDKey = attribute.Key("cicd.worker.id") + + // CICDWorkerNameKey is the attribute Key conforming to the "cicd.worker.name" + // semantic conventions. It represents the name of a worker within a CICD + // system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "agent-abc", "controller", "Ubuntu LTS" + CICDWorkerNameKey = attribute.Key("cicd.worker.name") + + // CICDWorkerStateKey is the attribute Key conforming to the "cicd.worker.state" + // semantic conventions. It represents the state of a CICD worker / agent. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "idle", "busy", "down" + CICDWorkerStateKey = attribute.Key("cicd.worker.state") + + // CICDWorkerURLFullKey is the attribute Key conforming to the + // "cicd.worker.url.full" semantic conventions. It represents the [URL] of the + // worker, providing the complete address in order to locate and identify the + // worker. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://cicd.example.org/worker/abc123" + // + // [URL]: https://wikipedia.org/wiki/URL + CICDWorkerURLFullKey = attribute.Key("cicd.worker.url.full") +) + +// CICDPipelineName returns an attribute KeyValue conforming to the +// "cicd.pipeline.name" semantic conventions. It represents the human readable +// name of the pipeline within a CI/CD system. +func CICDPipelineName(val string) attribute.KeyValue { + return CICDPipelineNameKey.String(val) +} + +// CICDPipelineRunID returns an attribute KeyValue conforming to the +// "cicd.pipeline.run.id" semantic conventions. It represents the unique +// identifier of a pipeline run within a CI/CD system. 
+func CICDPipelineRunID(val string) attribute.KeyValue { + return CICDPipelineRunIDKey.String(val) +} + +// CICDPipelineRunURLFull returns an attribute KeyValue conforming to the +// "cicd.pipeline.run.url.full" semantic conventions. It represents the [URL] of +// the pipeline run, providing the complete address in order to locate and +// identify the pipeline run. +// +// [URL]: https://wikipedia.org/wiki/URL +func CICDPipelineRunURLFull(val string) attribute.KeyValue { + return CICDPipelineRunURLFullKey.String(val) +} + +// CICDPipelineTaskName returns an attribute KeyValue conforming to the +// "cicd.pipeline.task.name" semantic conventions. It represents the human +// readable name of a task within a pipeline. Task here most closely aligns with +// a [computing process] in a pipeline. Other terms for tasks include commands, +// steps, and procedures. +// +// [computing process]: https://wikipedia.org/wiki/Pipeline_(computing) +func CICDPipelineTaskName(val string) attribute.KeyValue { + return CICDPipelineTaskNameKey.String(val) +} + +// CICDPipelineTaskRunID returns an attribute KeyValue conforming to the +// "cicd.pipeline.task.run.id" semantic conventions. It represents the unique +// identifier of a task run within a pipeline. +func CICDPipelineTaskRunID(val string) attribute.KeyValue { + return CICDPipelineTaskRunIDKey.String(val) +} + +// CICDPipelineTaskRunURLFull returns an attribute KeyValue conforming to the +// "cicd.pipeline.task.run.url.full" semantic conventions. It represents the +// [URL] of the pipeline task run, providing the complete address in order to +// locate and identify the pipeline task run. +// +// [URL]: https://wikipedia.org/wiki/URL +func CICDPipelineTaskRunURLFull(val string) attribute.KeyValue { + return CICDPipelineTaskRunURLFullKey.String(val) +} + +// CICDSystemComponent returns an attribute KeyValue conforming to the +// "cicd.system.component" semantic conventions. 
It represents the name of a +// component of the CICD system. +func CICDSystemComponent(val string) attribute.KeyValue { + return CICDSystemComponentKey.String(val) +} + +// CICDWorkerID returns an attribute KeyValue conforming to the "cicd.worker.id" +// semantic conventions. It represents the unique identifier of a worker within a +// CICD system. +func CICDWorkerID(val string) attribute.KeyValue { + return CICDWorkerIDKey.String(val) +} + +// CICDWorkerName returns an attribute KeyValue conforming to the +// "cicd.worker.name" semantic conventions. It represents the name of a worker +// within a CICD system. +func CICDWorkerName(val string) attribute.KeyValue { + return CICDWorkerNameKey.String(val) +} + +// CICDWorkerURLFull returns an attribute KeyValue conforming to the +// "cicd.worker.url.full" semantic conventions. It represents the [URL] of the +// worker, providing the complete address in order to locate and identify the +// worker. +// +// [URL]: https://wikipedia.org/wiki/URL +func CICDWorkerURLFull(val string) attribute.KeyValue { + return CICDWorkerURLFullKey.String(val) +} + +// Enum values for cicd.pipeline.action.name +var ( + // The pipeline run is executing a build. + // Stability: development + CICDPipelineActionNameBuild = CICDPipelineActionNameKey.String("BUILD") + // The pipeline run is executing. + // Stability: development + CICDPipelineActionNameRun = CICDPipelineActionNameKey.String("RUN") + // The pipeline run is executing a sync. + // Stability: development + CICDPipelineActionNameSync = CICDPipelineActionNameKey.String("SYNC") +) + +// Enum values for cicd.pipeline.result +var ( + // The pipeline run finished successfully. + // Stability: development + CICDPipelineResultSuccess = CICDPipelineResultKey.String("success") + // The pipeline run did not finish successfully, eg. due to a compile error or a + // failing test. Such failures are usually detected by non-zero exit codes of + // the tools executed in the pipeline run. 
+ // Stability: development + CICDPipelineResultFailure = CICDPipelineResultKey.String("failure") + // The pipeline run failed due to an error in the CICD system, eg. due to the + // worker being killed. + // Stability: development + CICDPipelineResultError = CICDPipelineResultKey.String("error") + // A timeout caused the pipeline run to be interrupted. + // Stability: development + CICDPipelineResultTimeout = CICDPipelineResultKey.String("timeout") + // The pipeline run was cancelled, eg. by a user manually cancelling the + // pipeline run. + // Stability: development + CICDPipelineResultCancellation = CICDPipelineResultKey.String("cancellation") + // The pipeline run was skipped, eg. due to a precondition not being met. + // Stability: development + CICDPipelineResultSkip = CICDPipelineResultKey.String("skip") +) + +// Enum values for cicd.pipeline.run.state +var ( + // The run pending state spans from the event triggering the pipeline run until + // the execution of the run starts (eg. time spent in a queue, provisioning + // agents, creating run resources). + // + // Stability: development + CICDPipelineRunStatePending = CICDPipelineRunStateKey.String("pending") + // The executing state spans the execution of any run tasks (eg. build, test). + // Stability: development + CICDPipelineRunStateExecuting = CICDPipelineRunStateKey.String("executing") + // The finalizing state spans from when the run has finished executing (eg. + // cleanup of run resources). + // Stability: development + CICDPipelineRunStateFinalizing = CICDPipelineRunStateKey.String("finalizing") +) + +// Enum values for cicd.pipeline.task.run.result +var ( + // The task run finished successfully. + // Stability: development + CICDPipelineTaskRunResultSuccess = CICDPipelineTaskRunResultKey.String("success") + // The task run did not finish successfully, eg. due to a compile error or a + // failing test. 
Such failures are usually detected by non-zero exit codes of + // the tools executed in the task run. + // Stability: development + CICDPipelineTaskRunResultFailure = CICDPipelineTaskRunResultKey.String("failure") + // The task run failed due to an error in the CICD system, eg. due to the worker + // being killed. + // Stability: development + CICDPipelineTaskRunResultError = CICDPipelineTaskRunResultKey.String("error") + // A timeout caused the task run to be interrupted. + // Stability: development + CICDPipelineTaskRunResultTimeout = CICDPipelineTaskRunResultKey.String("timeout") + // The task run was cancelled, eg. by a user manually cancelling the task run. + // Stability: development + CICDPipelineTaskRunResultCancellation = CICDPipelineTaskRunResultKey.String("cancellation") + // The task run was skipped, eg. due to a precondition not being met. + // Stability: development + CICDPipelineTaskRunResultSkip = CICDPipelineTaskRunResultKey.String("skip") +) + +// Enum values for cicd.pipeline.task.type +var ( + // build + // Stability: development + CICDPipelineTaskTypeBuild = CICDPipelineTaskTypeKey.String("build") + // test + // Stability: development + CICDPipelineTaskTypeTest = CICDPipelineTaskTypeKey.String("test") + // deploy + // Stability: development + CICDPipelineTaskTypeDeploy = CICDPipelineTaskTypeKey.String("deploy") +) + +// Enum values for cicd.worker.state +var ( + // The worker is not performing work for the CICD system. It is available to the + // CICD system to perform work on (online / idle). + // Stability: development + CICDWorkerStateAvailable = CICDWorkerStateKey.String("available") + // The worker is performing work for the CICD system. + // Stability: development + CICDWorkerStateBusy = CICDWorkerStateKey.String("busy") + // The worker is not available to the CICD system (disconnected / down). 
+ // Stability: development + CICDWorkerStateOffline = CICDWorkerStateKey.String("offline") +) + +// Namespace: client +const ( + // ClientAddressKey is the attribute Key conforming to the "client.address" + // semantic conventions. It represents the client address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix domain + // socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "client.example.com", "10.1.2.80", "/tmp/my.sock" + // Note: When observed from the server side, and when communicating through an + // intermediary, `client.address` SHOULD represent the client address behind any + // intermediaries, for example proxies, if it's available. + ClientAddressKey = attribute.Key("client.address") + + // ClientPortKey is the attribute Key conforming to the "client.port" semantic + // conventions. It represents the client port number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 65123 + // Note: When observed from the server side, and when communicating through an + // intermediary, `client.port` SHOULD represent the client port behind any + // intermediaries, for example proxies, if it's available. + ClientPortKey = attribute.Key("client.port") +) + +// ClientAddress returns an attribute KeyValue conforming to the "client.address" +// semantic conventions. It represents the client address - domain name if +// available without reverse DNS lookup; otherwise, IP address or Unix domain +// socket name. +func ClientAddress(val string) attribute.KeyValue { + return ClientAddressKey.String(val) +} + +// ClientPort returns an attribute KeyValue conforming to the "client.port" +// semantic conventions. It represents the client port number. 
+func ClientPort(val int) attribute.KeyValue { + return ClientPortKey.Int(val) +} + +// Namespace: cloud +const ( + // CloudAccountIDKey is the attribute Key conforming to the "cloud.account.id" + // semantic conventions. It represents the cloud account ID the resource is + // assigned to. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "111111111111", "opentelemetry" + CloudAccountIDKey = attribute.Key("cloud.account.id") + + // CloudAvailabilityZoneKey is the attribute Key conforming to the + // "cloud.availability_zone" semantic conventions. It represents the cloud + // regions often have multiple, isolated locations known as zones to increase + // availability. Availability zone represents the zone where the resource is + // running. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "us-east-1c" + // Note: Availability zones are called "zones" on Alibaba Cloud and Google + // Cloud. + CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + + // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" + // semantic conventions. It represents the cloud platform in use. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`. + CloudPlatformKey = attribute.Key("cloud.platform") + + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" semantic + // conventions. It represents the geographical region within a cloud provider. 
+ // When associated with a resource, this attribute specifies the region where + // the resource operates. When calling services or APIs deployed on a cloud, + // this attribute identifies the region where the called destination is + // deployed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "us-central1", "us-east-1" + // Note: Refer to your provider's docs to see the available regions, for example + // [Alibaba Cloud regions], [AWS regions], [Azure regions], + // [Google Cloud regions], or [Tencent Cloud regions]. + // + // [Alibaba Cloud regions]: https://www.alibabacloud.com/help/doc-detail/40654.htm + // [AWS regions]: https://aws.amazon.com/about-aws/global-infrastructure/regions_az/ + // [Azure regions]: https://azure.microsoft.com/global-infrastructure/geographies/ + // [Google Cloud regions]: https://cloud.google.com/about/locations + // [Tencent Cloud regions]: https://www.tencentcloud.com/document/product/213/6091 + CloudRegionKey = attribute.Key("cloud.region") + + // CloudResourceIDKey is the attribute Key conforming to the "cloud.resource_id" + // semantic conventions. It represents the cloud provider-specific native + // identifier of the monitored cloud resource (e.g. an [ARN] on AWS, a + // [fully qualified resource ID] on Azure, a [full resource name] on GCP). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function", + // "//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID", + // "/subscriptions//resourceGroups/ + // /providers/Microsoft.Web/sites//functions/" + // Note: On some cloud providers, it may not be possible to determine the full + // ID at startup, + // so it may be necessary to set `cloud.resource_id` as a span attribute + // instead. + // + // The exact value to use for `cloud.resource_id` depends on the cloud provider. 
+ // The following well-known definitions MUST be used if you set this attribute + // and they apply: + // + // - **AWS Lambda:** The function [ARN]. + // Take care not to use the "invoked ARN" directly but replace any + // [alias suffix] + // with the resolved function version, as the same runtime instance may be + // invocable with + // multiple different aliases. + // - **GCP:** The [URI of the resource] + // - **Azure:** The [Fully Qualified Resource ID] of the invoked function, + // *not* the function app, having the form + // + // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/` + // . + // This means that a span attribute MUST be used, as an Azure function app + // can host multiple functions that would usually share + // a TracerProvider. + // + // + // [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + // [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id + // [full resource name]: https://google.aip.dev/122#full-resource-names + // [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + // [alias suffix]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html + // [URI of the resource]: https://cloud.google.com/iam/docs/full-resource-names + // [Fully Qualified Resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id + CloudResourceIDKey = attribute.Key("cloud.resource_id") +) + +// CloudAccountID returns an attribute KeyValue conforming to the +// "cloud.account.id" semantic conventions. It represents the cloud account ID +// the resource is assigned to. +func CloudAccountID(val string) attribute.KeyValue { + return CloudAccountIDKey.String(val) +} + +// CloudAvailabilityZone returns an attribute KeyValue conforming to the +// "cloud.availability_zone" semantic conventions. 
It represents the cloud +// regions often have multiple, isolated locations known as zones to increase +// availability. Availability zone represents the zone where the resource is +// running. +func CloudAvailabilityZone(val string) attribute.KeyValue { + return CloudAvailabilityZoneKey.String(val) +} + +// CloudRegion returns an attribute KeyValue conforming to the "cloud.region" +// semantic conventions. It represents the geographical region within a cloud +// provider. When associated with a resource, this attribute specifies the region +// where the resource operates. When calling services or APIs deployed on a +// cloud, this attribute identifies the region where the called destination is +// deployed. +func CloudRegion(val string) attribute.KeyValue { + return CloudRegionKey.String(val) +} + +// CloudResourceID returns an attribute KeyValue conforming to the +// "cloud.resource_id" semantic conventions. It represents the cloud +// provider-specific native identifier of the monitored cloud resource (e.g. an +// [ARN] on AWS, a [fully qualified resource ID] on Azure, a [full resource name] +// on GCP). 
+// +// [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html +// [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id +// [full resource name]: https://google.aip.dev/122#full-resource-names +func CloudResourceID(val string) attribute.KeyValue { + return CloudResourceIDKey.String(val) +} + +// Enum values for cloud.platform +var ( + // Alibaba Cloud Elastic Compute Service + // Stability: development + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + // Stability: development + CloudPlatformAlibabaCloudFC = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + // Stability: development + CloudPlatformAlibabaCloudOpenShift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + // Stability: development + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + // Stability: development + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + // Stability: development + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + // Stability: development + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + // Stability: development + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + // Stability: development + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + // Stability: development + CloudPlatformAWSOpenShift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + // Stability: development + CloudPlatformAzureVM = CloudPlatformKey.String("azure.vm") + // Azure Container Apps + // Stability: development + CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure.container_apps") + // Azure Container 
Instances + // Stability: development + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure.container_instances") + // Azure Kubernetes Service + // Stability: development + CloudPlatformAzureAKS = CloudPlatformKey.String("azure.aks") + // Azure Functions + // Stability: development + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure.functions") + // Azure App Service + // Stability: development + CloudPlatformAzureAppService = CloudPlatformKey.String("azure.app_service") + // Azure Red Hat OpenShift + // Stability: development + CloudPlatformAzureOpenShift = CloudPlatformKey.String("azure.openshift") + // Google Bare Metal Solution (BMS) + // Stability: development + CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") + // Google Cloud Compute Engine (GCE) + // Stability: development + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + // Stability: development + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + // Stability: development + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + // Stability: development + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + // Stability: development + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + // Stability: development + CloudPlatformGCPOpenShift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + // Stability: development + CloudPlatformIBMCloudOpenShift = CloudPlatformKey.String("ibm_cloud_openshift") + // Compute on Oracle Cloud Infrastructure (OCI) + // Stability: development + CloudPlatformOracleCloudCompute = CloudPlatformKey.String("oracle_cloud_compute") + // Kubernetes Engine (OKE) on Oracle Cloud 
Infrastructure (OCI) + // Stability: development + CloudPlatformOracleCloudOKE = CloudPlatformKey.String("oracle_cloud_oke") + // Tencent Cloud Cloud Virtual Machine (CVM) + // Stability: development + CloudPlatformTencentCloudCVM = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + // Stability: development + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + // Stability: development + CloudPlatformTencentCloudSCF = CloudPlatformKey.String("tencent_cloud_scf") +) + +// Enum values for cloud.provider +var ( + // Alibaba Cloud + // Stability: development + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + // Stability: development + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + // Stability: development + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + // Stability: development + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + // Stability: development + CloudProviderHeroku = CloudProviderKey.String("heroku") + // IBM Cloud + // Stability: development + CloudProviderIBMCloud = CloudProviderKey.String("ibm_cloud") + // Oracle Cloud Infrastructure (OCI) + // Stability: development + CloudProviderOracleCloud = CloudProviderKey.String("oracle_cloud") + // Tencent Cloud + // Stability: development + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +// Namespace: cloudevents +const ( + // CloudEventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. It represents the [event_id] + // uniquely identifies the event. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123e4567-e89b-12d3-a456-426614174000", "0001" + // + // [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id + CloudEventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudEventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the [source] + // identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://github.com/cloudevents", "/cloudevents/spec/pull/123", + // "my-service" + // + // [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1 + CloudEventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudEventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents specification] which the event uses. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0 + // + // [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion + CloudEventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudEventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. It represents the [subject] + // of the event in the context of the event producer (identified by source). 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: mynewfile.jpg + // + // [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject + CloudEventsEventSubjectKey = attribute.Key("cloudevents.event_subject") + + // CloudEventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. It represents the [event_type] + // contains a value describing the type of event related to the originating + // occurrence. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "com.github.pull_request.opened", "com.example.object.deleted.v2" + // + // [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type + CloudEventsEventTypeKey = attribute.Key("cloudevents.event_type") +) + +// CloudEventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the [event_id] +// uniquely identifies the event. +// +// [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id +func CloudEventsEventID(val string) attribute.KeyValue { + return CloudEventsEventIDKey.String(val) +} + +// CloudEventsEventSource returns an attribute KeyValue conforming to the +// "cloudevents.event_source" semantic conventions. It represents the [source] +// identifies the context in which an event happened. +// +// [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1 +func CloudEventsEventSource(val string) attribute.KeyValue { + return CloudEventsEventSourceKey.String(val) +} + +// CloudEventsEventSpecVersion returns an attribute KeyValue conforming to the +// "cloudevents.event_spec_version" semantic conventions. It represents the +// [version of the CloudEvents specification] which the event uses. 
+// +// [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion +func CloudEventsEventSpecVersion(val string) attribute.KeyValue { + return CloudEventsEventSpecVersionKey.String(val) +} + +// CloudEventsEventSubject returns an attribute KeyValue conforming to the +// "cloudevents.event_subject" semantic conventions. It represents the [subject] +// of the event in the context of the event producer (identified by source). +// +// [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject +func CloudEventsEventSubject(val string) attribute.KeyValue { + return CloudEventsEventSubjectKey.String(val) +} + +// CloudEventsEventType returns an attribute KeyValue conforming to the +// "cloudevents.event_type" semantic conventions. It represents the [event_type] +// contains a value describing the type of event related to the originating +// occurrence. +// +// [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type +func CloudEventsEventType(val string) attribute.KeyValue { + return CloudEventsEventTypeKey.String(val) +} + +// Namespace: cloudfoundry +const ( + // CloudFoundryAppIDKey is the attribute Key conforming to the + // "cloudfoundry.app.id" semantic conventions. It represents the guid of the + // application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.application_id`. This is the same value as + // reported by `cf app --guid`. + CloudFoundryAppIDKey = attribute.Key("cloudfoundry.app.id") + + // CloudFoundryAppInstanceIDKey is the attribute Key conforming to the + // "cloudfoundry.app.instance.id" semantic conventions. It represents the index + // of the application instance. 0 when just one instance is active. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0", "1" + // Note: CloudFoundry defines the `instance_id` in the [Loggregator v2 envelope] + // . + // It is used for logs and metrics emitted by CloudFoundry. It is + // supposed to contain the application instance index for applications + // deployed on the runtime. + // + // Application instrumentation should use the value from environment + // variable `CF_INSTANCE_INDEX`. + // + // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope + CloudFoundryAppInstanceIDKey = attribute.Key("cloudfoundry.app.instance.id") + + // CloudFoundryAppNameKey is the attribute Key conforming to the + // "cloudfoundry.app.name" semantic conventions. It represents the name of the + // application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-app-name" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.application_name`. This is the same value + // as reported by `cf apps`. + CloudFoundryAppNameKey = attribute.Key("cloudfoundry.app.name") + + // CloudFoundryOrgIDKey is the attribute Key conforming to the + // "cloudfoundry.org.id" semantic conventions. It represents the guid of the + // CloudFoundry org the application is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.org_id`. This is the same value as + // reported by `cf org --guid`. + CloudFoundryOrgIDKey = attribute.Key("cloudfoundry.org.id") + + // CloudFoundryOrgNameKey is the attribute Key conforming to the + // "cloudfoundry.org.name" semantic conventions. It represents the name of the + // CloudFoundry organization the app is running in. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-org-name" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.org_name`. This is the same value as + // reported by `cf orgs`. + CloudFoundryOrgNameKey = attribute.Key("cloudfoundry.org.name") + + // CloudFoundryProcessIDKey is the attribute Key conforming to the + // "cloudfoundry.process.id" semantic conventions. It represents the UID + // identifying the process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.process_id`. It is supposed to be equal to + // `VCAP_APPLICATION.app_id` for applications deployed to the runtime. + // For system components, this could be the actual PID. + CloudFoundryProcessIDKey = attribute.Key("cloudfoundry.process.id") + + // CloudFoundryProcessTypeKey is the attribute Key conforming to the + // "cloudfoundry.process.type" semantic conventions. It represents the type of + // process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "web" + // Note: CloudFoundry applications can consist of multiple jobs. Usually the + // main process will be of type `web`. There can be additional background + // tasks or side-cars with different process types. + CloudFoundryProcessTypeKey = attribute.Key("cloudfoundry.process.type") + + // CloudFoundrySpaceIDKey is the attribute Key conforming to the + // "cloudfoundry.space.id" semantic conventions. It represents the guid of the + // CloudFoundry space the application is running in. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.space_id`. This is the same value as + // reported by `cf space --guid`. + CloudFoundrySpaceIDKey = attribute.Key("cloudfoundry.space.id") + + // CloudFoundrySpaceNameKey is the attribute Key conforming to the + // "cloudfoundry.space.name" semantic conventions. It represents the name of the + // CloudFoundry space the application is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-space-name" + // Note: Application instrumentation should use the value from environment + // variable `VCAP_APPLICATION.space_name`. This is the same value as + // reported by `cf spaces`. + CloudFoundrySpaceNameKey = attribute.Key("cloudfoundry.space.name") + + // CloudFoundrySystemIDKey is the attribute Key conforming to the + // "cloudfoundry.system.id" semantic conventions. It represents a guid or + // another name describing the event source. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cf/gorouter" + // Note: CloudFoundry defines the `source_id` in the [Loggregator v2 envelope]. + // It is used for logs and metrics emitted by CloudFoundry. It is + // supposed to contain the component name, e.g. "gorouter", for + // CloudFoundry components. + // + // When system components are instrumented, values from the + // [Bosh spec] + // should be used. The `system.id` should be set to + // `spec.deployment/spec.name`. 
+ // + // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope + // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec + CloudFoundrySystemIDKey = attribute.Key("cloudfoundry.system.id") + + // CloudFoundrySystemInstanceIDKey is the attribute Key conforming to the + // "cloudfoundry.system.instance.id" semantic conventions. It represents a guid + // describing the concrete instance of the event source. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: CloudFoundry defines the `instance_id` in the [Loggregator v2 envelope] + // . + // It is used for logs and metrics emitted by CloudFoundry. It is + // supposed to contain the vm id for CloudFoundry components. + // + // When system components are instrumented, values from the + // [Bosh spec] + // should be used. The `system.instance.id` should be set to `spec.id`. + // + // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope + // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec + CloudFoundrySystemInstanceIDKey = attribute.Key("cloudfoundry.system.instance.id") +) + +// CloudFoundryAppID returns an attribute KeyValue conforming to the +// "cloudfoundry.app.id" semantic conventions. It represents the guid of the +// application. +func CloudFoundryAppID(val string) attribute.KeyValue { + return CloudFoundryAppIDKey.String(val) +} + +// CloudFoundryAppInstanceID returns an attribute KeyValue conforming to the +// "cloudfoundry.app.instance.id" semantic conventions. It represents the index +// of the application instance. 0 when just one instance is active. +func CloudFoundryAppInstanceID(val string) attribute.KeyValue { + return CloudFoundryAppInstanceIDKey.String(val) +} + +// CloudFoundryAppName returns an attribute KeyValue conforming to the +// "cloudfoundry.app.name" semantic conventions. 
It represents the name of the +// application. +func CloudFoundryAppName(val string) attribute.KeyValue { + return CloudFoundryAppNameKey.String(val) +} + +// CloudFoundryOrgID returns an attribute KeyValue conforming to the +// "cloudfoundry.org.id" semantic conventions. It represents the guid of the +// CloudFoundry org the application is running in. +func CloudFoundryOrgID(val string) attribute.KeyValue { + return CloudFoundryOrgIDKey.String(val) +} + +// CloudFoundryOrgName returns an attribute KeyValue conforming to the +// "cloudfoundry.org.name" semantic conventions. It represents the name of the +// CloudFoundry organization the app is running in. +func CloudFoundryOrgName(val string) attribute.KeyValue { + return CloudFoundryOrgNameKey.String(val) +} + +// CloudFoundryProcessID returns an attribute KeyValue conforming to the +// "cloudfoundry.process.id" semantic conventions. It represents the UID +// identifying the process. +func CloudFoundryProcessID(val string) attribute.KeyValue { + return CloudFoundryProcessIDKey.String(val) +} + +// CloudFoundryProcessType returns an attribute KeyValue conforming to the +// "cloudfoundry.process.type" semantic conventions. It represents the type of +// process. +func CloudFoundryProcessType(val string) attribute.KeyValue { + return CloudFoundryProcessTypeKey.String(val) +} + +// CloudFoundrySpaceID returns an attribute KeyValue conforming to the +// "cloudfoundry.space.id" semantic conventions. It represents the guid of the +// CloudFoundry space the application is running in. +func CloudFoundrySpaceID(val string) attribute.KeyValue { + return CloudFoundrySpaceIDKey.String(val) +} + +// CloudFoundrySpaceName returns an attribute KeyValue conforming to the +// "cloudfoundry.space.name" semantic conventions. It represents the name of the +// CloudFoundry space the application is running in. 
+func CloudFoundrySpaceName(val string) attribute.KeyValue { + return CloudFoundrySpaceNameKey.String(val) +} + +// CloudFoundrySystemID returns an attribute KeyValue conforming to the +// "cloudfoundry.system.id" semantic conventions. It represents a guid or another +// name describing the event source. +func CloudFoundrySystemID(val string) attribute.KeyValue { + return CloudFoundrySystemIDKey.String(val) +} + +// CloudFoundrySystemInstanceID returns an attribute KeyValue conforming to the +// "cloudfoundry.system.instance.id" semantic conventions. It represents a guid +// describing the concrete instance of the event source. +func CloudFoundrySystemInstanceID(val string) attribute.KeyValue { + return CloudFoundrySystemInstanceIDKey.String(val) +} + +// Namespace: code +const ( + // CodeColumnNumberKey is the attribute Key conforming to the + // "code.column.number" semantic conventions. It represents the column number in + // `code.file.path` best representing the operation. It SHOULD point within the + // code unit named in `code.function.name`. This attribute MUST NOT be used on + // the Profile signal since the data is already captured in 'message Line'. This + // constraint is imposed to prevent redundancy and maintain data integrity. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + CodeColumnNumberKey = attribute.Key("code.column.number") + + // CodeFilePathKey is the attribute Key conforming to the "code.file.path" + // semantic conventions. It represents the source code file name that identifies + // the code unit as uniquely as possible (preferably an absolute file path). + // This attribute MUST NOT be used on the Profile signal since the data is + // already captured in 'message Function'. This constraint is imposed to prevent + // redundancy and maintain data integrity. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: /usr/local/MyApplication/content_root/app/index.php + CodeFilePathKey = attribute.Key("code.file.path") + + // CodeFunctionNameKey is the attribute Key conforming to the + // "code.function.name" semantic conventions. It represents the method or + // function fully-qualified name without arguments. The value should fit the + // natural representation of the language runtime, which is also likely the same + // used within `code.stacktrace` attribute value. This attribute MUST NOT be + // used on the Profile signal since the data is already captured in 'message + // Function'. This constraint is imposed to prevent redundancy and maintain data + // integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "com.example.MyHttpService.serveRequest", + // "GuzzleHttp\Client::transfer", "fopen" + // Note: Values and format depends on each language runtime, thus it is + // impossible to provide an exhaustive list of examples. + // The values are usually the same (or prefixes of) the ones found in native + // stack trace representation stored in + // `code.stacktrace` without information on arguments. + // + // Examples: + // + // - Java method: `com.example.MyHttpService.serveRequest` + // - Java anonymous class method: `com.mycompany.Main$1.myMethod` + // - Java lambda method: + // `com.mycompany.Main$$Lambda/0x0000748ae4149c00.myMethod` + // - PHP function: `GuzzleHttp\Client::transfer` + // - Go function: `github.com/my/repo/pkg.foo.func5` + // - Elixir: `OpenTelemetry.Ctx.new` + // - Erlang: `opentelemetry_ctx:new` + // - Rust: `playground::my_module::my_cool_func` + // - C function: `fopen` + CodeFunctionNameKey = attribute.Key("code.function.name") + + // CodeLineNumberKey is the attribute Key conforming to the "code.line.number" + // semantic conventions. 
It represents the line number in `code.file.path` best + // representing the operation. It SHOULD point within the code unit named in + // `code.function.name`. This attribute MUST NOT be used on the Profile signal + // since the data is already captured in 'message Line'. This constraint is + // imposed to prevent redundancy and maintain data integrity. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + CodeLineNumberKey = attribute.Key("code.line.number") + + // CodeStacktraceKey is the attribute Key conforming to the "code.stacktrace" + // semantic conventions. It represents a stacktrace as a string in the natural + // representation for the language runtime. The representation is identical to + // [`exception.stacktrace`]. This attribute MUST NOT be used on the Profile + // signal since the data is already captured in 'message Location'. This + // constraint is imposed to prevent redundancy and maintain data integrity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at + // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at + // com.example.GenerateTrace.main(GenerateTrace.java:5) + // + // [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation + CodeStacktraceKey = attribute.Key("code.stacktrace") +) + +// CodeColumnNumber returns an attribute KeyValue conforming to the +// "code.column.number" semantic conventions. It represents the column number in +// `code.file.path` best representing the operation. It SHOULD point within the +// code unit named in `code.function.name`. This attribute MUST NOT be used on +// the Profile signal since the data is already captured in 'message Line'. This +// constraint is imposed to prevent redundancy and maintain data integrity. 
+func CodeColumnNumber(val int) attribute.KeyValue { + return CodeColumnNumberKey.Int(val) +} + +// CodeFilePath returns an attribute KeyValue conforming to the "code.file.path" +// semantic conventions. It represents the source code file name that identifies +// the code unit as uniquely as possible (preferably an absolute file path). This +// attribute MUST NOT be used on the Profile signal since the data is already +// captured in 'message Function'. This constraint is imposed to prevent +// redundancy and maintain data integrity. +func CodeFilePath(val string) attribute.KeyValue { + return CodeFilePathKey.String(val) +} + +// CodeFunctionName returns an attribute KeyValue conforming to the +// "code.function.name" semantic conventions. It represents the method or +// function fully-qualified name without arguments. The value should fit the +// natural representation of the language runtime, which is also likely the same +// used within `code.stacktrace` attribute value. This attribute MUST NOT be used +// on the Profile signal since the data is already captured in 'message +// Function'. This constraint is imposed to prevent redundancy and maintain data +// integrity. +func CodeFunctionName(val string) attribute.KeyValue { + return CodeFunctionNameKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the +// "code.line.number" semantic conventions. It represents the line number in +// `code.file.path` best representing the operation. It SHOULD point within the +// code unit named in `code.function.name`. This attribute MUST NOT be used on +// the Profile signal since the data is already captured in 'message Line'. This +// constraint is imposed to prevent redundancy and maintain data integrity. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeStacktrace returns an attribute KeyValue conforming to the +// "code.stacktrace" semantic conventions. 
It represents a stacktrace as a string +// in the natural representation for the language runtime. The representation is +// identical to [`exception.stacktrace`]. This attribute MUST NOT be used on the +// Profile signal since the data is already captured in 'message Location'. This +// constraint is imposed to prevent redundancy and maintain data integrity. +// +// [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation +func CodeStacktrace(val string) attribute.KeyValue { + return CodeStacktraceKey.String(val) +} + +// Namespace: container +const ( + // ContainerCommandKey is the attribute Key conforming to the + // "container.command" semantic conventions. It represents the command used to + // run the container (i.e. the command name). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otelcontribcol" + // Note: If using embedded credentials or sensitive data, it is recommended to + // remove them to prevent potential leakage. + ContainerCommandKey = attribute.Key("container.command") + + // ContainerCommandArgsKey is the attribute Key conforming to the + // "container.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) run by the + // container. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otelcontribcol", "--config", "config.yaml" + ContainerCommandArgsKey = attribute.Key("container.command_args") + + // ContainerCommandLineKey is the attribute Key conforming to the + // "container.command_line" semantic conventions. It represents the full command + // run by the container as a single string representing the full command. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otelcontribcol --config config.yaml" + ContainerCommandLineKey = attribute.Key("container.command_line") + + // ContainerCSIPluginNameKey is the attribute Key conforming to the + // "container.csi.plugin.name" semantic conventions. It represents the name of + // the CSI ([Container Storage Interface]) plugin used by the volume. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "pd.csi.storage.gke.io" + // Note: This can sometimes be referred to as a "driver" in CSI implementations. + // This should represent the `name` field of the GetPluginInfo RPC. + // + // [Container Storage Interface]: https://github.com/container-storage-interface/spec + ContainerCSIPluginNameKey = attribute.Key("container.csi.plugin.name") + + // ContainerCSIVolumeIDKey is the attribute Key conforming to the + // "container.csi.volume.id" semantic conventions. It represents the unique + // volume ID returned by the CSI ([Container Storage Interface]) plugin. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "projects/my-gcp-project/zones/my-gcp-zone/disks/my-gcp-disk" + // Note: This can sometimes be referred to as a "volume handle" in CSI + // implementations. This should represent the `Volume.volume_id` field in CSI + // spec. + // + // [Container Storage Interface]: https://github.com/container-storage-interface/spec + ContainerCSIVolumeIDKey = attribute.Key("container.csi.volume.id") + + // ContainerIDKey is the attribute Key conforming to the "container.id" semantic + // conventions. It represents the container ID. Usually a UUID, as for example + // used to [identify Docker containers]. The UUID might be abbreviated. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "a3bf90e006b2" + // + // [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification + ContainerIDKey = attribute.Key("container.id") + + // ContainerImageIDKey is the attribute Key conforming to the + // "container.image.id" semantic conventions. It represents the runtime specific + // image identifier. Usually a hash algorithm followed by a UUID. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f" + // Note: Docker defines a sha256 of the image id; `container.image.id` + // corresponds to the `Image` field from the Docker container inspect [API] + // endpoint. + // K8s defines a link to the container registry repository with digest + // `"imageID": "registry.azurecr.io /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"` + // . + // The ID is assigned by the container runtime and can vary in different + // environments. Consider using `oci.manifest.digest` if it is important to + // identify the same image in different environments/runtimes. + // + // [API]: https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect + ContainerImageIDKey = attribute.Key("container.image.id") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. It represents the name of the + // image the container was built on. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "gcr.io/opentelemetry/operator" + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageRepoDigestsKey is the attribute Key conforming to the + // "container.image.repo_digests" semantic conventions. 
It represents the repo + // digests of the container image as provided by the container runtime. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb", + // "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" + // Note: [Docker] and [CRI] report those under the `RepoDigests` field. + // + // [Docker]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect + // [CRI]: https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238 + ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests") + + // ContainerImageTagsKey is the attribute Key conforming to the + // "container.image.tags" semantic conventions. It represents the container + // image tags. An example can be found in [Docker Image Inspect]. Should be only + // the `` section of the full name for example from + // `registry.example.com/my-org/my-image:`. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "v1.27.1", "3.5.7-0" + // + // [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect + ContainerImageTagsKey = attribute.Key("container.image.tags") + + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-autoconf" + ContainerNameKey = attribute.Key("container.name") + + // ContainerRuntimeDescriptionKey is the attribute Key conforming to the + // "container.runtime.description" semantic conventions. 
It represents a + // description about the runtime which could include, for example details about + // the CRI/API version being used or other customisations. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "docker://19.3.1 - CRI: 1.22.0" + ContainerRuntimeDescriptionKey = attribute.Key("container.runtime.description") + + // ContainerRuntimeNameKey is the attribute Key conforming to the + // "container.runtime.name" semantic conventions. It represents the container + // runtime managing this container. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "docker", "containerd", "rkt" + ContainerRuntimeNameKey = attribute.Key("container.runtime.name") + + // ContainerRuntimeVersionKey is the attribute Key conforming to the + // "container.runtime.version" semantic conventions. It represents the version + // of the runtime of this process, as returned by the runtime without + // modification. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0.0 + ContainerRuntimeVersionKey = attribute.Key("container.runtime.version") +) + +// ContainerCommand returns an attribute KeyValue conforming to the +// "container.command" semantic conventions. It represents the command used to +// run the container (i.e. the command name). +func ContainerCommand(val string) attribute.KeyValue { + return ContainerCommandKey.String(val) +} + +// ContainerCommandArgs returns an attribute KeyValue conforming to the +// "container.command_args" semantic conventions. It represents the all the +// command arguments (including the command/executable itself) run by the +// container. +func ContainerCommandArgs(val ...string) attribute.KeyValue { + return ContainerCommandArgsKey.StringSlice(val) +} + +// ContainerCommandLine returns an attribute KeyValue conforming to the +// "container.command_line" semantic conventions. 
It represents the full command +// run by the container as a single string representing the full command. +func ContainerCommandLine(val string) attribute.KeyValue { + return ContainerCommandLineKey.String(val) +} + +// ContainerCSIPluginName returns an attribute KeyValue conforming to the +// "container.csi.plugin.name" semantic conventions. It represents the name of +// the CSI ([Container Storage Interface]) plugin used by the volume. +// +// [Container Storage Interface]: https://github.com/container-storage-interface/spec +func ContainerCSIPluginName(val string) attribute.KeyValue { + return ContainerCSIPluginNameKey.String(val) +} + +// ContainerCSIVolumeID returns an attribute KeyValue conforming to the +// "container.csi.volume.id" semantic conventions. It represents the unique +// volume ID returned by the CSI ([Container Storage Interface]) plugin. +// +// [Container Storage Interface]: https://github.com/container-storage-interface/spec +func ContainerCSIVolumeID(val string) attribute.KeyValue { + return ContainerCSIVolumeIDKey.String(val) +} + +// ContainerID returns an attribute KeyValue conforming to the "container.id" +// semantic conventions. It represents the container ID. Usually a UUID, as for +// example used to [identify Docker containers]. The UUID might be abbreviated. +// +// [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification +func ContainerID(val string) attribute.KeyValue { + return ContainerIDKey.String(val) +} + +// ContainerImageID returns an attribute KeyValue conforming to the +// "container.image.id" semantic conventions. It represents the runtime specific +// image identifier. Usually a hash algorithm followed by a UUID. +func ContainerImageID(val string) attribute.KeyValue { + return ContainerImageIDKey.String(val) +} + +// ContainerImageName returns an attribute KeyValue conforming to the +// "container.image.name" semantic conventions. 
It represents the name of the +// image the container was built on. +func ContainerImageName(val string) attribute.KeyValue { + return ContainerImageNameKey.String(val) +} + +// ContainerImageRepoDigests returns an attribute KeyValue conforming to the +// "container.image.repo_digests" semantic conventions. It represents the repo +// digests of the container image as provided by the container runtime. +func ContainerImageRepoDigests(val ...string) attribute.KeyValue { + return ContainerImageRepoDigestsKey.StringSlice(val) +} + +// ContainerImageTags returns an attribute KeyValue conforming to the +// "container.image.tags" semantic conventions. It represents the container image +// tags. An example can be found in [Docker Image Inspect]. Should be only the +// `` section of the full name for example from +// `registry.example.com/my-org/my-image:`. +// +// [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect +func ContainerImageTags(val ...string) attribute.KeyValue { + return ContainerImageTagsKey.StringSlice(val) +} + +// ContainerLabel returns an attribute KeyValue conforming to the +// "container.label" semantic conventions. It represents the container labels, +// `` being the label name, the value being the label value. +func ContainerLabel(key string, val string) attribute.KeyValue { + return attribute.String("container.label."+key, val) +} + +// ContainerName returns an attribute KeyValue conforming to the "container.name" +// semantic conventions. It represents the container name used by container +// runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerRuntimeDescription returns an attribute KeyValue conforming to the +// "container.runtime.description" semantic conventions. It represents a +// description about the runtime which could include, for example details about +// the CRI/API version being used or other customisations. 
+func ContainerRuntimeDescription(val string) attribute.KeyValue { + return ContainerRuntimeDescriptionKey.String(val) +} + +// ContainerRuntimeName returns an attribute KeyValue conforming to the +// "container.runtime.name" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntimeName(val string) attribute.KeyValue { + return ContainerRuntimeNameKey.String(val) +} + +// ContainerRuntimeVersion returns an attribute KeyValue conforming to the +// "container.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without modification. +func ContainerRuntimeVersion(val string) attribute.KeyValue { + return ContainerRuntimeVersionKey.String(val) +} + +// Namespace: cpu +const ( + // CPULogicalNumberKey is the attribute Key conforming to the + // "cpu.logical_number" semantic conventions. It represents the logical CPU + // number [0..n-1]. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1 + CPULogicalNumberKey = attribute.Key("cpu.logical_number") + + // CPUModeKey is the attribute Key conforming to the "cpu.mode" semantic + // conventions. It represents the mode of the CPU. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "user", "system" + CPUModeKey = attribute.Key("cpu.mode") +) + +// CPULogicalNumber returns an attribute KeyValue conforming to the +// "cpu.logical_number" semantic conventions. It represents the logical CPU +// number [0..n-1]. 
+func CPULogicalNumber(val int) attribute.KeyValue { + return CPULogicalNumberKey.Int(val) +} + +// Enum values for cpu.mode +var ( + // User + // Stability: development + CPUModeUser = CPUModeKey.String("user") + // System + // Stability: development + CPUModeSystem = CPUModeKey.String("system") + // Nice + // Stability: development + CPUModeNice = CPUModeKey.String("nice") + // Idle + // Stability: development + CPUModeIdle = CPUModeKey.String("idle") + // IO Wait + // Stability: development + CPUModeIOWait = CPUModeKey.String("iowait") + // Interrupt + // Stability: development + CPUModeInterrupt = CPUModeKey.String("interrupt") + // Steal + // Stability: development + CPUModeSteal = CPUModeKey.String("steal") + // Kernel + // Stability: development + CPUModeKernel = CPUModeKey.String("kernel") +) + +// Namespace: db +const ( + // DBClientConnectionPoolNameKey is the attribute Key conforming to the + // "db.client.connection.pool.name" semantic conventions. It represents the name + // of the connection pool; unique within the instrumented application. In case + // the connection pool implementation doesn't provide a name, instrumentation + // SHOULD use a combination of parameters that would make the name unique, for + // example, combining attributes `server.address`, `server.port`, and + // `db.namespace`, formatted as `server.address:server.port/db.namespace`. + // Instrumentations that generate connection pool name following different + // patterns SHOULD document it. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "myDataSource" + DBClientConnectionPoolNameKey = attribute.Key("db.client.connection.pool.name") + + // DBClientConnectionStateKey is the attribute Key conforming to the + // "db.client.connection.state" semantic conventions. It represents the state of + // a connection in the pool. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "idle" + DBClientConnectionStateKey = attribute.Key("db.client.connection.state") + + // DBCollectionNameKey is the attribute Key conforming to the + // "db.collection.name" semantic conventions. It represents the name of a + // collection (table, container) within the database. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "public.users", "customers" + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // + // The collection name SHOULD NOT be extracted from `db.query.text`, + // when the database system supports query text with multiple collections + // in non-batch operations. + // + // For batch operations, if the individual operations are known to have the same + // collection name then that collection name SHOULD be used. + DBCollectionNameKey = attribute.Key("db.collection.name") + + // DBNamespaceKey is the attribute Key conforming to the "db.namespace" semantic + // conventions. It represents the name of the database, fully qualified within + // the server address and port. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "customers", "test.users" + // Note: If a database system has multiple namespace components, they SHOULD be + // concatenated from the most general to the most specific namespace component, + // using `|` as a separator between the components. Any missing components (and + // their associated separators) SHOULD be omitted. + // Semantic conventions for individual database systems SHOULD document what + // `db.namespace` means in the context of that system. + // It is RECOMMENDED to capture the value as provided by the application without + // attempting to do any case normalization. 
+ DBNamespaceKey = attribute.Key("db.namespace") + + // DBOperationBatchSizeKey is the attribute Key conforming to the + // "db.operation.batch.size" semantic conventions. It represents the number of + // queries included in a batch operation. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 2, 3, 4 + // Note: Operations are only considered batches when they contain two or more + // operations, and so `db.operation.batch.size` SHOULD never be `1`. + DBOperationBatchSizeKey = attribute.Key("db.operation.batch.size") + + // DBOperationNameKey is the attribute Key conforming to the "db.operation.name" + // semantic conventions. It represents the name of the operation or command + // being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "findAndModify", "HMSET", "SELECT" + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // + // The operation name SHOULD NOT be extracted from `db.query.text`, + // when the database system supports query text with multiple operations + // in non-batch operations. + // + // If spaces can occur in the operation name, multiple consecutive spaces + // SHOULD be normalized to a single space. + // + // For batch operations, if the individual operations are known to have the same + // operation name + // then that operation name SHOULD be used prepended by `BATCH `, + // otherwise `db.operation.name` SHOULD be `BATCH` or some other database + // system specific term if more applicable. + DBOperationNameKey = attribute.Key("db.operation.name") + + // DBQuerySummaryKey is the attribute Key conforming to the "db.query.summary" + // semantic conventions. It represents the low cardinality summary of a database + // query. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "SELECT wuser_table", "INSERT shipping_details SELECT orders", "get + // user by id" + // Note: The query summary describes a class of database queries and is useful + // as a grouping key, especially when analyzing telemetry for database + // calls involving complex queries. + // + // Summary may be available to the instrumentation through + // instrumentation hooks or other means. If it is not available, + // instrumentations + // that support query parsing SHOULD generate a summary following + // [Generating query summary] + // section. + // + // [Generating query summary]: /docs/database/database-spans.md#generating-a-summary-of-the-query + DBQuerySummaryKey = attribute.Key("db.query.summary") + + // DBQueryTextKey is the attribute Key conforming to the "db.query.text" + // semantic conventions. It represents the database query being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "SELECT * FROM wuser_table where username = ?", "SET mykey ?" + // Note: For sanitization see [Sanitization of `db.query.text`]. + // For batch operations, if the individual operations are known to have the same + // query text then that query text SHOULD be used, otherwise all of the + // individual query texts SHOULD be concatenated with separator `; ` or some + // other database system specific separator if more applicable. + // Parameterized query text SHOULD NOT be sanitized. Even though parameterized + // query text can potentially have sensitive data, by using a parameterized + // query the user is giving a strong signal that any sensitive data will be + // passed as parameter values, and the benefit to observability of capturing the + // static part of the query text by default outweighs the risk. 
+ // + // [Sanitization of `db.query.text`]: /docs/database/database-spans.md#sanitization-of-dbquerytext + DBQueryTextKey = attribute.Key("db.query.text") + + // DBResponseReturnedRowsKey is the attribute Key conforming to the + // "db.response.returned_rows" semantic conventions. It represents the number of + // rows returned by the operation. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 10, 30, 1000 + DBResponseReturnedRowsKey = attribute.Key("db.response.returned_rows") + + // DBResponseStatusCodeKey is the attribute Key conforming to the + // "db.response.status_code" semantic conventions. It represents the database + // response status code. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "102", "ORA-17002", "08P01", "404" + // Note: The status code returned by the database. Usually it represents an + // error code, but may also represent partial success, warning, or differentiate + // between various types of successful outcomes. + // Semantic conventions for individual database systems SHOULD document what + // `db.response.status_code` means in the context of that system. + DBResponseStatusCodeKey = attribute.Key("db.response.status_code") + + // DBStoredProcedureNameKey is the attribute Key conforming to the + // "db.stored_procedure.name" semantic conventions. It represents the name of a + // stored procedure within the database. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "GetCustomer" + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // + // For batch operations, if the individual operations are known to have the same + // stored procedure name then that stored procedure name SHOULD be used. 
+ DBStoredProcedureNameKey = attribute.Key("db.stored_procedure.name") + + // DBSystemNameKey is the attribute Key conforming to the "db.system.name" + // semantic conventions. It represents the database management system (DBMS) + // product as identified by the client instrumentation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: + // Note: The actual DBMS may differ from the one identified by the client. For + // example, when using PostgreSQL client libraries to connect to a CockroachDB, + // the `db.system.name` is set to `postgresql` based on the instrumentation's + // best knowledge. + DBSystemNameKey = attribute.Key("db.system.name") +) + +// DBClientConnectionPoolName returns an attribute KeyValue conforming to the +// "db.client.connection.pool.name" semantic conventions. It represents the name +// of the connection pool; unique within the instrumented application. In case +// the connection pool implementation doesn't provide a name, instrumentation +// SHOULD use a combination of parameters that would make the name unique, for +// example, combining attributes `server.address`, `server.port`, and +// `db.namespace`, formatted as `server.address:server.port/db.namespace`. +// Instrumentations that generate connection pool name following different +// patterns SHOULD document it. +func DBClientConnectionPoolName(val string) attribute.KeyValue { + return DBClientConnectionPoolNameKey.String(val) +} + +// DBCollectionName returns an attribute KeyValue conforming to the +// "db.collection.name" semantic conventions. It represents the name of a +// collection (table, container) within the database. +func DBCollectionName(val string) attribute.KeyValue { + return DBCollectionNameKey.String(val) +} + +// DBNamespace returns an attribute KeyValue conforming to the "db.namespace" +// semantic conventions. It represents the name of the database, fully qualified +// within the server address and port. 
+func DBNamespace(val string) attribute.KeyValue { + return DBNamespaceKey.String(val) +} + +// DBOperationBatchSize returns an attribute KeyValue conforming to the +// "db.operation.batch.size" semantic conventions. It represents the number of +// queries included in a batch operation. +func DBOperationBatchSize(val int) attribute.KeyValue { + return DBOperationBatchSizeKey.Int(val) +} + +// DBOperationName returns an attribute KeyValue conforming to the +// "db.operation.name" semantic conventions. It represents the name of the +// operation or command being executed. +func DBOperationName(val string) attribute.KeyValue { + return DBOperationNameKey.String(val) +} + +// DBOperationParameter returns an attribute KeyValue conforming to the +// "db.operation.parameter" semantic conventions. It represents a database +// operation parameter, with `` being the parameter name, and the attribute +// value being a string representation of the parameter value. +func DBOperationParameter(key string, val string) attribute.KeyValue { + return attribute.String("db.operation.parameter."+key, val) +} + +// DBQueryParameter returns an attribute KeyValue conforming to the +// "db.query.parameter" semantic conventions. It represents a database query +// parameter, with `` being the parameter name, and the attribute value +// being a string representation of the parameter value. +func DBQueryParameter(key string, val string) attribute.KeyValue { + return attribute.String("db.query.parameter."+key, val) +} + +// DBQuerySummary returns an attribute KeyValue conforming to the +// "db.query.summary" semantic conventions. It represents the low cardinality +// summary of a database query. +func DBQuerySummary(val string) attribute.KeyValue { + return DBQuerySummaryKey.String(val) +} + +// DBQueryText returns an attribute KeyValue conforming to the "db.query.text" +// semantic conventions. It represents the database query being executed. 
+func DBQueryText(val string) attribute.KeyValue { + return DBQueryTextKey.String(val) +} + +// DBResponseReturnedRows returns an attribute KeyValue conforming to the +// "db.response.returned_rows" semantic conventions. It represents the number of +// rows returned by the operation. +func DBResponseReturnedRows(val int) attribute.KeyValue { + return DBResponseReturnedRowsKey.Int(val) +} + +// DBResponseStatusCode returns an attribute KeyValue conforming to the +// "db.response.status_code" semantic conventions. It represents the database +// response status code. +func DBResponseStatusCode(val string) attribute.KeyValue { + return DBResponseStatusCodeKey.String(val) +} + +// DBStoredProcedureName returns an attribute KeyValue conforming to the +// "db.stored_procedure.name" semantic conventions. It represents the name of a +// stored procedure within the database. +func DBStoredProcedureName(val string) attribute.KeyValue { + return DBStoredProcedureNameKey.String(val) +} + +// Enum values for db.client.connection.state +var ( + // idle + // Stability: development + DBClientConnectionStateIdle = DBClientConnectionStateKey.String("idle") + // used + // Stability: development + DBClientConnectionStateUsed = DBClientConnectionStateKey.String("used") +) + +// Enum values for db.system.name +var ( + // Some other SQL database. Fallback only. 
+ // Stability: development + DBSystemNameOtherSQL = DBSystemNameKey.String("other_sql") + // [Adabas (Adaptable Database System)] + // Stability: development + // + // [Adabas (Adaptable Database System)]: https://documentation.softwareag.com/?pf=adabas + DBSystemNameSoftwareagAdabas = DBSystemNameKey.String("softwareag.adabas") + // [Actian Ingres] + // Stability: development + // + // [Actian Ingres]: https://www.actian.com/databases/ingres/ + DBSystemNameActianIngres = DBSystemNameKey.String("actian.ingres") + // [Amazon DynamoDB] + // Stability: development + // + // [Amazon DynamoDB]: https://aws.amazon.com/pm/dynamodb/ + DBSystemNameAWSDynamoDB = DBSystemNameKey.String("aws.dynamodb") + // [Amazon Redshift] + // Stability: development + // + // [Amazon Redshift]: https://aws.amazon.com/redshift/ + DBSystemNameAWSRedshift = DBSystemNameKey.String("aws.redshift") + // [Azure Cosmos DB] + // Stability: development + // + // [Azure Cosmos DB]: https://learn.microsoft.com/azure/cosmos-db + DBSystemNameAzureCosmosDB = DBSystemNameKey.String("azure.cosmosdb") + // [InterSystems Caché] + // Stability: development + // + // [InterSystems Caché]: https://www.intersystems.com/products/cache/ + DBSystemNameIntersystemsCache = DBSystemNameKey.String("intersystems.cache") + // [Apache Cassandra] + // Stability: development + // + // [Apache Cassandra]: https://cassandra.apache.org/ + DBSystemNameCassandra = DBSystemNameKey.String("cassandra") + // [ClickHouse] + // Stability: development + // + // [ClickHouse]: https://clickhouse.com/ + DBSystemNameClickHouse = DBSystemNameKey.String("clickhouse") + // [CockroachDB] + // Stability: development + // + // [CockroachDB]: https://www.cockroachlabs.com/ + DBSystemNameCockroachDB = DBSystemNameKey.String("cockroachdb") + // [Couchbase] + // Stability: development + // + // [Couchbase]: https://www.couchbase.com/ + DBSystemNameCouchbase = DBSystemNameKey.String("couchbase") + // [Apache CouchDB] + // Stability: development + // 
+ // [Apache CouchDB]: https://couchdb.apache.org/ + DBSystemNameCouchDB = DBSystemNameKey.String("couchdb") + // [Apache Derby] + // Stability: development + // + // [Apache Derby]: https://db.apache.org/derby/ + DBSystemNameDerby = DBSystemNameKey.String("derby") + // [Elasticsearch] + // Stability: development + // + // [Elasticsearch]: https://www.elastic.co/elasticsearch + DBSystemNameElasticsearch = DBSystemNameKey.String("elasticsearch") + // [Firebird] + // Stability: development + // + // [Firebird]: https://www.firebirdsql.org/ + DBSystemNameFirebirdSQL = DBSystemNameKey.String("firebirdsql") + // [Google Cloud Spanner] + // Stability: development + // + // [Google Cloud Spanner]: https://cloud.google.com/spanner + DBSystemNameGCPSpanner = DBSystemNameKey.String("gcp.spanner") + // [Apache Geode] + // Stability: development + // + // [Apache Geode]: https://geode.apache.org/ + DBSystemNameGeode = DBSystemNameKey.String("geode") + // [H2 Database] + // Stability: development + // + // [H2 Database]: https://h2database.com/ + DBSystemNameH2database = DBSystemNameKey.String("h2database") + // [Apache HBase] + // Stability: development + // + // [Apache HBase]: https://hbase.apache.org/ + DBSystemNameHBase = DBSystemNameKey.String("hbase") + // [Apache Hive] + // Stability: development + // + // [Apache Hive]: https://hive.apache.org/ + DBSystemNameHive = DBSystemNameKey.String("hive") + // [HyperSQL Database] + // Stability: development + // + // [HyperSQL Database]: https://hsqldb.org/ + DBSystemNameHSQLDB = DBSystemNameKey.String("hsqldb") + // [IBM Db2] + // Stability: development + // + // [IBM Db2]: https://www.ibm.com/db2 + DBSystemNameIBMDB2 = DBSystemNameKey.String("ibm.db2") + // [IBM Informix] + // Stability: development + // + // [IBM Informix]: https://www.ibm.com/products/informix + DBSystemNameIBMInformix = DBSystemNameKey.String("ibm.informix") + // [IBM Netezza] + // Stability: development + // + // [IBM Netezza]: 
https://www.ibm.com/products/netezza + DBSystemNameIBMNetezza = DBSystemNameKey.String("ibm.netezza") + // [InfluxDB] + // Stability: development + // + // [InfluxDB]: https://www.influxdata.com/ + DBSystemNameInfluxDB = DBSystemNameKey.String("influxdb") + // [Instant] + // Stability: development + // + // [Instant]: https://www.instantdb.com/ + DBSystemNameInstantDB = DBSystemNameKey.String("instantdb") + // [MariaDB] + // Stability: stable + // + // [MariaDB]: https://mariadb.org/ + DBSystemNameMariaDB = DBSystemNameKey.String("mariadb") + // [Memcached] + // Stability: development + // + // [Memcached]: https://memcached.org/ + DBSystemNameMemcached = DBSystemNameKey.String("memcached") + // [MongoDB] + // Stability: development + // + // [MongoDB]: https://www.mongodb.com/ + DBSystemNameMongoDB = DBSystemNameKey.String("mongodb") + // [Microsoft SQL Server] + // Stability: stable + // + // [Microsoft SQL Server]: https://www.microsoft.com/sql-server + DBSystemNameMicrosoftSQLServer = DBSystemNameKey.String("microsoft.sql_server") + // [MySQL] + // Stability: stable + // + // [MySQL]: https://www.mysql.com/ + DBSystemNameMySQL = DBSystemNameKey.String("mysql") + // [Neo4j] + // Stability: development + // + // [Neo4j]: https://neo4j.com/ + DBSystemNameNeo4j = DBSystemNameKey.String("neo4j") + // [OpenSearch] + // Stability: development + // + // [OpenSearch]: https://opensearch.org/ + DBSystemNameOpenSearch = DBSystemNameKey.String("opensearch") + // [Oracle Database] + // Stability: development + // + // [Oracle Database]: https://www.oracle.com/database/ + DBSystemNameOracleDB = DBSystemNameKey.String("oracle.db") + // [PostgreSQL] + // Stability: stable + // + // [PostgreSQL]: https://www.postgresql.org/ + DBSystemNamePostgreSQL = DBSystemNameKey.String("postgresql") + // [Redis] + // Stability: development + // + // [Redis]: https://redis.io/ + DBSystemNameRedis = DBSystemNameKey.String("redis") + // [SAP HANA] + // Stability: development + // + // [SAP 
HANA]: https://www.sap.com/products/technology-platform/hana/what-is-sap-hana.html + DBSystemNameSAPHANA = DBSystemNameKey.String("sap.hana") + // [SAP MaxDB] + // Stability: development + // + // [SAP MaxDB]: https://maxdb.sap.com/ + DBSystemNameSAPMaxDB = DBSystemNameKey.String("sap.maxdb") + // [SQLite] + // Stability: development + // + // [SQLite]: https://www.sqlite.org/ + DBSystemNameSQLite = DBSystemNameKey.String("sqlite") + // [Teradata] + // Stability: development + // + // [Teradata]: https://www.teradata.com/ + DBSystemNameTeradata = DBSystemNameKey.String("teradata") + // [Trino] + // Stability: development + // + // [Trino]: https://trino.io/ + DBSystemNameTrino = DBSystemNameKey.String("trino") +) + +// Namespace: deployment +const ( + // DeploymentEnvironmentNameKey is the attribute Key conforming to the + // "deployment.environment.name" semantic conventions. It represents the name of + // the [deployment environment] (aka deployment tier). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "staging", "production" + // Note: `deployment.environment.name` does not affect the uniqueness + // constraints defined through + // the `service.namespace`, `service.name` and `service.instance.id` resource + // attributes. + // This implies that resources carrying the following attribute combinations + // MUST be + // considered to be identifying the same service: + // + // - `service.name=frontend`, `deployment.environment.name=production` + // - `service.name=frontend`, `deployment.environment.name=staging`. + // + // + // [deployment environment]: https://wikipedia.org/wiki/Deployment_environment + DeploymentEnvironmentNameKey = attribute.Key("deployment.environment.name") + + // DeploymentIDKey is the attribute Key conforming to the "deployment.id" + // semantic conventions. It represents the id of the deployment. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1208" + DeploymentIDKey = attribute.Key("deployment.id") + + // DeploymentNameKey is the attribute Key conforming to the "deployment.name" + // semantic conventions. It represents the name of the deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "deploy my app", "deploy-frontend" + DeploymentNameKey = attribute.Key("deployment.name") + + // DeploymentStatusKey is the attribute Key conforming to the + // "deployment.status" semantic conventions. It represents the status of the + // deployment. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + DeploymentStatusKey = attribute.Key("deployment.status") +) + +// DeploymentEnvironmentName returns an attribute KeyValue conforming to the +// "deployment.environment.name" semantic conventions. It represents the name of +// the [deployment environment] (aka deployment tier). +// +// [deployment environment]: https://wikipedia.org/wiki/Deployment_environment +func DeploymentEnvironmentName(val string) attribute.KeyValue { + return DeploymentEnvironmentNameKey.String(val) +} + +// DeploymentID returns an attribute KeyValue conforming to the "deployment.id" +// semantic conventions. It represents the id of the deployment. +func DeploymentID(val string) attribute.KeyValue { + return DeploymentIDKey.String(val) +} + +// DeploymentName returns an attribute KeyValue conforming to the +// "deployment.name" semantic conventions. It represents the name of the +// deployment. 
+func DeploymentName(val string) attribute.KeyValue { + return DeploymentNameKey.String(val) +} + +// Enum values for deployment.status +var ( + // failed + // Stability: development + DeploymentStatusFailed = DeploymentStatusKey.String("failed") + // succeeded + // Stability: development + DeploymentStatusSucceeded = DeploymentStatusKey.String("succeeded") +) + +// Namespace: destination +const ( + // DestinationAddressKey is the attribute Key conforming to the + // "destination.address" semantic conventions. It represents the destination + // address - domain name if available without reverse DNS lookup; otherwise, IP + // address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "destination.example.com", "10.1.2.80", "/tmp/my.sock" + // Note: When observed from the source side, and when communicating through an + // intermediary, `destination.address` SHOULD represent the destination address + // behind any intermediaries, for example proxies, if it's available. + DestinationAddressKey = attribute.Key("destination.address") + + // DestinationPortKey is the attribute Key conforming to the "destination.port" + // semantic conventions. It represents the destination port number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 3389, 2888 + DestinationPortKey = attribute.Key("destination.port") +) + +// DestinationAddress returns an attribute KeyValue conforming to the +// "destination.address" semantic conventions. It represents the destination +// address - domain name if available without reverse DNS lookup; otherwise, IP +// address or Unix domain socket name. +func DestinationAddress(val string) attribute.KeyValue { + return DestinationAddressKey.String(val) +} + +// DestinationPort returns an attribute KeyValue conforming to the +// "destination.port" semantic conventions. It represents the destination port +// number. 
+func DestinationPort(val int) attribute.KeyValue { + return DestinationPortKey.Int(val) +} + +// Namespace: device +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123456789012345", "01:23:45:67:89:AB" + // Note: Its value SHOULD be identical for all apps on a device and it SHOULD + // NOT change if an app is uninstalled and re-installed. + // However, it might be resettable by the user for all apps on a device. + // Hardware IDs (e.g. vendor-specific serial number, IMEI or MAC address) MAY be + // used as values. + // + // More information about Android identifier best practices can be found in the + // [Android user data IDs guide]. + // + // > [!WARNING]> This attribute may contain sensitive (PII) information. Caution + // > should be taken when storing personal data or anything which can identify a + // > user. GDPR and data protection laws may apply, + // > ensure you do your own due diligence.> Due to these reasons, this + // > identifier is not recommended for consumer applications and will likely + // > result in rejection from both Google Play and App Store. + // > However, it may be appropriate for specific enterprise scenarios, such as + // > kiosk devices or enterprise-managed devices, with appropriate compliance + // > clearance. + // > Any instrumentation providing this identifier MUST implement it as an + // > opt-in feature.> See [`app.installation.id`]> for a more + // > privacy-preserving alternative. 
+ // + // [Android user data IDs guide]: https://developer.android.com/training/articles/user-data-ids + // [`app.installation.id`]: /docs/registry/attributes/app.md#app-installation-id + DeviceIDKey = attribute.Key("device.id") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of the + // device manufacturer. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Apple", "Samsung" + // Note: The Android OS provides this field via [Build]. iOS apps SHOULD + // hardcode the value `Apple`. + // + // [Build]: https://developer.android.com/reference/android/os/Build#MANUFACTURER + DeviceManufacturerKey = attribute.Key("device.manufacturer") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "iPhone3,4", "SM-G920F" + // Note: It's recommended this value represents a machine-readable version of + // the model identifier rather than the market or consumer-friendly name of the + // device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the "device.model.name" + // semantic conventions. It represents the marketing name for the device model. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "iPhone 6s Plus", "Samsung Galaxy S6" + // Note: It's recommended this value represents a human-readable version of the + // device model rather than a machine-readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" semantic +// conventions. 
It represents a unique identifier representing the device. +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer. +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device. +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name for +// the device model. +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// Namespace: disk +const ( + // DiskIODirectionKey is the attribute Key conforming to the "disk.io.direction" + // semantic conventions. It represents the disk IO operation direction. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "read" + DiskIODirectionKey = attribute.Key("disk.io.direction") +) + +// Enum values for disk.io.direction +var ( + // read + // Stability: development + DiskIODirectionRead = DiskIODirectionKey.String("read") + // write + // Stability: development + DiskIODirectionWrite = DiskIODirectionKey.String("write") +) + +// Namespace: dns +const ( + // DNSAnswersKey is the attribute Key conforming to the "dns.answers" semantic + // conventions. It represents the list of IPv4 or IPv6 addresses resolved during + // DNS lookup. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "10.0.0.1", "2001:0db8:85a3:0000:0000:8a2e:0370:7334" + DNSAnswersKey = attribute.Key("dns.answers") + + // DNSQuestionNameKey is the attribute Key conforming to the "dns.question.name" + // semantic conventions. It represents the name being queried. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "www.example.com", "opentelemetry.io" + // Note: If the name field contains non-printable characters (below 32 or above + // 126), those characters should be represented as escaped base 10 integers + // (\DDD). Back slashes and quotes should be escaped. Tabs, carriage returns, + // and line feeds should be converted to \t, \r, and \n respectively. + DNSQuestionNameKey = attribute.Key("dns.question.name") +) + +// DNSAnswers returns an attribute KeyValue conforming to the "dns.answers" +// semantic conventions. It represents the list of IPv4 or IPv6 addresses +// resolved during DNS lookup. +func DNSAnswers(val ...string) attribute.KeyValue { + return DNSAnswersKey.StringSlice(val) +} + +// DNSQuestionName returns an attribute KeyValue conforming to the +// "dns.question.name" semantic conventions. It represents the name being +// queried. +func DNSQuestionName(val string) attribute.KeyValue { + return DNSQuestionNameKey.String(val) +} + +// Namespace: elasticsearch +const ( + // ElasticsearchNodeNameKey is the attribute Key conforming to the + // "elasticsearch.node.name" semantic conventions. It represents the represents + // the human-readable identifier of the node/instance to which a request was + // routed. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "instance-0000000001" + ElasticsearchNodeNameKey = attribute.Key("elasticsearch.node.name") +) + +// ElasticsearchNodeName returns an attribute KeyValue conforming to the +// "elasticsearch.node.name" semantic conventions. It represents the represents +// the human-readable identifier of the node/instance to which a request was +// routed. +func ElasticsearchNodeName(val string) attribute.KeyValue { + return ElasticsearchNodeNameKey.String(val) +} + +// Namespace: enduser +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" semantic + // conventions. It represents the unique identifier of an end user in the + // system. It maybe a username, email address, or other identifier. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "username" + // Note: Unique identifier of an end user in the system. + // + // > [!Warning] + // > This field contains sensitive (PII) information. + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserPseudoIDKey is the attribute Key conforming to the "enduser.pseudo.id" + // semantic conventions. It represents the pseudonymous identifier of an end + // user. This identifier should be a random value that is not directly linked or + // associated with the end user's actual identity. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "QdH5CAWJgqVT4rOr0qtumf" + // Note: Pseudonymous identifier of an end user. + // + // > [!Warning] + // > This field contains sensitive (linkable PII) information. + EnduserPseudoIDKey = attribute.Key("enduser.pseudo.id") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the unique identifier of an end user in +// the system. It maybe a username, email address, or other identifier. 
+func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserPseudoID returns an attribute KeyValue conforming to the +// "enduser.pseudo.id" semantic conventions. It represents the pseudonymous +// identifier of an end user. This identifier should be a random value that is +// not directly linked or associated with the end user's actual identity. +func EnduserPseudoID(val string) attribute.KeyValue { + return EnduserPseudoIDKey.String(val) +} + +// Namespace: error +const ( + // ErrorMessageKey is the attribute Key conforming to the "error.message" + // semantic conventions. It represents a message providing more detail about an + // error in human-readable form. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Unexpected input type: string", "The user has exceeded their + // storage quota" + // Note: `error.message` should provide additional context and detail about an + // error. + // It is NOT RECOMMENDED to duplicate the value of `error.type` in + // `error.message`. + // It is also NOT RECOMMENDED to duplicate the value of `exception.message` in + // `error.message`. + // + // `error.message` is NOT RECOMMENDED for metrics or spans due to its unbounded + // cardinality and overlap with span status. + ErrorMessageKey = attribute.Key("error.message") + + // ErrorTypeKey is the attribute Key conforming to the "error.type" semantic + // conventions. It represents the describes a class of error the operation ended + // with. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "timeout", "java.net.UnknownHostException", + // "server_certificate_invalid", "500" + // Note: The `error.type` SHOULD be predictable, and SHOULD have low + // cardinality. + // + // When `error.type` is set to a type (e.g., an exception type), its + // canonical class name identifying the type within the artifact SHOULD be used. 
+ // + // Instrumentations SHOULD document the list of errors they report. + // + // The cardinality of `error.type` within one instrumentation library SHOULD be + // low. + // Telemetry consumers that aggregate data from multiple instrumentation + // libraries and applications + // should be prepared for `error.type` to have high cardinality at query time + // when no + // additional filters are applied. + // + // If the operation has completed successfully, instrumentations SHOULD NOT set + // `error.type`. + // + // If a specific domain defines its own set of error identifiers (such as HTTP + // or gRPC status codes), + // it's RECOMMENDED to: + // + // - Use a domain-specific attribute + // - Set `error.type` to capture all errors, regardless of whether they are + // defined within the domain-specific set or not. + ErrorTypeKey = attribute.Key("error.type") +) + +// ErrorMessage returns an attribute KeyValue conforming to the "error.message" +// semantic conventions. It represents a message providing more detail about an +// error in human-readable form. +func ErrorMessage(val string) attribute.KeyValue { + return ErrorMessageKey.String(val) +} + +// Enum values for error.type +var ( + // A fallback error value to be used when the instrumentation doesn't define a + // custom value. + // + // Stability: stable + ErrorTypeOther = ErrorTypeKey.String("_OTHER") +) + +// Namespace: exception +const ( + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "Division by zero", "Can't convert 'int' object to str implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. 
It represents a stacktrace as a + // string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: Exception in thread "main" java.lang.RuntimeException: Test + // exception\n at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at + // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at + // com.example.GenerateTrace.main(GenerateTrace.java:5) + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") + + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the exception + // should be preferred over the static type in languages that support it. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "java.net.ConnectException", "OSError" + ExceptionTypeKey = attribute.Key("exception.type") +) + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// ExceptionType returns an attribute KeyValue conforming to the "exception.type" +// semantic conventions. It represents the type of the exception (its +// fully-qualified class name, if applicable). 
The dynamic type of the exception +// should be preferred over the static type in languages that support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// Namespace: faas +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the serverless + // function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FaaSColdstartKey = attribute.Key("faas.coldstart") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron Expression]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0/5 * * * ? * + // + // [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm + FaaSCronKey = attribute.Key("faas.cron") + + // FaaSDocumentCollectionKey is the attribute Key conforming to the + // "faas.document.collection" semantic conventions. It represents the name of + // the source on which the triggering operation was performed. For example, in + // Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the + // database name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "myBucketName", "myDbName" + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + + // FaaSDocumentNameKey is the attribute Key conforming to the + // "faas.document.name" semantic conventions. It represents the document + // name/table subjected to the operation. For example, in Cloud Storage or S3 is + // the name of the file, and in Cosmos DB the table name. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "myFile.txt", "myTableName" + FaaSDocumentNameKey = attribute.Key("faas.document.name") + + // FaaSDocumentOperationKey is the attribute Key conforming to the + // "faas.document.operation" semantic conventions. It represents the describes + // the type of the operation that was performed on the data. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + + // FaaSDocumentTimeKey is the attribute Key conforming to the + // "faas.document.time" semantic conventions. It represents a string containing + // the time when the data was accessed in the [ISO 8601] format expressed in + // [UTC]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 2020-01-23T13:47:06Z + // + // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html + // [UTC]: https://www.w3.org/TR/NOTE-datetime + FaaSDocumentTimeKey = attribute.Key("faas.document.time") + + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a string, + // that will be potentially reused for other invocations to the same + // function/function version. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de" + // Note: - **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSInvocationIDKey is the attribute Key conforming to the + // "faas.invocation_id" semantic conventions. It represents the invocation ID of + // the current function invocation. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: af9d5aa4-a685-4c5f-a22b-444f80b3cc28 + FaaSInvocationIDKey = attribute.Key("faas.invocation_id") + + // FaaSInvokedNameKey is the attribute Key conforming to the "faas.invoked_name" + // semantic conventions. It represents the name of the invoked function. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: my-function + // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked + // function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud region of + // the invoked function. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: eu-central-1 + // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked + // function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") + + // FaaSMaxMemoryKey is the attribute Key conforming to the "faas.max_memory" + // semantic conventions. It represents the amount of memory available to the + // serverless function converted to Bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Note: It's recommended to set this attribute since e.g. too little memory can + // easily stop a Java AWS Lambda function from working correctly. 
On AWS Lambda, + // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this + // information (which must be multiplied by 1,048,576). + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") + + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this runtime + // instance executes. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-function", "myazurefunctionapp/some-function-name" + // Note: This is the name of the function as configured/deployed on the FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function.name`] + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud providers/products: + // + // - **Azure:** The full name `/`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `cloud.resource_id` attribute). + // + // + // [`code.namespace`/`code.function.name`]: /docs/general/attributes.md#source-code-attributes + FaaSNameKey = attribute.Key("faas.name") + + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation time + // in the [ISO 8601] format expressed in [UTC]. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 2020-01-23T13:47:06Z + // + // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html + // [UTC]: https://www.w3.org/TR/NOTE-datetime + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" semantic + // conventions. It represents the type of the trigger which caused this function + // invocation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FaaSTriggerKey = attribute.Key("faas.trigger") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" semantic + // conventions. It represents the immutable version of the function being + // executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "26", "pinkfroid-00002" + // Note: Depending on the cloud provider and platform, use: + // + // - **AWS Lambda:** The [function version] + // (an integer represented as a decimal string). + // - **Google Cloud Run (Services):** The [revision] + // (i.e., the function name plus the revision suffix). + // - **Google Cloud Functions:** The value of the + // [`K_REVISION` environment variable]. + // - **Azure Functions:** Not applicable. Do not set this attribute. + // + // + // [function version]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html + // [revision]: https://cloud.google.com/run/docs/managing/revisions + // [`K_REVISION` environment variable]: https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically + FaaSVersionKey = attribute.Key("faas.version") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the "faas.coldstart" +// semantic conventions. It represents a boolean that is true if the serverless +// function is executed for the first time (aka cold-start). 
+func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" semantic +// conventions. It represents a string containing the schedule period as +// [Cron Expression]. +// +// [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of the +// source on which the triggering operation was performed. For example, in Cloud +// Storage or S3 corresponds to the bucket name, and in Cosmos DB to the database +// name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 is +// the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO 8601] format expressed in +// [UTC]. +// +// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html +// [UTC]: https://www.w3.org/TR/NOTE-datetime +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the "faas.instance" +// semantic conventions. 
It represents the execution environment ID as a string, +// that will be potentially reused for other invocations to the same +// function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSInvocationID returns an attribute KeyValue conforming to the +// "faas.invocation_id" semantic conventions. It represents the invocation ID of +// the current function invocation. +func FaaSInvocationID(val string) attribute.KeyValue { + return FaaSInvocationIDKey.String(val) +} + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region of +// the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function converted to Bytes. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" semantic +// conventions. It represents the name of the single function that this runtime +// instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" semantic +// conventions. It represents a string containing the function invocation time in +// the [ISO 8601] format expressed in [UTC]. 
+// +// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html +// [UTC]: https://www.w3.org/TR/NOTE-datetime +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the "faas.version" +// semantic conventions. It represents the immutable version of the function +// being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// Enum values for faas.document.operation +var ( + // When a new object is created. + // Stability: development + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified. + // Stability: development + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted. + // Stability: development + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +// Enum values for faas.invoked_provider +var ( + // Alibaba Cloud + // Stability: development + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + // Stability: development + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + // Stability: development + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + // Stability: development + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + // Stability: development + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +// Enum values for faas.trigger +var ( + // A response to some data source operation such as a database or filesystem + // read/write + // Stability: development + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + // Stability: development + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed 
when messages are sent to a messaging system + // Stability: development + FaaSTriggerPubSub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + // Stability: development + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + // Stability: development + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// Namespace: feature_flag +const ( + // FeatureFlagContextIDKey is the attribute Key conforming to the + // "feature_flag.context.id" semantic conventions. It represents the unique + // identifier for the flag evaluation context. For example, the targeting key. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "5157782b-2203-4c80-a857-dbbd5e7761db" + FeatureFlagContextIDKey = attribute.Key("feature_flag.context.id") + + // FeatureFlagKeyKey is the attribute Key conforming to the "feature_flag.key" + // semantic conventions. It represents the lookup key of the feature flag. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "logo-color" + FeatureFlagKeyKey = attribute.Key("feature_flag.key") + + // FeatureFlagProviderNameKey is the attribute Key conforming to the + // "feature_flag.provider.name" semantic conventions. It identifies the + // feature flag provider. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "Flag Manager" + FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider.name") + + // FeatureFlagResultReasonKey is the attribute Key conforming to the + // "feature_flag.result.reason" semantic conventions. It represents the reason + // code which shows how a feature flag value was determined. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "static", "targeting_match", "error", "default" + FeatureFlagResultReasonKey = attribute.Key("feature_flag.result.reason") + + // FeatureFlagResultValueKey is the attribute Key conforming to the + // "feature_flag.result.value" semantic conventions. It represents the evaluated + // value of the feature flag. + // + // Type: any + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "#ff0000", true, 3 + // Note: With some feature flag providers, feature flag results can be quite + // large or contain private or sensitive details. + // Because of this, `feature_flag.result.variant` is often the preferred + // attribute if it is available. + // + // It may be desirable to redact or otherwise limit the size and scope of + // `feature_flag.result.value` if possible. + // Because the evaluated flag value is unstructured and may be any type, it is + // left to the instrumentation author to determine how best to achieve this. + FeatureFlagResultValueKey = attribute.Key("feature_flag.result.value") + + // FeatureFlagResultVariantKey is the attribute Key conforming to the + // "feature_flag.result.variant" semantic conventions. It represents a semantic + // identifier for an evaluated flag value. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "red", "true", "on" + // Note: A semantic identifier, commonly referred to as a variant, provides a + // means + // for referring to a value without including the value itself. This can + // provide additional context for understanding the meaning behind a value. + // For example, the variant `red` may be used for the value `#c05543`. + FeatureFlagResultVariantKey = attribute.Key("feature_flag.result.variant") + + // FeatureFlagSetIDKey is the attribute Key conforming to the + // "feature_flag.set.id" semantic conventions. 
It represents the identifier of + // the [flag set] to which the feature flag belongs. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "proj-1", "ab98sgs", "service1/dev" + // + // [flag set]: https://openfeature.dev/specification/glossary/#flag-set + FeatureFlagSetIDKey = attribute.Key("feature_flag.set.id") + + // FeatureFlagVersionKey is the attribute Key conforming to the + // "feature_flag.version" semantic conventions. It represents the version of the + // ruleset used during the evaluation. This may be any stable value which + // uniquely identifies the ruleset. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Release_Candidate + // + // Examples: "1", "01ABCDEF" + FeatureFlagVersionKey = attribute.Key("feature_flag.version") +) + +// FeatureFlagContextID returns an attribute KeyValue conforming to the +// "feature_flag.context.id" semantic conventions. It represents the unique +// identifier for the flag evaluation context. For example, the targeting key. +func FeatureFlagContextID(val string) attribute.KeyValue { + return FeatureFlagContextIDKey.String(val) +} + +// FeatureFlagKey returns an attribute KeyValue conforming to the +// "feature_flag.key" semantic conventions. It represents the lookup key of the +// feature flag. +func FeatureFlagKey(val string) attribute.KeyValue { + return FeatureFlagKeyKey.String(val) +} + +// FeatureFlagProviderName returns an attribute KeyValue conforming to the +// "feature_flag.provider.name" semantic conventions. It identifies the +// feature flag provider. +func FeatureFlagProviderName(val string) attribute.KeyValue { + return FeatureFlagProviderNameKey.String(val) +} + +// FeatureFlagResultVariant returns an attribute KeyValue conforming to the +// "feature_flag.result.variant" semantic conventions. It represents a semantic +// identifier for an evaluated flag value. 
+func FeatureFlagResultVariant(val string) attribute.KeyValue { + return FeatureFlagResultVariantKey.String(val) +} + +// FeatureFlagSetID returns an attribute KeyValue conforming to the +// "feature_flag.set.id" semantic conventions. It represents the identifier of +// the [flag set] to which the feature flag belongs. +// +// [flag set]: https://openfeature.dev/specification/glossary/#flag-set +func FeatureFlagSetID(val string) attribute.KeyValue { + return FeatureFlagSetIDKey.String(val) +} + +// FeatureFlagVersion returns an attribute KeyValue conforming to the +// "feature_flag.version" semantic conventions. It represents the version of the +// ruleset used during the evaluation. This may be any stable value which +// uniquely identifies the ruleset. +func FeatureFlagVersion(val string) attribute.KeyValue { + return FeatureFlagVersionKey.String(val) +} + +// Enum values for feature_flag.result.reason +var ( + // The resolved value is static (no dynamic evaluation). + // Stability: release_candidate + FeatureFlagResultReasonStatic = FeatureFlagResultReasonKey.String("static") + // The resolved value fell back to a pre-configured value (no dynamic evaluation + // occurred or dynamic evaluation yielded no result). + // Stability: release_candidate + FeatureFlagResultReasonDefault = FeatureFlagResultReasonKey.String("default") + // The resolved value was the result of a dynamic evaluation, such as a rule or + // specific user-targeting. + // Stability: release_candidate + FeatureFlagResultReasonTargetingMatch = FeatureFlagResultReasonKey.String("targeting_match") + // The resolved value was the result of pseudorandom assignment. + // Stability: release_candidate + FeatureFlagResultReasonSplit = FeatureFlagResultReasonKey.String("split") + // The resolved value was retrieved from cache. 
+ // Stability: release_candidate + FeatureFlagResultReasonCached = FeatureFlagResultReasonKey.String("cached") + // The resolved value was the result of the flag being disabled in the + // management system. + // Stability: release_candidate + FeatureFlagResultReasonDisabled = FeatureFlagResultReasonKey.String("disabled") + // The reason for the resolved value could not be determined. + // Stability: release_candidate + FeatureFlagResultReasonUnknown = FeatureFlagResultReasonKey.String("unknown") + // The resolved value is non-authoritative or possibly out of date + // Stability: release_candidate + FeatureFlagResultReasonStale = FeatureFlagResultReasonKey.String("stale") + // The resolved value was the result of an error. + // Stability: release_candidate + FeatureFlagResultReasonError = FeatureFlagResultReasonKey.String("error") +) + +// Namespace: file +const ( + // FileAccessedKey is the attribute Key conforming to the "file.accessed" + // semantic conventions. It represents the time when the file was last accessed, + // in ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T12:00:00Z" + // Note: This attribute might not be supported by some file systems — NFS, + // FAT32, in embedded OS, etc. + FileAccessedKey = attribute.Key("file.accessed") + + // FileAttributesKey is the attribute Key conforming to the "file.attributes" + // semantic conventions. It represents the array of file attributes. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "readonly", "hidden" + // Note: Attributes names depend on the OS or file system. Here’s a + // non-exhaustive list of values expected for this attribute: `archive`, + // `compressed`, `directory`, `encrypted`, `execute`, `hidden`, `immutable`, + // `journaled`, `read`, `readonly`, `symbolic link`, `system`, `temporary`, + // `write`. 
+ FileAttributesKey = attribute.Key("file.attributes") + + // FileChangedKey is the attribute Key conforming to the "file.changed" semantic + // conventions. It represents the time when the file attributes or metadata was + // last changed, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T12:00:00Z" + // Note: `file.changed` captures the time when any of the file's properties or + // attributes (including the content) are changed, while `file.modified` + // captures the timestamp when the file content is modified. + FileChangedKey = attribute.Key("file.changed") + + // FileCreatedKey is the attribute Key conforming to the "file.created" semantic + // conventions. It represents the time when the file was created, in ISO 8601 + // format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T12:00:00Z" + // Note: This attribute might not be supported by some file systems — NFS, + // FAT32, in embedded OS, etc. + FileCreatedKey = attribute.Key("file.created") + + // FileDirectoryKey is the attribute Key conforming to the "file.directory" + // semantic conventions. It represents the directory where the file is located. + // It should include the drive letter, when appropriate. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/home/user", "C:\Program Files\MyApp" + FileDirectoryKey = attribute.Key("file.directory") + + // FileExtensionKey is the attribute Key conforming to the "file.extension" + // semantic conventions. It represents the file extension, excluding the leading + // dot. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "png", "gz" + // Note: When the file name has multiple extensions (example.tar.gz), only the + // last one should be captured ("gz", not "tar.gz"). 
+ FileExtensionKey = attribute.Key("file.extension") + + // FileForkNameKey is the attribute Key conforming to the "file.fork_name" + // semantic conventions. It represents the name of the fork. A fork is + // additional data associated with a filesystem object. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Zone.Identifier" + // Note: On Linux, a resource fork is used to store additional data with a + // filesystem object. A file always has at least one fork for the data portion, + // and additional forks may exist. + // On NTFS, this is analogous to an Alternate Data Stream (ADS), and the default + // data stream for a file is just called $DATA. Zone.Identifier is commonly used + // by Windows to track contents downloaded from the Internet. An ADS is + // typically of the form: C:\path\to\filename.extension:some_fork_name, and + // some_fork_name is the value that should populate `fork_name`. + // `filename.extension` should populate `file.name`, and `extension` should + // populate `file.extension`. The full path, `file.path`, will include the fork + // name. + FileForkNameKey = attribute.Key("file.fork_name") + + // FileGroupIDKey is the attribute Key conforming to the "file.group.id" + // semantic conventions. It represents the primary Group ID (GID) of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1000" + FileGroupIDKey = attribute.Key("file.group.id") + + // FileGroupNameKey is the attribute Key conforming to the "file.group.name" + // semantic conventions. It represents the primary group name of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "users" + FileGroupNameKey = attribute.Key("file.group.name") + + // FileInodeKey is the attribute Key conforming to the "file.inode" semantic + // conventions. 
It represents the inode representing the file in the filesystem. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "256383" + FileInodeKey = attribute.Key("file.inode") + + // FileModeKey is the attribute Key conforming to the "file.mode" semantic + // conventions. It represents the mode of the file in octal representation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0640" + FileModeKey = attribute.Key("file.mode") + + // FileModifiedKey is the attribute Key conforming to the "file.modified" + // semantic conventions. It represents the time when the file content was last + // modified, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T12:00:00Z" + FileModifiedKey = attribute.Key("file.modified") + + // FileNameKey is the attribute Key conforming to the "file.name" semantic + // conventions. It represents the name of the file including the extension, + // without the directory. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "example.png" + FileNameKey = attribute.Key("file.name") + + // FileOwnerIDKey is the attribute Key conforming to the "file.owner.id" + // semantic conventions. It represents the user ID (UID) or security identifier + // (SID) of the file owner. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1000" + FileOwnerIDKey = attribute.Key("file.owner.id") + + // FileOwnerNameKey is the attribute Key conforming to the "file.owner.name" + // semantic conventions. It represents the username of the file owner. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "root" + FileOwnerNameKey = attribute.Key("file.owner.name") + + // FilePathKey is the attribute Key conforming to the "file.path" semantic + // conventions. It represents the full path to the file, including the file + // name. It should include the drive letter, when appropriate. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/home/alice/example.png", "C:\Program Files\MyApp\myapp.exe" + FilePathKey = attribute.Key("file.path") + + // FileSizeKey is the attribute Key conforming to the "file.size" semantic + // conventions. It represents the file size in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + FileSizeKey = attribute.Key("file.size") + + // FileSymbolicLinkTargetPathKey is the attribute Key conforming to the + // "file.symbolic_link.target_path" semantic conventions. It represents the path + // to the target of a symbolic link. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/usr/bin/python3" + // Note: This attribute is only applicable to symbolic links. + FileSymbolicLinkTargetPathKey = attribute.Key("file.symbolic_link.target_path") +) + +// FileAccessed returns an attribute KeyValue conforming to the "file.accessed" +// semantic conventions. It represents the time when the file was last accessed, +// in ISO 8601 format. +func FileAccessed(val string) attribute.KeyValue { + return FileAccessedKey.String(val) +} + +// FileAttributes returns an attribute KeyValue conforming to the +// "file.attributes" semantic conventions. It represents the array of file +// attributes. 
+func FileAttributes(val ...string) attribute.KeyValue { + return FileAttributesKey.StringSlice(val) +} + +// FileChanged returns an attribute KeyValue conforming to the "file.changed" +// semantic conventions. It represents the time when the file attributes or +// metadata was last changed, in ISO 8601 format. +func FileChanged(val string) attribute.KeyValue { + return FileChangedKey.String(val) +} + +// FileCreated returns an attribute KeyValue conforming to the "file.created" +// semantic conventions. It represents the time when the file was created, in ISO +// 8601 format. +func FileCreated(val string) attribute.KeyValue { + return FileCreatedKey.String(val) +} + +// FileDirectory returns an attribute KeyValue conforming to the "file.directory" +// semantic conventions. It represents the directory where the file is located. +// It should include the drive letter, when appropriate. +func FileDirectory(val string) attribute.KeyValue { + return FileDirectoryKey.String(val) +} + +// FileExtension returns an attribute KeyValue conforming to the "file.extension" +// semantic conventions. It represents the file extension, excluding the leading +// dot. +func FileExtension(val string) attribute.KeyValue { + return FileExtensionKey.String(val) +} + +// FileForkName returns an attribute KeyValue conforming to the "file.fork_name" +// semantic conventions. It represents the name of the fork. A fork is additional +// data associated with a filesystem object. +func FileForkName(val string) attribute.KeyValue { + return FileForkNameKey.String(val) +} + +// FileGroupID returns an attribute KeyValue conforming to the "file.group.id" +// semantic conventions. It represents the primary Group ID (GID) of the file. +func FileGroupID(val string) attribute.KeyValue { + return FileGroupIDKey.String(val) +} + +// FileGroupName returns an attribute KeyValue conforming to the +// "file.group.name" semantic conventions. It represents the primary group name +// of the file. 
+func FileGroupName(val string) attribute.KeyValue { + return FileGroupNameKey.String(val) +} + +// FileInode returns an attribute KeyValue conforming to the "file.inode" +// semantic conventions. It represents the inode representing the file in the +// filesystem. +func FileInode(val string) attribute.KeyValue { + return FileInodeKey.String(val) +} + +// FileMode returns an attribute KeyValue conforming to the "file.mode" semantic +// conventions. It represents the mode of the file in octal representation. +func FileMode(val string) attribute.KeyValue { + return FileModeKey.String(val) +} + +// FileModified returns an attribute KeyValue conforming to the "file.modified" +// semantic conventions. It represents the time when the file content was last +// modified, in ISO 8601 format. +func FileModified(val string) attribute.KeyValue { + return FileModifiedKey.String(val) +} + +// FileName returns an attribute KeyValue conforming to the "file.name" semantic +// conventions. It represents the name of the file including the extension, +// without the directory. +func FileName(val string) attribute.KeyValue { + return FileNameKey.String(val) +} + +// FileOwnerID returns an attribute KeyValue conforming to the "file.owner.id" +// semantic conventions. It represents the user ID (UID) or security identifier +// (SID) of the file owner. +func FileOwnerID(val string) attribute.KeyValue { + return FileOwnerIDKey.String(val) +} + +// FileOwnerName returns an attribute KeyValue conforming to the +// "file.owner.name" semantic conventions. It represents the username of the file +// owner. +func FileOwnerName(val string) attribute.KeyValue { + return FileOwnerNameKey.String(val) +} + +// FilePath returns an attribute KeyValue conforming to the "file.path" semantic +// conventions. It represents the full path to the file, including the file name. +// It should include the drive letter, when appropriate. 
+func FilePath(val string) attribute.KeyValue { + return FilePathKey.String(val) +} + +// FileSize returns an attribute KeyValue conforming to the "file.size" semantic +// conventions. It represents the file size in bytes. +func FileSize(val int) attribute.KeyValue { + return FileSizeKey.Int(val) +} + +// FileSymbolicLinkTargetPath returns an attribute KeyValue conforming to the +// "file.symbolic_link.target_path" semantic conventions. It represents the path +// to the target of a symbolic link. +func FileSymbolicLinkTargetPath(val string) attribute.KeyValue { + return FileSymbolicLinkTargetPathKey.String(val) +} + +// Namespace: gcp +const ( + // GCPAppHubApplicationContainerKey is the attribute Key conforming to the + // "gcp.apphub.application.container" semantic conventions. It represents the + // container within GCP where the AppHub application is defined. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "projects/my-container-project" + GCPAppHubApplicationContainerKey = attribute.Key("gcp.apphub.application.container") + + // GCPAppHubApplicationIDKey is the attribute Key conforming to the + // "gcp.apphub.application.id" semantic conventions. It represents the name of + // the application as configured in AppHub. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-application" + GCPAppHubApplicationIDKey = attribute.Key("gcp.apphub.application.id") + + // GCPAppHubApplicationLocationKey is the attribute Key conforming to the + // "gcp.apphub.application.location" semantic conventions. It represents the GCP + // zone or region where the application is defined. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "us-central1" + GCPAppHubApplicationLocationKey = attribute.Key("gcp.apphub.application.location") + + // GCPAppHubServiceCriticalityTypeKey is the attribute Key conforming to the + // "gcp.apphub.service.criticality_type" semantic conventions. It represents the + // criticality of a service, which indicates its importance to the business. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: [See AppHub type enum] + // + // [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type + GCPAppHubServiceCriticalityTypeKey = attribute.Key("gcp.apphub.service.criticality_type") + + // GCPAppHubServiceEnvironmentTypeKey is the attribute Key conforming to the + // "gcp.apphub.service.environment_type" semantic conventions. It represents the + // environment of a service, which is the stage of a software lifecycle. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: [See AppHub environment type] + // + // [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1 + GCPAppHubServiceEnvironmentTypeKey = attribute.Key("gcp.apphub.service.environment_type") + + // GCPAppHubServiceIDKey is the attribute Key conforming to the + // "gcp.apphub.service.id" semantic conventions. It represents the name of the + // service as configured in AppHub. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-service" + GCPAppHubServiceIDKey = attribute.Key("gcp.apphub.service.id") + + // GCPAppHubWorkloadCriticalityTypeKey is the attribute Key conforming to the + // "gcp.apphub.workload.criticality_type" semantic conventions. It represents + // the criticality of a workload, which indicates its importance to the business. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: [See AppHub type enum] + // + // [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type + GCPAppHubWorkloadCriticalityTypeKey = attribute.Key("gcp.apphub.workload.criticality_type") + + // GCPAppHubWorkloadEnvironmentTypeKey is the attribute Key conforming to the + // "gcp.apphub.workload.environment_type" semantic conventions. It represents + // the environment of a workload, which is the stage of a software lifecycle. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: [See AppHub environment type] + // + // [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1 + GCPAppHubWorkloadEnvironmentTypeKey = attribute.Key("gcp.apphub.workload.environment_type") + + // GCPAppHubWorkloadIDKey is the attribute Key conforming to the + // "gcp.apphub.workload.id" semantic conventions. It represents the name of the + // workload as configured in AppHub. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-workload" + GCPAppHubWorkloadIDKey = attribute.Key("gcp.apphub.workload.id") + + // GCPClientServiceKey is the attribute Key conforming to the + // "gcp.client.service" semantic conventions. It identifies the + // Google Cloud service for which the official client library is intended. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "appengine", "run", "firestore", "alloydb", "spanner" + // Note: Intended to be a stable identifier for Google Cloud client libraries + // that is uniform across implementation languages. The value should be derived + // from the canonical service domain for the service; for example, + // 'foo.googleapis.com' should result in a value of 'foo'. 
+ GCPClientServiceKey = attribute.Key("gcp.client.service") + + // GCPCloudRunJobExecutionKey is the attribute Key conforming to the + // "gcp.cloud_run.job.execution" semantic conventions. It represents the name of + // the Cloud Run [execution] being run for the Job, as set by the + // [`CLOUD_RUN_EXECUTION`] environment variable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "job-name-xxxx", "sample-job-mdw84" + // + // [execution]: https://cloud.google.com/run/docs/managing/job-executions + // [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars + GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") + + // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the + // "gcp.cloud_run.job.task_index" semantic conventions. It represents the index + // for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`] + // environment variable. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0, 1 + // + // [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars + GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") + + // GCPGCEInstanceHostnameKey is the attribute Key conforming to the + // "gcp.gce.instance.hostname" semantic conventions. It represents the hostname + // of a GCE instance. This is the full value of the default or [custom hostname] + // . + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-host1234.example.com", + // "sample-vm.us-west1-b.c.my-project.internal" + // + // [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm + GCPGCEInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") + + // GCPGCEInstanceNameKey is the attribute Key conforming to the + // "gcp.gce.instance.name" semantic conventions. 
It represents the instance name + // of a GCE instance. This is the value provided by `host.name`, the visible + // name of the instance in the Cloud Console UI, and the prefix for the default + // hostname of the instance as defined by the [default internal DNS name]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "instance-1", "my-vm-name" + // + // [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names + GCPGCEInstanceNameKey = attribute.Key("gcp.gce.instance.name") +) + +// GCPAppHubApplicationContainer returns an attribute KeyValue conforming to the +// "gcp.apphub.application.container" semantic conventions. It represents the +// container within GCP where the AppHub application is defined. +func GCPAppHubApplicationContainer(val string) attribute.KeyValue { + return GCPAppHubApplicationContainerKey.String(val) +} + +// GCPAppHubApplicationID returns an attribute KeyValue conforming to the +// "gcp.apphub.application.id" semantic conventions. It represents the name of +// the application as configured in AppHub. +func GCPAppHubApplicationID(val string) attribute.KeyValue { + return GCPAppHubApplicationIDKey.String(val) +} + +// GCPAppHubApplicationLocation returns an attribute KeyValue conforming to the +// "gcp.apphub.application.location" semantic conventions. It represents the GCP +// zone or region where the application is defined. +func GCPAppHubApplicationLocation(val string) attribute.KeyValue { + return GCPAppHubApplicationLocationKey.String(val) +} + +// GCPAppHubServiceID returns an attribute KeyValue conforming to the +// "gcp.apphub.service.id" semantic conventions. It represents the name of the +// service as configured in AppHub. 
+func GCPAppHubServiceID(val string) attribute.KeyValue { + return GCPAppHubServiceIDKey.String(val) +} + +// GCPAppHubWorkloadID returns an attribute KeyValue conforming to the +// "gcp.apphub.workload.id" semantic conventions. It represents the name of the +// workload as configured in AppHub. +func GCPAppHubWorkloadID(val string) attribute.KeyValue { + return GCPAppHubWorkloadIDKey.String(val) +} + +// GCPClientService returns an attribute KeyValue conforming to the +// "gcp.client.service" semantic conventions. It identifies the +// Google Cloud service for which the official client library is intended. +func GCPClientService(val string) attribute.KeyValue { + return GCPClientServiceKey.String(val) +} + +// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.execution" semantic conventions. It represents the name of +// the Cloud Run [execution] being run for the Job, as set by the +// [`CLOUD_RUN_EXECUTION`] environment variable. +// +// [execution]: https://cloud.google.com/run/docs/managing/job-executions +// [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars +func GCPCloudRunJobExecution(val string) attribute.KeyValue { + return GCPCloudRunJobExecutionKey.String(val) +} + +// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index +// for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`] +// environment variable. +// +// [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars +func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { + return GCPCloudRunJobTaskIndexKey.Int(val) +} + +// GCPGCEInstanceHostname returns an attribute KeyValue conforming to the +// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname +// of a GCE instance. 
This is the full value of the default or [custom hostname] +// . +// +// [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm +func GCPGCEInstanceHostname(val string) attribute.KeyValue { + return GCPGCEInstanceHostnameKey.String(val) +} + +// GCPGCEInstanceName returns an attribute KeyValue conforming to the +// "gcp.gce.instance.name" semantic conventions. It represents the instance name +// of a GCE instance. This is the value provided by `host.name`, the visible name +// of the instance in the Cloud Console UI, and the prefix for the default +// hostname of the instance as defined by the [default internal DNS name]. +// +// [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names +func GCPGCEInstanceName(val string) attribute.KeyValue { + return GCPGCEInstanceNameKey.String(val) +} + +// Enum values for gcp.apphub.service.criticality_type +var ( + // Mission critical service. + // Stability: development + GCPAppHubServiceCriticalityTypeMissionCritical = GCPAppHubServiceCriticalityTypeKey.String("MISSION_CRITICAL") + // High impact. + // Stability: development + GCPAppHubServiceCriticalityTypeHigh = GCPAppHubServiceCriticalityTypeKey.String("HIGH") + // Medium impact. + // Stability: development + GCPAppHubServiceCriticalityTypeMedium = GCPAppHubServiceCriticalityTypeKey.String("MEDIUM") + // Low impact. + // Stability: development + GCPAppHubServiceCriticalityTypeLow = GCPAppHubServiceCriticalityTypeKey.String("LOW") +) + +// Enum values for gcp.apphub.service.environment_type +var ( + // Production environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeProduction = GCPAppHubServiceEnvironmentTypeKey.String("PRODUCTION") + // Staging environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeStaging = GCPAppHubServiceEnvironmentTypeKey.String("STAGING") + // Test environment. 
+ // Stability: development + GCPAppHubServiceEnvironmentTypeTest = GCPAppHubServiceEnvironmentTypeKey.String("TEST") + // Development environment. + // Stability: development + GCPAppHubServiceEnvironmentTypeDevelopment = GCPAppHubServiceEnvironmentTypeKey.String("DEVELOPMENT") +) + +// Enum values for gcp.apphub.workload.criticality_type +var ( + // Mission critical service. + // Stability: development + GCPAppHubWorkloadCriticalityTypeMissionCritical = GCPAppHubWorkloadCriticalityTypeKey.String("MISSION_CRITICAL") + // High impact. + // Stability: development + GCPAppHubWorkloadCriticalityTypeHigh = GCPAppHubWorkloadCriticalityTypeKey.String("HIGH") + // Medium impact. + // Stability: development + GCPAppHubWorkloadCriticalityTypeMedium = GCPAppHubWorkloadCriticalityTypeKey.String("MEDIUM") + // Low impact. + // Stability: development + GCPAppHubWorkloadCriticalityTypeLow = GCPAppHubWorkloadCriticalityTypeKey.String("LOW") +) + +// Enum values for gcp.apphub.workload.environment_type +var ( + // Production environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeProduction = GCPAppHubWorkloadEnvironmentTypeKey.String("PRODUCTION") + // Staging environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeStaging = GCPAppHubWorkloadEnvironmentTypeKey.String("STAGING") + // Test environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeTest = GCPAppHubWorkloadEnvironmentTypeKey.String("TEST") + // Development environment. + // Stability: development + GCPAppHubWorkloadEnvironmentTypeDevelopment = GCPAppHubWorkloadEnvironmentTypeKey.String("DEVELOPMENT") +) + +// Namespace: gen_ai +const ( + // GenAIAgentDescriptionKey is the attribute Key conforming to the + // "gen_ai.agent.description" semantic conventions. It represents the free-form + // description of the GenAI agent provided by the application. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Helps with math problems", "Generates fiction stories" + GenAIAgentDescriptionKey = attribute.Key("gen_ai.agent.description") + + // GenAIAgentIDKey is the attribute Key conforming to the "gen_ai.agent.id" + // semantic conventions. It represents the unique identifier of the GenAI agent. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "asst_5j66UpCpwteGg4YSxUnt7lPY" + GenAIAgentIDKey = attribute.Key("gen_ai.agent.id") + + // GenAIAgentNameKey is the attribute Key conforming to the "gen_ai.agent.name" + // semantic conventions. It represents the human-readable name of the GenAI + // agent provided by the application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Math Tutor", "Fiction Writer" + GenAIAgentNameKey = attribute.Key("gen_ai.agent.name") + + // GenAIConversationIDKey is the attribute Key conforming to the + // "gen_ai.conversation.id" semantic conventions. It represents the unique + // identifier for a conversation (session, thread), used to store and correlate + // messages within this conversation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "conv_5j66UpCpwteGg4YSxUnt7lPY" + GenAIConversationIDKey = attribute.Key("gen_ai.conversation.id") + + // GenAIDataSourceIDKey is the attribute Key conforming to the + // "gen_ai.data_source.id" semantic conventions. It represents the data source + // identifier. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "H7STPQYOND" + // Note: Data sources are used by AI agents and RAG applications to store + // grounding data. 
A data source may be an external database, object store, + // document collection, website, or any other storage system used by the GenAI + // agent or application. The `gen_ai.data_source.id` SHOULD match the identifier + // used by the GenAI system rather than a name specific to the external storage, + // such as a database or object store. Semantic conventions referencing + // `gen_ai.data_source.id` MAY also leverage additional attributes, such as + // `db.*`, to further identify and describe the data source. + GenAIDataSourceIDKey = attribute.Key("gen_ai.data_source.id") + + // GenAIInputMessagesKey is the attribute Key conforming to the + // "gen_ai.input.messages" semantic conventions. It represents the chat history + // provided to the model as an input. + // + // Type: any + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "[\n {\n "role": "user",\n "parts": [\n {\n "type": "text",\n + // "content": "Weather in Paris?"\n }\n ]\n },\n {\n "role": "assistant",\n + // "parts": [\n {\n "type": "tool_call",\n "id": + // "call_VSPygqKTWdrhaFErNvMV18Yl",\n "name": "get_weather",\n "arguments": {\n + // "location": "Paris"\n }\n }\n ]\n },\n {\n "role": "tool",\n "parts": [\n {\n + // "type": "tool_call_response",\n "id": " call_VSPygqKTWdrhaFErNvMV18Yl",\n + // "result": "rainy, 57°F"\n }\n ]\n }\n]\n" + // Note: Instrumentations MUST follow [Input messages JSON schema]. + // When the attribute is recorded on events, it MUST be recorded in structured + // form. When recorded on spans, it MAY be recorded as a JSON string if + // structured + // format is not supported and SHOULD be recorded in structured form otherwise. + // + // Messages MUST be provided in the order they were sent to the model. + // Instrumentations MAY provide a way for users to filter or truncate + // input messages. + // + // > [!Warning] + // > This attribute is likely to contain sensitive information including + // > user/PII data. 
+ // + // See [Recording content on attributes] + // section for more details. + // + // [Input messages JSON schema]: /docs/gen-ai/gen-ai-input-messages.json + // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes + GenAIInputMessagesKey = attribute.Key("gen_ai.input.messages") + + // GenAIOperationNameKey is the attribute Key conforming to the + // "gen_ai.operation.name" semantic conventions. It represents the name of the + // operation being performed. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: If one of the predefined values applies, but specific system uses a + // different name it's RECOMMENDED to document it in the semantic conventions + // for specific GenAI system and use system-specific name in the + // instrumentation. If a different name is not documented, instrumentation + // libraries SHOULD use applicable predefined value. + GenAIOperationNameKey = attribute.Key("gen_ai.operation.name") + + // GenAIOutputMessagesKey is the attribute Key conforming to the + // "gen_ai.output.messages" semantic conventions. It represents the messages + // returned by the model where each message represents a specific model response + // (choice, candidate). + // + // Type: any + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "[\n {\n "role": "assistant",\n "parts": [\n {\n "type": "text",\n + // "content": "The weather in Paris is currently rainy with a temperature of + // 57°F."\n }\n ],\n "finish_reason": "stop"\n }\n]\n" + // Note: Instrumentations MUST follow [Output messages JSON schema] + // + // Each message represents a single output choice/candidate generated by + // the model. Each message corresponds to exactly one generation + // (choice/candidate) and vice versa - one choice cannot be split across + // multiple messages or one message cannot contain parts from multiple choices. 
+ // + // When the attribute is recorded on events, it MUST be recorded in structured + // form. When recorded on spans, it MAY be recorded as a JSON string if + // structured + // format is not supported and SHOULD be recorded in structured form otherwise. + // + // Instrumentations MAY provide a way for users to filter or truncate + // output messages. + // + // > [!Warning] + // > This attribute is likely to contain sensitive information including + // > user/PII data. + // + // See [Recording content on attributes] + // section for more details. + // + // [Output messages JSON schema]: /docs/gen-ai/gen-ai-output-messages.json + // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes + GenAIOutputMessagesKey = attribute.Key("gen_ai.output.messages") + + // GenAIOutputTypeKey is the attribute Key conforming to the + // "gen_ai.output.type" semantic conventions. It represents the represents the + // content type requested by the client. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: This attribute SHOULD be used when the client requests output of a + // specific type. The model may return zero or more outputs of this type. + // This attribute specifies the output modality and not the actual output + // format. For example, if an image is requested, the actual output could be a + // URL pointing to an image file. + // Additional output format details may be recorded in the future in the + // `gen_ai.output.{type}.*` attributes. + GenAIOutputTypeKey = attribute.Key("gen_ai.output.type") + + // GenAIProviderNameKey is the attribute Key conforming to the + // "gen_ai.provider.name" semantic conventions. It represents the Generative AI + // provider as identified by the client or server instrumentation. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The attribute SHOULD be set based on the instrumentation's best + // knowledge and may differ from the actual model provider. + // + // Multiple providers, including Azure OpenAI, Gemini, and AI hosting platforms + // are accessible using the OpenAI REST API and corresponding client libraries, + // but may proxy or host models from different providers. + // + // The `gen_ai.request.model`, `gen_ai.response.model`, and `server.address` + // attributes may help identify the actual system in use. + // + // The `gen_ai.provider.name` attribute acts as a discriminator that + // identifies the GenAI telemetry format flavor specific to that provider + // within GenAI semantic conventions. + // It SHOULD be set consistently with provider-specific attributes and signals. + // For example, GenAI spans, metrics, and events related to AWS Bedrock + // should have the `gen_ai.provider.name` set to `aws.bedrock` and include + // applicable `aws.bedrock.*` attributes and are not expected to include + // `openai.*` attributes. + GenAIProviderNameKey = attribute.Key("gen_ai.provider.name") + + // GenAIRequestChoiceCountKey is the attribute Key conforming to the + // "gen_ai.request.choice.count" semantic conventions. It represents the target + // number of candidate completions to return. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 3 + GenAIRequestChoiceCountKey = attribute.Key("gen_ai.request.choice.count") + + // GenAIRequestEncodingFormatsKey is the attribute Key conforming to the + // "gen_ai.request.encoding_formats" semantic conventions. It represents the + // encoding formats requested in an embeddings operation, if specified. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: ["base64"], ["float", "binary"] + // Note: In some GenAI systems the encoding formats are called embedding types. + // Also, some GenAI systems only accept a single format per request. + GenAIRequestEncodingFormatsKey = attribute.Key("gen_ai.request.encoding_formats") + + // GenAIRequestFrequencyPenaltyKey is the attribute Key conforming to the + // "gen_ai.request.frequency_penalty" semantic conventions. It represents the + // frequency penalty setting for the GenAI request. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0.1 + GenAIRequestFrequencyPenaltyKey = attribute.Key("gen_ai.request.frequency_penalty") + + // GenAIRequestMaxTokensKey is the attribute Key conforming to the + // "gen_ai.request.max_tokens" semantic conventions. It represents the maximum + // number of tokens the model generates for a request. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 100 + GenAIRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens") + + // GenAIRequestModelKey is the attribute Key conforming to the + // "gen_ai.request.model" semantic conventions. It represents the name of the + // GenAI model a request is being made to. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: gpt-4 + GenAIRequestModelKey = attribute.Key("gen_ai.request.model") + + // GenAIRequestPresencePenaltyKey is the attribute Key conforming to the + // "gen_ai.request.presence_penalty" semantic conventions. It represents the + // presence penalty setting for the GenAI request. 
+ // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0.1 + GenAIRequestPresencePenaltyKey = attribute.Key("gen_ai.request.presence_penalty") + + // GenAIRequestSeedKey is the attribute Key conforming to the + // "gen_ai.request.seed" semantic conventions. It represents the requests with + // same seed value more likely to return same result. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 100 + GenAIRequestSeedKey = attribute.Key("gen_ai.request.seed") + + // GenAIRequestStopSequencesKey is the attribute Key conforming to the + // "gen_ai.request.stop_sequences" semantic conventions. It represents the list + // of sequences that the model will use to stop generating further tokens. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "forest", "lived" + GenAIRequestStopSequencesKey = attribute.Key("gen_ai.request.stop_sequences") + + // GenAIRequestTemperatureKey is the attribute Key conforming to the + // "gen_ai.request.temperature" semantic conventions. It represents the + // temperature setting for the GenAI request. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0.0 + GenAIRequestTemperatureKey = attribute.Key("gen_ai.request.temperature") + + // GenAIRequestTopKKey is the attribute Key conforming to the + // "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling + // setting for the GenAI request. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0 + GenAIRequestTopKKey = attribute.Key("gen_ai.request.top_k") + + // GenAIRequestTopPKey is the attribute Key conforming to the + // "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling + // setting for the GenAI request. 
+ // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1.0 + GenAIRequestTopPKey = attribute.Key("gen_ai.request.top_p") + + // GenAIResponseFinishReasonsKey is the attribute Key conforming to the + // "gen_ai.response.finish_reasons" semantic conventions. It represents the + // array of reasons the model stopped generating tokens, corresponding to each + // generation received. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: ["stop"], ["stop", "length"] + GenAIResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons") + + // GenAIResponseIDKey is the attribute Key conforming to the + // "gen_ai.response.id" semantic conventions. It represents the unique + // identifier for the completion. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "chatcmpl-123" + GenAIResponseIDKey = attribute.Key("gen_ai.response.id") + + // GenAIResponseModelKey is the attribute Key conforming to the + // "gen_ai.response.model" semantic conventions. It represents the name of the + // model that generated the response. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "gpt-4-0613" + GenAIResponseModelKey = attribute.Key("gen_ai.response.model") + + // GenAISystemInstructionsKey is the attribute Key conforming to the + // "gen_ai.system_instructions" semantic conventions. It represents the system + // message or instructions provided to the GenAI model separately from the chat + // history. 
+ // + // Type: any + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "[\n {\n "type": "text",\n "content": "You are an Agent that greet + // users, always use greetings tool to respond"\n }\n]\n", "[\n {\n "type": + // "text",\n "content": "You are a language translator."\n },\n {\n "type": + // "text",\n "content": "Your mission is to translate text in English to + // French."\n }\n]\n" + // Note: This attribute SHOULD be used when the corresponding provider or API + // allows to provide system instructions or messages separately from the + // chat history. + // + // Instructions that are part of the chat history SHOULD be recorded in + // `gen_ai.input.messages` attribute instead. + // + // Instrumentations MUST follow [System instructions JSON schema]. + // + // When recorded on spans, it MAY be recorded as a JSON string if structured + // format is not supported and SHOULD be recorded in structured form otherwise. + // + // Instrumentations MAY provide a way for users to filter or truncate + // system instructions. + // + // > [!Warning] + // > This attribute may contain sensitive information. + // + // See [Recording content on attributes] + // section for more details. + // + // [System instructions JSON schema]: /docs/gen-ai/gen-ai-system-instructions.json + // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes + GenAISystemInstructionsKey = attribute.Key("gen_ai.system_instructions") + + // GenAITokenTypeKey is the attribute Key conforming to the "gen_ai.token.type" + // semantic conventions. It represents the type of token being counted. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "input", "output" + GenAITokenTypeKey = attribute.Key("gen_ai.token.type") + + // GenAIToolCallIDKey is the attribute Key conforming to the + // "gen_ai.tool.call.id" semantic conventions. It represents the tool call + // identifier. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "call_mszuSIzqtI65i1wAUOE8w5H4" + GenAIToolCallIDKey = attribute.Key("gen_ai.tool.call.id") + + // GenAIToolDescriptionKey is the attribute Key conforming to the + // "gen_ai.tool.description" semantic conventions. It represents the tool + // description. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Multiply two numbers" + GenAIToolDescriptionKey = attribute.Key("gen_ai.tool.description") + + // GenAIToolNameKey is the attribute Key conforming to the "gen_ai.tool.name" + // semantic conventions. It represents the name of the tool utilized by the + // agent. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Flights" + GenAIToolNameKey = attribute.Key("gen_ai.tool.name") + + // GenAIToolTypeKey is the attribute Key conforming to the "gen_ai.tool.type" + // semantic conventions. It represents the type of the tool utilized by the + // agent. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "function", "extension", "datastore" + // Note: Extension: A tool executed on the agent-side to directly call external + // APIs, bridging the gap between the agent and real-world systems. + // Agent-side operations involve actions that are performed by the agent on the + // server or within the agent's controlled environment. + // Function: A tool executed on the client-side, where the agent generates + // parameters for a predefined function, and the client executes the logic. + // Client-side operations are actions taken on the user's end or within the + // client application. + // Datastore: A tool used by the agent to access and query structured or + // unstructured external data for retrieval-augmented tasks or knowledge + // updates. 
+ GenAIToolTypeKey = attribute.Key("gen_ai.tool.type") + + // GenAIUsageInputTokensKey is the attribute Key conforming to the + // "gen_ai.usage.input_tokens" semantic conventions. It represents the number of + // tokens used in the GenAI input (prompt). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 100 + GenAIUsageInputTokensKey = attribute.Key("gen_ai.usage.input_tokens") + + // GenAIUsageOutputTokensKey is the attribute Key conforming to the + // "gen_ai.usage.output_tokens" semantic conventions. It represents the number + // of tokens used in the GenAI response (completion). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 180 + GenAIUsageOutputTokensKey = attribute.Key("gen_ai.usage.output_tokens") +) + +// GenAIAgentDescription returns an attribute KeyValue conforming to the +// "gen_ai.agent.description" semantic conventions. It represents the free-form +// description of the GenAI agent provided by the application. +func GenAIAgentDescription(val string) attribute.KeyValue { + return GenAIAgentDescriptionKey.String(val) +} + +// GenAIAgentID returns an attribute KeyValue conforming to the "gen_ai.agent.id" +// semantic conventions. It represents the unique identifier of the GenAI agent. +func GenAIAgentID(val string) attribute.KeyValue { + return GenAIAgentIDKey.String(val) +} + +// GenAIAgentName returns an attribute KeyValue conforming to the +// "gen_ai.agent.name" semantic conventions. It represents the human-readable +// name of the GenAI agent provided by the application. +func GenAIAgentName(val string) attribute.KeyValue { + return GenAIAgentNameKey.String(val) +} + +// GenAIConversationID returns an attribute KeyValue conforming to the +// "gen_ai.conversation.id" semantic conventions. It represents the unique +// identifier for a conversation (session, thread), used to store and correlate +// messages within this conversation. 
+func GenAIConversationID(val string) attribute.KeyValue { + return GenAIConversationIDKey.String(val) +} + +// GenAIDataSourceID returns an attribute KeyValue conforming to the +// "gen_ai.data_source.id" semantic conventions. It represents the data source +// identifier. +func GenAIDataSourceID(val string) attribute.KeyValue { + return GenAIDataSourceIDKey.String(val) +} + +// GenAIRequestChoiceCount returns an attribute KeyValue conforming to the +// "gen_ai.request.choice.count" semantic conventions. It represents the target +// number of candidate completions to return. +func GenAIRequestChoiceCount(val int) attribute.KeyValue { + return GenAIRequestChoiceCountKey.Int(val) +} + +// GenAIRequestEncodingFormats returns an attribute KeyValue conforming to the +// "gen_ai.request.encoding_formats" semantic conventions. It represents the +// encoding formats requested in an embeddings operation, if specified. +func GenAIRequestEncodingFormats(val ...string) attribute.KeyValue { + return GenAIRequestEncodingFormatsKey.StringSlice(val) +} + +// GenAIRequestFrequencyPenalty returns an attribute KeyValue conforming to the +// "gen_ai.request.frequency_penalty" semantic conventions. It represents the +// frequency penalty setting for the GenAI request. +func GenAIRequestFrequencyPenalty(val float64) attribute.KeyValue { + return GenAIRequestFrequencyPenaltyKey.Float64(val) +} + +// GenAIRequestMaxTokens returns an attribute KeyValue conforming to the +// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum +// number of tokens the model generates for a request. +func GenAIRequestMaxTokens(val int) attribute.KeyValue { + return GenAIRequestMaxTokensKey.Int(val) +} + +// GenAIRequestModel returns an attribute KeyValue conforming to the +// "gen_ai.request.model" semantic conventions. It represents the name of the +// GenAI model a request is being made to. 
+func GenAIRequestModel(val string) attribute.KeyValue { + return GenAIRequestModelKey.String(val) +} + +// GenAIRequestPresencePenalty returns an attribute KeyValue conforming to the +// "gen_ai.request.presence_penalty" semantic conventions. It represents the +// presence penalty setting for the GenAI request. +func GenAIRequestPresencePenalty(val float64) attribute.KeyValue { + return GenAIRequestPresencePenaltyKey.Float64(val) +} + +// GenAIRequestSeed returns an attribute KeyValue conforming to the +// "gen_ai.request.seed" semantic conventions. It represents the requests with +// same seed value more likely to return same result. +func GenAIRequestSeed(val int) attribute.KeyValue { + return GenAIRequestSeedKey.Int(val) +} + +// GenAIRequestStopSequences returns an attribute KeyValue conforming to the +// "gen_ai.request.stop_sequences" semantic conventions. It represents the list +// of sequences that the model will use to stop generating further tokens. +func GenAIRequestStopSequences(val ...string) attribute.KeyValue { + return GenAIRequestStopSequencesKey.StringSlice(val) +} + +// GenAIRequestTemperature returns an attribute KeyValue conforming to the +// "gen_ai.request.temperature" semantic conventions. It represents the +// temperature setting for the GenAI request. +func GenAIRequestTemperature(val float64) attribute.KeyValue { + return GenAIRequestTemperatureKey.Float64(val) +} + +// GenAIRequestTopK returns an attribute KeyValue conforming to the +// "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling +// setting for the GenAI request. +func GenAIRequestTopK(val float64) attribute.KeyValue { + return GenAIRequestTopKKey.Float64(val) +} + +// GenAIRequestTopP returns an attribute KeyValue conforming to the +// "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling +// setting for the GenAI request. 
+func GenAIRequestTopP(val float64) attribute.KeyValue { + return GenAIRequestTopPKey.Float64(val) +} + +// GenAIResponseFinishReasons returns an attribute KeyValue conforming to the +// "gen_ai.response.finish_reasons" semantic conventions. It represents the array +// of reasons the model stopped generating tokens, corresponding to each +// generation received. +func GenAIResponseFinishReasons(val ...string) attribute.KeyValue { + return GenAIResponseFinishReasonsKey.StringSlice(val) +} + +// GenAIResponseID returns an attribute KeyValue conforming to the +// "gen_ai.response.id" semantic conventions. It represents the unique identifier +// for the completion. +func GenAIResponseID(val string) attribute.KeyValue { + return GenAIResponseIDKey.String(val) +} + +// GenAIResponseModel returns an attribute KeyValue conforming to the +// "gen_ai.response.model" semantic conventions. It represents the name of the +// model that generated the response. +func GenAIResponseModel(val string) attribute.KeyValue { + return GenAIResponseModelKey.String(val) +} + +// GenAIToolCallID returns an attribute KeyValue conforming to the +// "gen_ai.tool.call.id" semantic conventions. It represents the tool call +// identifier. +func GenAIToolCallID(val string) attribute.KeyValue { + return GenAIToolCallIDKey.String(val) +} + +// GenAIToolDescription returns an attribute KeyValue conforming to the +// "gen_ai.tool.description" semantic conventions. It represents the tool +// description. +func GenAIToolDescription(val string) attribute.KeyValue { + return GenAIToolDescriptionKey.String(val) +} + +// GenAIToolName returns an attribute KeyValue conforming to the +// "gen_ai.tool.name" semantic conventions. It represents the name of the tool +// utilized by the agent. +func GenAIToolName(val string) attribute.KeyValue { + return GenAIToolNameKey.String(val) +} + +// GenAIToolType returns an attribute KeyValue conforming to the +// "gen_ai.tool.type" semantic conventions. 
It represents the type of the tool +// utilized by the agent. +func GenAIToolType(val string) attribute.KeyValue { + return GenAIToolTypeKey.String(val) +} + +// GenAIUsageInputTokens returns an attribute KeyValue conforming to the +// "gen_ai.usage.input_tokens" semantic conventions. It represents the number of +// tokens used in the GenAI input (prompt). +func GenAIUsageInputTokens(val int) attribute.KeyValue { + return GenAIUsageInputTokensKey.Int(val) +} + +// GenAIUsageOutputTokens returns an attribute KeyValue conforming to the +// "gen_ai.usage.output_tokens" semantic conventions. It represents the number of +// tokens used in the GenAI response (completion). +func GenAIUsageOutputTokens(val int) attribute.KeyValue { + return GenAIUsageOutputTokensKey.Int(val) +} + +// Enum values for gen_ai.operation.name +var ( + // Chat completion operation such as [OpenAI Chat API] + // Stability: development + // + // [OpenAI Chat API]: https://platform.openai.com/docs/api-reference/chat + GenAIOperationNameChat = GenAIOperationNameKey.String("chat") + // Multimodal content generation operation such as [Gemini Generate Content] + // Stability: development + // + // [Gemini Generate Content]: https://ai.google.dev/api/generate-content + GenAIOperationNameGenerateContent = GenAIOperationNameKey.String("generate_content") + // Text completions operation such as [OpenAI Completions API (Legacy)] + // Stability: development + // + // [OpenAI Completions API (Legacy)]: https://platform.openai.com/docs/api-reference/completions + GenAIOperationNameTextCompletion = GenAIOperationNameKey.String("text_completion") + // Embeddings operation such as [OpenAI Create embeddings API] + // Stability: development + // + // [OpenAI Create embeddings API]: https://platform.openai.com/docs/api-reference/embeddings/create + GenAIOperationNameEmbeddings = GenAIOperationNameKey.String("embeddings") + // Create GenAI agent + // Stability: development + GenAIOperationNameCreateAgent = 
GenAIOperationNameKey.String("create_agent") + // Invoke GenAI agent + // Stability: development + GenAIOperationNameInvokeAgent = GenAIOperationNameKey.String("invoke_agent") + // Execute a tool + // Stability: development + GenAIOperationNameExecuteTool = GenAIOperationNameKey.String("execute_tool") +) + +// Enum values for gen_ai.output.type +var ( + // Plain text + // Stability: development + GenAIOutputTypeText = GenAIOutputTypeKey.String("text") + // JSON object with known or unknown schema + // Stability: development + GenAIOutputTypeJSON = GenAIOutputTypeKey.String("json") + // Image + // Stability: development + GenAIOutputTypeImage = GenAIOutputTypeKey.String("image") + // Speech + // Stability: development + GenAIOutputTypeSpeech = GenAIOutputTypeKey.String("speech") +) + +// Enum values for gen_ai.provider.name +var ( + // [OpenAI] + // Stability: development + // + // [OpenAI]: https://openai.com/ + GenAIProviderNameOpenAI = GenAIProviderNameKey.String("openai") + // Any Google generative AI endpoint + // Stability: development + GenAIProviderNameGCPGenAI = GenAIProviderNameKey.String("gcp.gen_ai") + // [Vertex AI] + // Stability: development + // + // [Vertex AI]: https://cloud.google.com/vertex-ai + GenAIProviderNameGCPVertexAI = GenAIProviderNameKey.String("gcp.vertex_ai") + // [Gemini] + // Stability: development + // + // [Gemini]: https://cloud.google.com/products/gemini + GenAIProviderNameGCPGemini = GenAIProviderNameKey.String("gcp.gemini") + // [Anthropic] + // Stability: development + // + // [Anthropic]: https://www.anthropic.com/ + GenAIProviderNameAnthropic = GenAIProviderNameKey.String("anthropic") + // [Cohere] + // Stability: development + // + // [Cohere]: https://cohere.com/ + GenAIProviderNameCohere = GenAIProviderNameKey.String("cohere") + // Azure AI Inference + // Stability: development + GenAIProviderNameAzureAIInference = GenAIProviderNameKey.String("azure.ai.inference") + // [Azure OpenAI] + // Stability: development + // + // 
[Azure OpenAI]: https://azure.microsoft.com/products/ai-services/openai-service/ + GenAIProviderNameAzureAIOpenAI = GenAIProviderNameKey.String("azure.ai.openai") + // [IBM Watsonx AI] + // Stability: development + // + // [IBM Watsonx AI]: https://www.ibm.com/products/watsonx-ai + GenAIProviderNameIBMWatsonxAI = GenAIProviderNameKey.String("ibm.watsonx.ai") + // [AWS Bedrock] + // Stability: development + // + // [AWS Bedrock]: https://aws.amazon.com/bedrock + GenAIProviderNameAWSBedrock = GenAIProviderNameKey.String("aws.bedrock") + // [Perplexity] + // Stability: development + // + // [Perplexity]: https://www.perplexity.ai/ + GenAIProviderNamePerplexity = GenAIProviderNameKey.String("perplexity") + // [xAI] + // Stability: development + // + // [xAI]: https://x.ai/ + GenAIProviderNameXAI = GenAIProviderNameKey.String("x_ai") + // [DeepSeek] + // Stability: development + // + // [DeepSeek]: https://www.deepseek.com/ + GenAIProviderNameDeepseek = GenAIProviderNameKey.String("deepseek") + // [Groq] + // Stability: development + // + // [Groq]: https://groq.com/ + GenAIProviderNameGroq = GenAIProviderNameKey.String("groq") + // [Mistral AI] + // Stability: development + // + // [Mistral AI]: https://mistral.ai/ + GenAIProviderNameMistralAI = GenAIProviderNameKey.String("mistral_ai") +) + +// Enum values for gen_ai.token.type +var ( + // Input tokens (prompt, input, etc.) + // Stability: development + GenAITokenTypeInput = GenAITokenTypeKey.String("input") + // Output tokens (completion, response, etc.) + // Stability: development + GenAITokenTypeOutput = GenAITokenTypeKey.String("output") +) + +// Namespace: geo +const ( + // GeoContinentCodeKey is the attribute Key conforming to the + // "geo.continent.code" semantic conventions. It represents the two-letter code + // representing continent’s name. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + GeoContinentCodeKey = attribute.Key("geo.continent.code") + + // GeoCountryISOCodeKey is the attribute Key conforming to the + // "geo.country.iso_code" semantic conventions. It represents the two-letter ISO + // Country Code ([ISO 3166-1 alpha2]). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CA" + // + // [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes + GeoCountryISOCodeKey = attribute.Key("geo.country.iso_code") + + // GeoLocalityNameKey is the attribute Key conforming to the "geo.locality.name" + // semantic conventions. It represents the locality name. Represents the name of + // a city, town, village, or similar populated place. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Montreal", "Berlin" + GeoLocalityNameKey = attribute.Key("geo.locality.name") + + // GeoLocationLatKey is the attribute Key conforming to the "geo.location.lat" + // semantic conventions. It represents the latitude of the geo location in + // [WGS84]. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 45.505918 + // + // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 + GeoLocationLatKey = attribute.Key("geo.location.lat") + + // GeoLocationLonKey is the attribute Key conforming to the "geo.location.lon" + // semantic conventions. It represents the longitude of the geo location in + // [WGS84]. + // + // Type: double + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: -73.61483 + // + // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 + GeoLocationLonKey = attribute.Key("geo.location.lon") + + // GeoPostalCodeKey is the attribute Key conforming to the "geo.postal_code" + // semantic conventions. 
It represents the postal code associated with the + // location. Values appropriate for this field may also be known as a postcode + // or ZIP code and will vary widely from country to country. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "94040" + GeoPostalCodeKey = attribute.Key("geo.postal_code") + + // GeoRegionISOCodeKey is the attribute Key conforming to the + // "geo.region.iso_code" semantic conventions. It represents the region ISO code + // ([ISO 3166-2]). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CA-QC" + // + // [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2 + GeoRegionISOCodeKey = attribute.Key("geo.region.iso_code") +) + +// GeoCountryISOCode returns an attribute KeyValue conforming to the +// "geo.country.iso_code" semantic conventions. It represents the two-letter ISO +// Country Code ([ISO 3166-1 alpha2]). +// +// [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes +func GeoCountryISOCode(val string) attribute.KeyValue { + return GeoCountryISOCodeKey.String(val) +} + +// GeoLocalityName returns an attribute KeyValue conforming to the +// "geo.locality.name" semantic conventions. It represents the locality name. +// Represents the name of a city, town, village, or similar populated place. +func GeoLocalityName(val string) attribute.KeyValue { + return GeoLocalityNameKey.String(val) +} + +// GeoLocationLat returns an attribute KeyValue conforming to the +// "geo.location.lat" semantic conventions. It represents the latitude of the geo +// location in [WGS84]. +// +// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 +func GeoLocationLat(val float64) attribute.KeyValue { + return GeoLocationLatKey.Float64(val) +} + +// GeoLocationLon returns an attribute KeyValue conforming to the +// "geo.location.lon" semantic conventions. It represents the longitude of the +// geo location in [WGS84]. 
+// +// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84 +func GeoLocationLon(val float64) attribute.KeyValue { + return GeoLocationLonKey.Float64(val) +} + +// GeoPostalCode returns an attribute KeyValue conforming to the +// "geo.postal_code" semantic conventions. It represents the postal code +// associated with the location. Values appropriate for this field may also be +// known as a postcode or ZIP code and will vary widely from country to country. +func GeoPostalCode(val string) attribute.KeyValue { + return GeoPostalCodeKey.String(val) +} + +// GeoRegionISOCode returns an attribute KeyValue conforming to the +// "geo.region.iso_code" semantic conventions. It represents the region ISO code +// ([ISO 3166-2]). +// +// [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2 +func GeoRegionISOCode(val string) attribute.KeyValue { + return GeoRegionISOCodeKey.String(val) +} + +// Enum values for geo.continent.code +var ( + // Africa + // Stability: development + GeoContinentCodeAf = GeoContinentCodeKey.String("AF") + // Antarctica + // Stability: development + GeoContinentCodeAn = GeoContinentCodeKey.String("AN") + // Asia + // Stability: development + GeoContinentCodeAs = GeoContinentCodeKey.String("AS") + // Europe + // Stability: development + GeoContinentCodeEu = GeoContinentCodeKey.String("EU") + // North America + // Stability: development + GeoContinentCodeNa = GeoContinentCodeKey.String("NA") + // Oceania + // Stability: development + GeoContinentCodeOc = GeoContinentCodeKey.String("OC") + // South America + // Stability: development + GeoContinentCodeSa = GeoContinentCodeKey.String("SA") +) + +// Namespace: go +const ( + // GoMemoryTypeKey is the attribute Key conforming to the "go.memory.type" + // semantic conventions. It represents the type of memory. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "other", "stack" + GoMemoryTypeKey = attribute.Key("go.memory.type") +) + +// Enum values for go.memory.type +var ( + // Memory allocated from the heap that is reserved for stack space, whether or + // not it is currently in-use. + // Stability: development + GoMemoryTypeStack = GoMemoryTypeKey.String("stack") + // Memory used by the Go runtime, excluding other categories of memory usage + // described in this enumeration. + // Stability: development + GoMemoryTypeOther = GoMemoryTypeKey.String("other") +) + +// Namespace: graphql +const ( + // GraphQLDocumentKey is the attribute Key conforming to the "graphql.document" + // semantic conventions. It represents the GraphQL document being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: query findBookById { bookById(id: ?) { name } } + // Note: The value may be sanitized to exclude sensitive information. + GraphQLDocumentKey = attribute.Key("graphql.document") + + // GraphQLOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of the + // operation being executed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: findBookById + GraphQLOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphQLOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of the + // operation being executed. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "query", "mutation", "subscription" + GraphQLOperationTypeKey = attribute.Key("graphql.operation.type") +) + +// GraphQLDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. 
It represents the GraphQL document +// being executed. +func GraphQLDocument(val string) attribute.KeyValue { + return GraphQLDocumentKey.String(val) +} + +// GraphQLOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphQLOperationName(val string) attribute.KeyValue { + return GraphQLOperationNameKey.String(val) +} + +// Enum values for graphql.operation.type +var ( + // GraphQL query + // Stability: development + GraphQLOperationTypeQuery = GraphQLOperationTypeKey.String("query") + // GraphQL mutation + // Stability: development + GraphQLOperationTypeMutation = GraphQLOperationTypeKey.String("mutation") + // GraphQL subscription + // Stability: development + GraphQLOperationTypeSubscription = GraphQLOperationTypeKey.String("subscription") +) + +// Namespace: heroku +const ( + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions. It represents the unique identifier for the + // application. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2daa2797-e42b-4624-9322-ec3f968df4da" + HerokuAppIDKey = attribute.Key("heroku.app.id") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit hash + // for the current release. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "e6134959463efd8966b20e75b913cafe3f5ec" + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. It represents the + // time and date the release was created. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2022-10-23T18:00:42Z" + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") +) + +// HerokuAppID returns an attribute KeyValue conforming to the "heroku.app.id" +// semantic conventions. It represents the unique identifier for the application. +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release. +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming to the +// "heroku.release.creation_timestamp" semantic conventions. It represents the +// time and date the release was created. +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// Namespace: host +const ( + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is running + // on. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HostArchKey = attribute.Key("host.arch") + + // HostCPUCacheL2SizeKey is the attribute Key conforming to the + // "host.cpu.cache.l2.size" semantic conventions. It represents the amount of + // level 2 memory cache available to the processor (in Bytes). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 12288000 + HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") + + // HostCPUFamilyKey is the attribute Key conforming to the "host.cpu.family" + // semantic conventions. It represents the family or generation of the CPU. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "6", "PA-RISC 1.1e" + HostCPUFamilyKey = attribute.Key("host.cpu.family") + + // HostCPUModelIDKey is the attribute Key conforming to the "host.cpu.model.id" + // semantic conventions. It represents the model identifier. It provides more + // granular information about the CPU, distinguishing it from other CPUs within + // the same family. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "6", "9000/778/B180L" + HostCPUModelIDKey = attribute.Key("host.cpu.model.id") + + // HostCPUModelNameKey is the attribute Key conforming to the + // "host.cpu.model.name" semantic conventions. It represents the model + // designation of the processor. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz" + HostCPUModelNameKey = attribute.Key("host.cpu.model.name") + + // HostCPUSteppingKey is the attribute Key conforming to the "host.cpu.stepping" + // semantic conventions. It represents the stepping or core revisions. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1", "r1p1" + HostCPUSteppingKey = attribute.Key("host.cpu.stepping") + + // HostCPUVendorIDKey is the attribute Key conforming to the + // "host.cpu.vendor.id" semantic conventions. It represents the processor + // manufacturer identifier. A maximum 12-character string. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "GenuineIntel" + // Note: [CPUID] command returns the vendor ID string in EBX, EDX and ECX + // registers. Writing these to memory in this order results in a 12-character + // string. 
+ // + // [CPUID]: https://wiki.osdev.org/CPUID + HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") + + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be the + // instance_id assigned by the cloud provider. For non-containerized systems, + // this should be the `machine-id`. See the table below for the sources to use + // to determine the `machine-id` based on operating system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "fdbf79e8af94cb7f9e8df36789187052" + HostIDKey = attribute.Key("host.id") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the VM image ID or host OS image ID. For + // Cloud, this value is from the provider. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ami-07b06b442921831e5" + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageNameKey is the attribute Key conforming to the "host.image.name" + // semantic conventions. It represents the name of the VM image or OS install + // the host was instantiated from. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "infra-ami-eks-worker-node-7d4ec78312", "CentOS-8-x86_64-1905" + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version string + // of the VM image or host OS as defined in [Version Attributes]. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0.1" + // + // [Version Attributes]: /docs/resource/README.md#version-attributes + HostImageVersionKey = attribute.Key("host.image.version") + + // HostIPKey is the attribute Key conforming to the "host.ip" semantic + // conventions. It represents the available IP addresses of the host, excluding + // loopback interfaces. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "192.168.1.140", "fe80::abc2:4a28:737a:609e" + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 + // addresses MUST be specified in the [RFC 5952] format. + // + // [RFC 5952]: https://www.rfc-editor.org/rfc/rfc5952.html + HostIPKey = attribute.Key("host.ip") + + // HostMacKey is the attribute Key conforming to the "host.mac" semantic + // conventions. It represents the available MAC addresses of the host, excluding + // loopback interfaces. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "AC-DE-48-23-45-67", "AC-DE-48-23-45-67-01-9F" + // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal form]: as + // hyphen-separated octets in uppercase hexadecimal form from most to least + // significant. + // + // [IEEE RA hexadecimal form]: https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf + HostMacKey = attribute.Key("host.mac") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified hostname, + // or another name specified by the user. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-test" + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "n1-standard-1" + HostTypeKey = attribute.Key("host.type") +) + +// HostCPUCacheL2Size returns an attribute KeyValue conforming to the +// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of +// level 2 memory cache available to the processor (in Bytes). +func HostCPUCacheL2Size(val int) attribute.KeyValue { + return HostCPUCacheL2SizeKey.Int(val) +} + +// HostCPUFamily returns an attribute KeyValue conforming to the +// "host.cpu.family" semantic conventions. It represents the family or generation +// of the CPU. +func HostCPUFamily(val string) attribute.KeyValue { + return HostCPUFamilyKey.String(val) +} + +// HostCPUModelID returns an attribute KeyValue conforming to the +// "host.cpu.model.id" semantic conventions. It represents the model identifier. +// It provides more granular information about the CPU, distinguishing it from +// other CPUs within the same family. +func HostCPUModelID(val string) attribute.KeyValue { + return HostCPUModelIDKey.String(val) +} + +// HostCPUModelName returns an attribute KeyValue conforming to the +// "host.cpu.model.name" semantic conventions. It represents the model +// designation of the processor. +func HostCPUModelName(val string) attribute.KeyValue { + return HostCPUModelNameKey.String(val) +} + +// HostCPUStepping returns an attribute KeyValue conforming to the +// "host.cpu.stepping" semantic conventions. It represents the stepping or core +// revisions. 
+func HostCPUStepping(val string) attribute.KeyValue { + return HostCPUSteppingKey.String(val) +} + +// HostCPUVendorID returns an attribute KeyValue conforming to the +// "host.cpu.vendor.id" semantic conventions. It represents the processor +// manufacturer identifier. A maximum 12-character string. +func HostCPUVendorID(val string) attribute.KeyValue { + return HostCPUVendorIDKey.String(val) +} + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized systems, +// this should be the `machine-id`. See the table below for the sources to use to +// determine the `machine-id` based on operating system. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the "host.image.id" +// semantic conventions. It represents the VM image ID or host OS image ID. For +// Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM image +// or OS install the host was instantiated from. +func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string of +// the VM image or host OS as defined in [Version Attributes]. +// +// [Version Attributes]: /docs/resource/README.md#version-attributes +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic +// conventions. 
It represents the available IP addresses of the host, excluding +// loopback interfaces. +func HostIP(val ...string) attribute.KeyValue { + return HostIPKey.StringSlice(val) +} + +// HostMac returns an attribute KeyValue conforming to the "host.mac" semantic +// conventions. It represents the available MAC addresses of the host, excluding +// loopback interfaces. +func HostMac(val ...string) attribute.KeyValue { + return HostMacKey.StringSlice(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" semantic +// conventions. It represents the name of the host. On Unix systems, it may +// contain what the hostname command returns, or the fully qualified hostname, or +// another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" semantic +// conventions. It represents the type of host. For Cloud, this must be the +// machine type. +func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// Enum values for host.arch +var ( + // AMD64 + // Stability: development + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + // Stability: development + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + // Stability: development + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + // Stability: development + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + // Stability: development + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + // Stability: development + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + // Stability: development + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + // Stability: development + HostArchX86 = HostArchKey.String("x86") +) + +// Namespace: http +const ( + // HTTPConnectionStateKey is the attribute Key conforming to the + // "http.connection.state" semantic conventions. 
It represents the state of the + // HTTP connection in the HTTP connection pool. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "active", "idle" + HTTPConnectionStateKey = attribute.Key("http.connection.state") + + // HTTPRequestBodySizeKey is the attribute Key conforming to the + // "http.request.body.size" semantic conventions. It represents the size of the + // request payload body in bytes. This is the number of bytes transferred + // excluding headers and is often, but not always, present as the + // [Content-Length] header. For requests using transport encoding, this should + // be the compressed size. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length + HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") + + // HTTPRequestMethodKey is the attribute Key conforming to the + // "http.request.method" semantic conventions. It represents the HTTP request + // method. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "GET", "POST", "HEAD" + // Note: HTTP request method value SHOULD be "known" to the instrumentation. + // By default, this convention defines "known" methods as the ones listed in + // [RFC9110] + // and the PATCH method defined in [RFC5789]. + // + // If the HTTP request method is not known to instrumentation, it MUST set the + // `http.request.method` attribute to `_OTHER`. + // + // If the HTTP instrumentation could end up converting valid HTTP request + // methods to `_OTHER`, then it MUST provide a way to override + // the list of known HTTP methods. 
If this override is done via environment + // variable, then the environment variable MUST be named + // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of + // case-sensitive known HTTP methods + // (this list MUST be a full override of the default known method, it is not a + // list of known methods in addition to the defaults). + // + // HTTP method names are case-sensitive and `http.request.method` attribute + // value MUST match a known HTTP method name exactly. + // Instrumentations for specific web frameworks that consider HTTP methods to be + // case insensitive, SHOULD populate a canonical equivalent. + // Tracing instrumentations that do so, MUST also set + // `http.request.method_original` to the original value. + // + // [RFC9110]: https://www.rfc-editor.org/rfc/rfc9110.html#name-methods + // [RFC5789]: https://www.rfc-editor.org/rfc/rfc5789.html + HTTPRequestMethodKey = attribute.Key("http.request.method") + + // HTTPRequestMethodOriginalKey is the attribute Key conforming to the + // "http.request.method_original" semantic conventions. It represents the + // original HTTP method sent by the client in the request line. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "GeT", "ACL", "foo" + HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") + + // HTTPRequestResendCountKey is the attribute Key conforming to the + // "http.request.resend_count" semantic conventions. It represents the ordinal + // number of request resending attempt (for any reason, including redirects). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending (e.g. + // redirection, authorization failure, 503 Server Unavailable, network issues, + // or any other). 
+ HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") + + // HTTPRequestSizeKey is the attribute Key conforming to the "http.request.size" + // semantic conventions. It represents the total size of the request in bytes. + // This should be the total number of bytes sent over the wire, including the + // request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, and request + // body if any. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + HTTPRequestSizeKey = attribute.Key("http.request.size") + + // HTTPResponseBodySizeKey is the attribute Key conforming to the + // "http.response.body.size" semantic conventions. It represents the size of the + // response payload body in bytes. This is the number of bytes transferred + // excluding headers and is often, but not always, present as the + // [Content-Length] header. For requests using transport encoding, this should + // be the compressed size. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length + HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") + + // HTTPResponseSizeKey is the attribute Key conforming to the + // "http.response.size" semantic conventions. It represents the total size of + // the response in bytes. This should be the total number of bytes sent over the + // wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), + // headers, and response body and trailers if any. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + HTTPResponseSizeKey = attribute.Key("http.response.size") + + // HTTPResponseStatusCodeKey is the attribute Key conforming to the + // "http.response.status_code" semantic conventions. It represents the + // [HTTP response status code]. 
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 200 + // + // [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 + HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" semantic + // conventions. It represents the matched route, that is, the path template in + // the format used by the respective server framework. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "/users/:userID?", "{controller}/{action}/{id?}" + // Note: MUST NOT be populated when this is not supported by the HTTP server + // framework as the route attribute should have low-cardinality and the URI path + // can NOT substitute it. + // SHOULD include the [application root] if there is one. + // + // [application root]: /docs/http/http-spans.md#http-server-definitions + HTTPRouteKey = attribute.Key("http.route") +) + +// HTTPRequestBodySize returns an attribute KeyValue conforming to the +// "http.request.body.size" semantic conventions. It represents the size of the +// request payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. +// +// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length +func HTTPRequestBodySize(val int) attribute.KeyValue { + return HTTPRequestBodySizeKey.Int(val) +} + +// HTTPRequestHeader returns an attribute KeyValue conforming to the +// "http.request.header" semantic conventions. It represents the HTTP request +// headers, `` being the normalized HTTP Header name (lowercase), the value +// being the header values. 
+func HTTPRequestHeader(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("http.request.header."+key, val) +} + +// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the +// "http.request.method_original" semantic conventions. It represents the +// original HTTP method sent by the client in the request line. +func HTTPRequestMethodOriginal(val string) attribute.KeyValue { + return HTTPRequestMethodOriginalKey.String(val) +} + +// HTTPRequestResendCount returns an attribute KeyValue conforming to the +// "http.request.resend_count" semantic conventions. It represents the ordinal +// number of request resending attempt (for any reason, including redirects). +func HTTPRequestResendCount(val int) attribute.KeyValue { + return HTTPRequestResendCountKey.Int(val) +} + +// HTTPRequestSize returns an attribute KeyValue conforming to the +// "http.request.size" semantic conventions. It represents the total size of the +// request in bytes. This should be the total number of bytes sent over the wire, +// including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, +// and request body if any. +func HTTPRequestSize(val int) attribute.KeyValue { + return HTTPRequestSizeKey.Int(val) +} + +// HTTPResponseBodySize returns an attribute KeyValue conforming to the +// "http.response.body.size" semantic conventions. It represents the size of the +// response payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. +// +// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length +func HTTPResponseBodySize(val int) attribute.KeyValue { + return HTTPResponseBodySizeKey.Int(val) +} + +// HTTPResponseHeader returns an attribute KeyValue conforming to the +// "http.response.header" semantic conventions. 
It represents the HTTP response +// headers, `<key>` being the normalized HTTP Header name (lowercase), the value +// being the header values. +func HTTPResponseHeader(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("http.response.header."+key, val) +} + +// HTTPResponseSize returns an attribute KeyValue conforming to the +// "http.response.size" semantic conventions. It represents the total size of the +// response in bytes. This should be the total number of bytes sent over the +// wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), +// headers, and response body and trailers if any. +func HTTPResponseSize(val int) attribute.KeyValue { + return HTTPResponseSizeKey.Int(val) +} + +// HTTPResponseStatusCode returns an attribute KeyValue conforming to the +// "http.response.status_code" semantic conventions. It represents the +// [HTTP response status code]. +// +// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 +func HTTPResponseStatusCode(val int) attribute.KeyValue { + return HTTPResponseStatusCodeKey.Int(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route, that is, the path +// template in the format used by the respective server framework. +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// Enum values for http.connection.state +var ( + // active state. + // Stability: development + HTTPConnectionStateActive = HTTPConnectionStateKey.String("active") + // idle state. + // Stability: development + HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle") +) + +// Enum values for http.request.method +var ( + // CONNECT method. + // Stability: stable + HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") + // DELETE method. + // Stability: stable + HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") + // GET method. 
+ // Stability: stable + HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") + // HEAD method. + // Stability: stable + HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") + // OPTIONS method. + // Stability: stable + HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") + // PATCH method. + // Stability: stable + HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") + // POST method. + // Stability: stable + HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") + // PUT method. + // Stability: stable + HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") + // TRACE method. + // Stability: stable + HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") + // Any HTTP method that the instrumentation has no prior knowledge of. + // Stability: stable + HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") +) + +// Namespace: hw +const ( + // HwBatteryCapacityKey is the attribute Key conforming to the + // "hw.battery.capacity" semantic conventions. It represents the design capacity + // in Watts-hours or Amper-hours. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9.3Ah", "50Wh" + HwBatteryCapacityKey = attribute.Key("hw.battery.capacity") + + // HwBatteryChemistryKey is the attribute Key conforming to the + // "hw.battery.chemistry" semantic conventions. It represents the battery + // [chemistry], e.g. Lithium-Ion, Nickel-Cadmium, etc. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Li-ion", "NiMH" + // + // [chemistry]: https://schemas.dmtf.org/wbem/cim-html/2.31.0/CIM_Battery.html + HwBatteryChemistryKey = attribute.Key("hw.battery.chemistry") + + // HwBatteryStateKey is the attribute Key conforming to the "hw.battery.state" + // semantic conventions. It represents the current state of the battery. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwBatteryStateKey = attribute.Key("hw.battery.state") + + // HwBiosVersionKey is the attribute Key conforming to the "hw.bios_version" + // semantic conventions. It represents the BIOS version of the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.2.3" + HwBiosVersionKey = attribute.Key("hw.bios_version") + + // HwDriverVersionKey is the attribute Key conforming to the "hw.driver_version" + // semantic conventions. It represents the driver version for the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "10.2.1-3" + HwDriverVersionKey = attribute.Key("hw.driver_version") + + // HwEnclosureTypeKey is the attribute Key conforming to the "hw.enclosure.type" + // semantic conventions. It represents the type of the enclosure (useful for + // modular systems). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Computer", "Storage", "Switch" + HwEnclosureTypeKey = attribute.Key("hw.enclosure.type") + + // HwFirmwareVersionKey is the attribute Key conforming to the + // "hw.firmware_version" semantic conventions. It represents the firmware + // version of the hardware component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2.0.1" + HwFirmwareVersionKey = attribute.Key("hw.firmware_version") + + // HwGpuTaskKey is the attribute Key conforming to the "hw.gpu.task" semantic + // conventions. It represents the type of task the GPU is performing. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwGpuTaskKey = attribute.Key("hw.gpu.task") + + // HwIDKey is the attribute Key conforming to the "hw.id" semantic conventions. 
+ // It represents an identifier for the hardware component, unique within the + // monitored host. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "win32battery_battery_testsysa33_1" + HwIDKey = attribute.Key("hw.id") + + // HwLimitTypeKey is the attribute Key conforming to the "hw.limit_type" + // semantic conventions. It represents the type of limit for hardware + // components. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwLimitTypeKey = attribute.Key("hw.limit_type") + + // HwLogicalDiskRaidLevelKey is the attribute Key conforming to the + // "hw.logical_disk.raid_level" semantic conventions. It represents the RAID + // Level of the logical disk. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "RAID0+1", "RAID5", "RAID10" + HwLogicalDiskRaidLevelKey = attribute.Key("hw.logical_disk.raid_level") + + // HwLogicalDiskStateKey is the attribute Key conforming to the + // "hw.logical_disk.state" semantic conventions. It represents the state of the + // logical disk space usage. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwLogicalDiskStateKey = attribute.Key("hw.logical_disk.state") + + // HwMemoryTypeKey is the attribute Key conforming to the "hw.memory.type" + // semantic conventions. It represents the type of the memory module. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "DDR4", "DDR5", "LPDDR5" + HwMemoryTypeKey = attribute.Key("hw.memory.type") + + // HwModelKey is the attribute Key conforming to the "hw.model" semantic + // conventions. It represents the descriptive model name of the hardware + // component. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "PERC H740P", "Intel(R) Core(TM) i7-10700K", "Dell XPS 15 Battery" + HwModelKey = attribute.Key("hw.model") + + // HwNameKey is the attribute Key conforming to the "hw.name" semantic + // conventions. It represents an easily-recognizable name for the hardware + // component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "eth0" + HwNameKey = attribute.Key("hw.name") + + // HwNetworkLogicalAddressesKey is the attribute Key conforming to the + // "hw.network.logical_addresses" semantic conventions. It represents the + // logical addresses of the adapter (e.g. IP address, or WWPN). + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "172.16.8.21", "57.11.193.42" + HwNetworkLogicalAddressesKey = attribute.Key("hw.network.logical_addresses") + + // HwNetworkPhysicalAddressKey is the attribute Key conforming to the + // "hw.network.physical_address" semantic conventions. It represents the + // physical address of the adapter (e.g. MAC address, or WWNN). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "00-90-F5-E9-7B-36" + HwNetworkPhysicalAddressKey = attribute.Key("hw.network.physical_address") + + // HwParentKey is the attribute Key conforming to the "hw.parent" semantic + // conventions. It represents the unique identifier of the parent component + // (typically the `hw.id` attribute of the enclosure, or disk controller). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "dellStorage_perc_0" + HwParentKey = attribute.Key("hw.parent") + + // HwPhysicalDiskSmartAttributeKey is the attribute Key conforming to the + // "hw.physical_disk.smart_attribute" semantic conventions. It represents the + // [S.M.A.R.T.] 
(Self-Monitoring, Analysis, and Reporting Technology) attribute + // of the physical disk. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Spin Retry Count", "Seek Error Rate", "Raw Read Error Rate" + // + // [S.M.A.R.T.]: https://wikipedia.org/wiki/S.M.A.R.T. + HwPhysicalDiskSmartAttributeKey = attribute.Key("hw.physical_disk.smart_attribute") + + // HwPhysicalDiskStateKey is the attribute Key conforming to the + // "hw.physical_disk.state" semantic conventions. It represents the state of the + // physical disk endurance utilization. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwPhysicalDiskStateKey = attribute.Key("hw.physical_disk.state") + + // HwPhysicalDiskTypeKey is the attribute Key conforming to the + // "hw.physical_disk.type" semantic conventions. It represents the type of the + // physical disk. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "HDD", "SSD", "10K" + HwPhysicalDiskTypeKey = attribute.Key("hw.physical_disk.type") + + // HwSensorLocationKey is the attribute Key conforming to the + // "hw.sensor_location" semantic conventions. It represents the location of the + // sensor. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cpu0", "ps1", "INLET", "CPU0_DIE", "AMBIENT", "MOTHERBOARD", "PS0 + // V3_3", "MAIN_12V", "CPU_VCORE" + HwSensorLocationKey = attribute.Key("hw.sensor_location") + + // HwSerialNumberKey is the attribute Key conforming to the "hw.serial_number" + // semantic conventions. It represents the serial number of the hardware + // component. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CNFCP0123456789" + HwSerialNumberKey = attribute.Key("hw.serial_number") + + // HwStateKey is the attribute Key conforming to the "hw.state" semantic + // conventions. It represents the current state of the component. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwStateKey = attribute.Key("hw.state") + + // HwTapeDriveOperationTypeKey is the attribute Key conforming to the + // "hw.tape_drive.operation_type" semantic conventions. It represents the type + // of tape drive operation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + HwTapeDriveOperationTypeKey = attribute.Key("hw.tape_drive.operation_type") + + // HwTypeKey is the attribute Key conforming to the "hw.type" semantic + // conventions. It represents the type of the component. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: Describes the category of the hardware component for which `hw.state` + // is being reported. For example, `hw.type=temperature` along with + // `hw.state=degraded` would indicate that the temperature of the hardware + // component has been reported as `degraded`. + HwTypeKey = attribute.Key("hw.type") + + // HwVendorKey is the attribute Key conforming to the "hw.vendor" semantic + // conventions. It represents the vendor name of the hardware component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Dell", "HP", "Intel", "AMD", "LSI", "Lenovo" + HwVendorKey = attribute.Key("hw.vendor") +) + +// HwBatteryCapacity returns an attribute KeyValue conforming to the +// "hw.battery.capacity" semantic conventions. It represents the design capacity +// in Watts-hours or Ampere-hours. 
+func HwBatteryCapacity(val string) attribute.KeyValue { + return HwBatteryCapacityKey.String(val) +} + +// HwBatteryChemistry returns an attribute KeyValue conforming to the +// "hw.battery.chemistry" semantic conventions. It represents the battery +// [chemistry], e.g. Lithium-Ion, Nickel-Cadmium, etc. +// +// [chemistry]: https://schemas.dmtf.org/wbem/cim-html/2.31.0/CIM_Battery.html +func HwBatteryChemistry(val string) attribute.KeyValue { + return HwBatteryChemistryKey.String(val) +} + +// HwBiosVersion returns an attribute KeyValue conforming to the +// "hw.bios_version" semantic conventions. It represents the BIOS version of the +// hardware component. +func HwBiosVersion(val string) attribute.KeyValue { + return HwBiosVersionKey.String(val) +} + +// HwDriverVersion returns an attribute KeyValue conforming to the +// "hw.driver_version" semantic conventions. It represents the driver version for +// the hardware component. +func HwDriverVersion(val string) attribute.KeyValue { + return HwDriverVersionKey.String(val) +} + +// HwEnclosureType returns an attribute KeyValue conforming to the +// "hw.enclosure.type" semantic conventions. It represents the type of the +// enclosure (useful for modular systems). +func HwEnclosureType(val string) attribute.KeyValue { + return HwEnclosureTypeKey.String(val) +} + +// HwFirmwareVersion returns an attribute KeyValue conforming to the +// "hw.firmware_version" semantic conventions. It represents the firmware version +// of the hardware component. +func HwFirmwareVersion(val string) attribute.KeyValue { + return HwFirmwareVersionKey.String(val) +} + +// HwID returns an attribute KeyValue conforming to the "hw.id" semantic +// conventions. It represents an identifier for the hardware component, unique +// within the monitored host. 
+func HwID(val string) attribute.KeyValue { + return HwIDKey.String(val) +} + +// HwLogicalDiskRaidLevel returns an attribute KeyValue conforming to the +// "hw.logical_disk.raid_level" semantic conventions. It represents the RAID +// Level of the logical disk. +func HwLogicalDiskRaidLevel(val string) attribute.KeyValue { + return HwLogicalDiskRaidLevelKey.String(val) +} + +// HwMemoryType returns an attribute KeyValue conforming to the "hw.memory.type" +// semantic conventions. It represents the type of the memory module. +func HwMemoryType(val string) attribute.KeyValue { + return HwMemoryTypeKey.String(val) +} + +// HwModel returns an attribute KeyValue conforming to the "hw.model" semantic +// conventions. It represents the descriptive model name of the hardware +// component. +func HwModel(val string) attribute.KeyValue { + return HwModelKey.String(val) +} + +// HwName returns an attribute KeyValue conforming to the "hw.name" semantic +// conventions. It represents an easily-recognizable name for the hardware +// component. +func HwName(val string) attribute.KeyValue { + return HwNameKey.String(val) +} + +// HwNetworkLogicalAddresses returns an attribute KeyValue conforming to the +// "hw.network.logical_addresses" semantic conventions. It represents the logical +// addresses of the adapter (e.g. IP address, or WWPN). +func HwNetworkLogicalAddresses(val ...string) attribute.KeyValue { + return HwNetworkLogicalAddressesKey.StringSlice(val) +} + +// HwNetworkPhysicalAddress returns an attribute KeyValue conforming to the +// "hw.network.physical_address" semantic conventions. It represents the physical +// address of the adapter (e.g. MAC address, or WWNN). +func HwNetworkPhysicalAddress(val string) attribute.KeyValue { + return HwNetworkPhysicalAddressKey.String(val) +} + +// HwParent returns an attribute KeyValue conforming to the "hw.parent" semantic +// conventions. 
It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func HwParent(val string) attribute.KeyValue { + return HwParentKey.String(val) +} + +// HwPhysicalDiskSmartAttribute returns an attribute KeyValue conforming to the +// "hw.physical_disk.smart_attribute" semantic conventions. It represents the +// [S.M.A.R.T.] (Self-Monitoring, Analysis, and Reporting Technology) attribute +// of the physical disk. +// +// [S.M.A.R.T.]: https://wikipedia.org/wiki/S.M.A.R.T. +func HwPhysicalDiskSmartAttribute(val string) attribute.KeyValue { + return HwPhysicalDiskSmartAttributeKey.String(val) +} + +// HwPhysicalDiskType returns an attribute KeyValue conforming to the +// "hw.physical_disk.type" semantic conventions. It represents the type of the +// physical disk. +func HwPhysicalDiskType(val string) attribute.KeyValue { + return HwPhysicalDiskTypeKey.String(val) +} + +// HwSensorLocation returns an attribute KeyValue conforming to the +// "hw.sensor_location" semantic conventions. It represents the location of the +// sensor. +func HwSensorLocation(val string) attribute.KeyValue { + return HwSensorLocationKey.String(val) +} + +// HwSerialNumber returns an attribute KeyValue conforming to the +// "hw.serial_number" semantic conventions. It represents the serial number of +// the hardware component. +func HwSerialNumber(val string) attribute.KeyValue { + return HwSerialNumberKey.String(val) +} + +// HwVendor returns an attribute KeyValue conforming to the "hw.vendor" semantic +// conventions. It represents the vendor name of the hardware component. 
+func HwVendor(val string) attribute.KeyValue { + return HwVendorKey.String(val) +} + +// Enum values for hw.battery.state +var ( + // Charging + // Stability: development + HwBatteryStateCharging = HwBatteryStateKey.String("charging") + // Discharging + // Stability: development + HwBatteryStateDischarging = HwBatteryStateKey.String("discharging") +) + +// Enum values for hw.gpu.task +var ( + // Decoder + // Stability: development + HwGpuTaskDecoder = HwGpuTaskKey.String("decoder") + // Encoder + // Stability: development + HwGpuTaskEncoder = HwGpuTaskKey.String("encoder") + // General + // Stability: development + HwGpuTaskGeneral = HwGpuTaskKey.String("general") +) + +// Enum values for hw.limit_type +var ( + // Critical + // Stability: development + HwLimitTypeCritical = HwLimitTypeKey.String("critical") + // Degraded + // Stability: development + HwLimitTypeDegraded = HwLimitTypeKey.String("degraded") + // High Critical + // Stability: development + HwLimitTypeHighCritical = HwLimitTypeKey.String("high.critical") + // High Degraded + // Stability: development + HwLimitTypeHighDegraded = HwLimitTypeKey.String("high.degraded") + // Low Critical + // Stability: development + HwLimitTypeLowCritical = HwLimitTypeKey.String("low.critical") + // Low Degraded + // Stability: development + HwLimitTypeLowDegraded = HwLimitTypeKey.String("low.degraded") + // Maximum + // Stability: development + HwLimitTypeMax = HwLimitTypeKey.String("max") + // Throttled + // Stability: development + HwLimitTypeThrottled = HwLimitTypeKey.String("throttled") + // Turbo + // Stability: development + HwLimitTypeTurbo = HwLimitTypeKey.String("turbo") +) + +// Enum values for hw.logical_disk.state +var ( + // Used + // Stability: development + HwLogicalDiskStateUsed = HwLogicalDiskStateKey.String("used") + // Free + // Stability: development + HwLogicalDiskStateFree = HwLogicalDiskStateKey.String("free") +) + +// Enum values for hw.physical_disk.state +var ( + // Remaining + // Stability: 
development + HwPhysicalDiskStateRemaining = HwPhysicalDiskStateKey.String("remaining") +) + +// Enum values for hw.state +var ( + // Degraded + // Stability: development + HwStateDegraded = HwStateKey.String("degraded") + // Failed + // Stability: development + HwStateFailed = HwStateKey.String("failed") + // Needs Cleaning + // Stability: development + HwStateNeedsCleaning = HwStateKey.String("needs_cleaning") + // OK + // Stability: development + HwStateOk = HwStateKey.String("ok") + // Predicted Failure + // Stability: development + HwStatePredictedFailure = HwStateKey.String("predicted_failure") +) + +// Enum values for hw.tape_drive.operation_type +var ( + // Mount + // Stability: development + HwTapeDriveOperationTypeMount = HwTapeDriveOperationTypeKey.String("mount") + // Unmount + // Stability: development + HwTapeDriveOperationTypeUnmount = HwTapeDriveOperationTypeKey.String("unmount") + // Clean + // Stability: development + HwTapeDriveOperationTypeClean = HwTapeDriveOperationTypeKey.String("clean") +) + +// Enum values for hw.type +var ( + // Battery + // Stability: development + HwTypeBattery = HwTypeKey.String("battery") + // CPU + // Stability: development + HwTypeCPU = HwTypeKey.String("cpu") + // Disk controller + // Stability: development + HwTypeDiskController = HwTypeKey.String("disk_controller") + // Enclosure + // Stability: development + HwTypeEnclosure = HwTypeKey.String("enclosure") + // Fan + // Stability: development + HwTypeFan = HwTypeKey.String("fan") + // GPU + // Stability: development + HwTypeGpu = HwTypeKey.String("gpu") + // Logical disk + // Stability: development + HwTypeLogicalDisk = HwTypeKey.String("logical_disk") + // Memory + // Stability: development + HwTypeMemory = HwTypeKey.String("memory") + // Network + // Stability: development + HwTypeNetwork = HwTypeKey.String("network") + // Physical disk + // Stability: development + HwTypePhysicalDisk = HwTypeKey.String("physical_disk") + // Power supply + // Stability: 
development + HwTypePowerSupply = HwTypeKey.String("power_supply") + // Tape drive + // Stability: development + HwTypeTapeDrive = HwTypeKey.String("tape_drive") + // Temperature + // Stability: development + HwTypeTemperature = HwTypeKey.String("temperature") + // Voltage + // Stability: development + HwTypeVoltage = HwTypeKey.String("voltage") +) + +// Namespace: ios +const ( + // IOSAppStateKey is the attribute Key conforming to the "ios.app.state" + // semantic conventions. It represents the state of the + // application. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The iOS lifecycle states are defined in the + // [UIApplicationDelegate documentation], and from which the `OS terminology` + // column values are derived. + // + // [UIApplicationDelegate documentation]: https://developer.apple.com/documentation/uikit/uiapplicationdelegate + IOSAppStateKey = attribute.Key("ios.app.state") +) + +// Enum values for ios.app.state +var ( + // The app has become `active`. Associated with UIKit notification + // `applicationDidBecomeActive`. + // + // Stability: development + IOSAppStateActive = IOSAppStateKey.String("active") + // The app is now `inactive`. Associated with UIKit notification + // `applicationWillResignActive`. + // + // Stability: development + IOSAppStateInactive = IOSAppStateKey.String("inactive") + // The app is now in the background. This value is associated with UIKit + // notification `applicationDidEnterBackground`. + // + // Stability: development + IOSAppStateBackground = IOSAppStateKey.String("background") + // The app is now in the foreground. This value is associated with UIKit + // notification `applicationWillEnterForeground`. + // + // Stability: development + IOSAppStateForeground = IOSAppStateKey.String("foreground") + // The app is about to terminate. Associated with UIKit notification + // `applicationWillTerminate`. 
+ // + // Stability: development + IOSAppStateTerminate = IOSAppStateKey.String("terminate") +) + +// Namespace: k8s +const ( + // K8SClusterNameKey is the attribute Key conforming to the "k8s.cluster.name" + // semantic conventions. It represents the name of the cluster. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-cluster" + K8SClusterNameKey = attribute.Key("k8s.cluster.name") + + // K8SClusterUIDKey is the attribute Key conforming to the "k8s.cluster.uid" + // semantic conventions. It represents a pseudo-ID for the cluster, set to the + // UID of the `kube-system` namespace. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d" + // Note: K8s doesn't have support for obtaining a cluster ID. If this is ever + // added, we will recommend collecting the `k8s.cluster.uid` through the + // official APIs. In the meantime, we are able to use the `uid` of the + // `kube-system` namespace as a proxy for cluster ID. Read on for the + // rationale. + // + // Every object created in a K8s cluster is assigned a distinct UID. The + // `kube-system` namespace is used by Kubernetes itself and will exist + // for the lifetime of the cluster. Using the `uid` of the `kube-system` + // namespace is a reasonable proxy for the K8s ClusterID as it will only + // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are + // UUIDs as standardized by + // [ISO/IEC 9834-8 and ITU-T X.667]. + // Which states: + // + // > If generated according to one of the mechanisms defined in Rec. + // > ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // > different from all other UUIDs generated before 3603 A.D., or is + // > extremely likely to be different (depending on the mechanism chosen). + // + // Therefore, UIDs between clusters should be extremely unlikely to + // conflict. 
+ // + // [ISO/IEC 9834-8 and ITU-T X.667]: https://www.itu.int/ITU-T/studygroups/com17/oid.html + K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") + + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "redis" + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the number + // of times the container was restarted. This attribute can be used to identify + // a particular container (running or stopped) within a container spec. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") + + // K8SContainerStatusLastTerminatedReasonKey is the attribute Key conforming to + // the "k8s.container.status.last_terminated_reason" semantic conventions. It + // represents the last terminated reason of the Container. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Evicted", "Error" + K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason") + + // K8SContainerStatusReasonKey is the attribute Key conforming to the + // "k8s.container.status.reason" semantic conventions. It represents the reason + // for the container state. Corresponds to the `reason` field of the: + // [K8s ContainerStateWaiting] or [K8s ContainerStateTerminated]. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ContainerCreating", "CrashLoopBackOff", + // "CreateContainerConfigError", "ErrImagePull", "ImagePullBackOff", + // "OOMKilled", "Completed", "Error", "ContainerCannotRun" + // + // [K8s ContainerStateWaiting]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstatewaiting-v1-core + // [K8s ContainerStateTerminated]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstateterminated-v1-core + K8SContainerStatusReasonKey = attribute.Key("k8s.container.status.reason") + + // K8SContainerStatusStateKey is the attribute Key conforming to the + // "k8s.container.status.state" semantic conventions. It represents the state of + // the container. [K8s ContainerState]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "terminated", "running", "waiting" + // + // [K8s ContainerState]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstate-v1-core + K8SContainerStatusStateKey = attribute.Key("k8s.container.status.state") + + // K8SCronJobNameKey is the attribute Key conforming to the "k8s.cronjob.name" + // semantic conventions. It represents the name of the CronJob. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") + + // K8SCronJobUIDKey is the attribute Key conforming to the "k8s.cronjob.uid" + // semantic conventions. It represents the UID of the CronJob. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") + + // K8SDaemonSetUIDKey is the attribute Key conforming to the "k8s.daemonset.uid" + // semantic conventions. It represents the UID of the DaemonSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of the + // Deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") + + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SHPAMetricTypeKey is the attribute Key conforming to the + // "k8s.hpa.metric.type" semantic conventions. It represents the type of metric + // source for the horizontal pod autoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Resource", "ContainerResource" + // Note: This attribute reflects the `type` field of spec.metrics[] in the HPA. + K8SHPAMetricTypeKey = attribute.Key("k8s.hpa.metric.type") + + // K8SHPANameKey is the attribute Key conforming to the "k8s.hpa.name" semantic + // conventions. It represents the name of the horizontal pod autoscaler. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SHPANameKey = attribute.Key("k8s.hpa.name") + + // K8SHPAScaletargetrefAPIVersionKey is the attribute Key conforming to the + // "k8s.hpa.scaletargetref.api_version" semantic conventions. It represents the + // API version of the target resource to scale for the HorizontalPodAutoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "apps/v1", "autoscaling/v2" + // Note: This maps to the `apiVersion` field in the `scaleTargetRef` of the HPA + // spec. + K8SHPAScaletargetrefAPIVersionKey = attribute.Key("k8s.hpa.scaletargetref.api_version") + + // K8SHPAScaletargetrefKindKey is the attribute Key conforming to the + // "k8s.hpa.scaletargetref.kind" semantic conventions. It represents the kind of + // the target resource to scale for the HorizontalPodAutoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Deployment", "StatefulSet" + // Note: This maps to the `kind` field in the `scaleTargetRef` of the HPA spec. + K8SHPAScaletargetrefKindKey = attribute.Key("k8s.hpa.scaletargetref.kind") + + // K8SHPAScaletargetrefNameKey is the attribute Key conforming to the + // "k8s.hpa.scaletargetref.name" semantic conventions. It represents the name of + // the target resource to scale for the HorizontalPodAutoscaler. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-deployment", "my-statefulset" + // Note: This maps to the `name` field in the `scaleTargetRef` of the HPA spec. + K8SHPAScaletargetrefNameKey = attribute.Key("k8s.hpa.scaletargetref.name") + + // K8SHPAUIDKey is the attribute Key conforming to the "k8s.hpa.uid" semantic + // conventions. It represents the UID of the horizontal pod autoscaler. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SHPAUIDKey = attribute.Key("k8s.hpa.uid") + + // K8SHugepageSizeKey is the attribute Key conforming to the "k8s.hugepage.size" + // semantic conventions. It represents the size (identifier) of the K8s huge + // page. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2Mi" + K8SHugepageSizeKey = attribute.Key("k8s.hugepage.size") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" semantic + // conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SJobNameKey = attribute.Key("k8s.job.name") + + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" semantic + // conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "default" + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") + + // K8SNamespacePhaseKey is the attribute Key conforming to the + // "k8s.namespace.phase" semantic conventions. It represents the phase of the + // K8s namespace. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "active", "terminating" + // Note: This attribute aligns with the `phase` field of the + // [K8s NamespaceStatus] + // + // [K8s NamespaceStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#namespacestatus-v1-core + K8SNamespacePhaseKey = attribute.Key("k8s.namespace.phase") + + // K8SNodeConditionStatusKey is the attribute Key conforming to the + // "k8s.node.condition.status" semantic conventions. It represents the status of + // the condition, one of True, False, Unknown. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "true", "false", "unknown" + // Note: This attribute aligns with the `status` field of the + // [NodeCondition] + // + // [NodeCondition]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#nodecondition-v1-core + K8SNodeConditionStatusKey = attribute.Key("k8s.node.condition.status") + + // K8SNodeConditionTypeKey is the attribute Key conforming to the + // "k8s.node.condition.type" semantic conventions. It represents the condition + // type of a K8s Node. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Ready", "DiskPressure" + // Note: K8s Node conditions as described + // by [K8s documentation]. + // + // This attribute aligns with the `type` field of the + // [NodeCondition] + // + // The set of possible values is not limited to those listed here. Managed + // Kubernetes environments, + // or custom controllers MAY introduce additional node condition types. + // When this occurs, the exact value as reported by the Kubernetes API SHOULD be + // used. 
+ // + // [K8s documentation]: https://v1-32.docs.kubernetes.io/docs/reference/node/node-status/#condition + // [NodeCondition]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#nodecondition-v1-core + K8SNodeConditionTypeKey = attribute.Key("k8s.node.condition.type") + + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "node-1" + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" semantic + // conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2" + K8SNodeUIDKey = attribute.Key("k8s.node.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" semantic + // conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry-pod-autoconf" + K8SPodNameKey = attribute.Key("k8s.pod.name") + + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" semantic + // conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of the + // ReplicaSet. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") + + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SReplicationControllerNameKey is the attribute Key conforming to the + // "k8s.replicationcontroller.name" semantic conventions. It represents the name + // of the replication controller. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SReplicationControllerNameKey = attribute.Key("k8s.replicationcontroller.name") + + // K8SReplicationControllerUIDKey is the attribute Key conforming to the + // "k8s.replicationcontroller.uid" semantic conventions. It represents the UID + // of the replication controller. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff" + K8SReplicationControllerUIDKey = attribute.Key("k8s.replicationcontroller.uid") + + // K8SResourceQuotaNameKey is the attribute Key conforming to the + // "k8s.resourcequota.name" semantic conventions. It represents the name of the + // resource quota. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "opentelemetry" + K8SResourceQuotaNameKey = attribute.Key("k8s.resourcequota.name") + + // K8SResourceQuotaResourceNameKey is the attribute Key conforming to the + // "k8s.resourcequota.resource_name" semantic conventions. It represents the + // name of the K8s resource a resource quota defines. 
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "count/replicationcontrollers"
+ // Note: The value for this attribute can be either the full
+ // `count/<resource>.<group>` string (e.g., count/deployments.apps,
+ // count/pods), or, for certain core Kubernetes resources, just the resource
+ // name (e.g., pods, services, configmaps). Both forms are supported by
+ // Kubernetes for object count quotas. See
+ // [Kubernetes Resource Quotas documentation] for more details.
+ //
+ // [Kubernetes Resource Quotas documentation]: https://kubernetes.io/docs/concepts/policy/resource-quotas/#object-count-quota
+ K8SResourceQuotaResourceNameKey = attribute.Key("k8s.resourcequota.resource_name")
+
+ // K8SResourceQuotaUIDKey is the attribute Key conforming to the
+ // "k8s.resourcequota.uid" semantic conventions. It represents the UID of the
+ // resource quota.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SResourceQuotaUIDKey = attribute.Key("k8s.resourcequota.uid")
+
+ // K8SStatefulSetNameKey is the attribute Key conforming to the
+ // "k8s.statefulset.name" semantic conventions. It represents the name of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+
+ // K8SStatefulSetUIDKey is the attribute Key conforming to the
+ // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+
+ // K8SStorageclassNameKey is the attribute Key conforming to the
+ // "k8s.storageclass.name" semantic conventions. It represents the name of K8s
+ // [StorageClass] object.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "gold.storageclass.storage.k8s.io"
+ //
+ // [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io
+ K8SStorageclassNameKey = attribute.Key("k8s.storageclass.name")
+
+ // K8SVolumeNameKey is the attribute Key conforming to the "k8s.volume.name"
+ // semantic conventions. It represents the name of the K8s volume.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "volume0"
+ K8SVolumeNameKey = attribute.Key("k8s.volume.name")
+
+ // K8SVolumeTypeKey is the attribute Key conforming to the "k8s.volume.type"
+ // semantic conventions. It represents the type of the K8s volume.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "emptyDir", "persistentVolumeClaim"
+ K8SVolumeTypeKey = attribute.Key("k8s.volume.type")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+ return K8SClusterNameKey.String(val)
+}
+
+// K8SClusterUID returns an attribute KeyValue conforming to the
+// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
+// cluster, set to the UID of the `kube-system` namespace.
+func K8SClusterUID(val string) attribute.KeyValue {
+ return K8SClusterUIDKey.String(val)
+}
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from Pod specification, must be unique within a Pod. Container
+// runtime usually uses different globally unique name (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+ return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify a
+// particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+ return K8SContainerRestartCountKey.Int(val)
+}
+
+// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue
+// conforming to the "k8s.container.status.last_terminated_reason" semantic
+// conventions. It represents the last terminated reason of the Container.
+func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue {
+ return K8SContainerStatusLastTerminatedReasonKey.String(val)
+}
+
+// K8SCronJobAnnotation returns an attribute KeyValue conforming to the
+// "k8s.cronjob.annotation" semantic conventions. It represents the cronjob
+// annotation placed on the CronJob, the `<key>` being the annotation name, the
+// value being the annotation value.
+func K8SCronJobAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.cronjob.annotation."+key, val)
+}
+
+// K8SCronJobLabel returns an attribute KeyValue conforming to the
+// "k8s.cronjob.label" semantic conventions. It represents the label placed on
+// the CronJob, the `<key>` being the label name, the value being the label
+// value.
+func K8SCronJobLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.cronjob.label."+key, val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+ return K8SCronJobNameKey.String(val)
+}
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+ return K8SCronJobUIDKey.String(val)
+}
+
+// K8SDaemonSetAnnotation returns an attribute KeyValue conforming to the
+// "k8s.daemonset.annotation" semantic conventions. It represents the annotation
+// placed on the DaemonSet, the `<key>` being the annotation name, the value
+// being the annotation value, even if the value is empty.
+func K8SDaemonSetAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.daemonset.annotation."+key, val)
+}
+
+// K8SDaemonSetLabel returns an attribute KeyValue conforming to the
+// "k8s.daemonset.label" semantic conventions. It represents the label placed on
+// the DaemonSet, the `<key>` being the label name, the value being the label
+// value, even if the value is empty.
+func K8SDaemonSetLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.daemonset.label."+key, val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+ return K8SDaemonSetNameKey.String(val)
+}
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+ return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDeploymentAnnotation returns an attribute KeyValue conforming to the
+// "k8s.deployment.annotation" semantic conventions. It represents the annotation
+// placed on the Deployment, the `<key>` being the annotation name, the value
+// being the annotation value, even if the value is empty.
+func K8SDeploymentAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.deployment.annotation."+key, val)
+}
+
+// K8SDeploymentLabel returns an attribute KeyValue conforming to the
+// "k8s.deployment.label" semantic conventions. It represents the label placed on
+// the Deployment, the `<key>` being the label name, the value being the label
+// value, even if the value is empty.
+func K8SDeploymentLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.deployment.label."+key, val)
+}
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+ return K8SDeploymentNameKey.String(val)
+}
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
+func K8SDeploymentUID(val string) attribute.KeyValue {
+ return K8SDeploymentUIDKey.String(val)
+}
+
+// K8SHPAMetricType returns an attribute KeyValue conforming to the
+// "k8s.hpa.metric.type" semantic conventions. It represents the type of metric
+// source for the horizontal pod autoscaler.
+func K8SHPAMetricType(val string) attribute.KeyValue {
+ return K8SHPAMetricTypeKey.String(val)
+}
+
+// K8SHPAName returns an attribute KeyValue conforming to the "k8s.hpa.name"
+// semantic conventions. It represents the name of the horizontal pod autoscaler.
+func K8SHPAName(val string) attribute.KeyValue {
+ return K8SHPANameKey.String(val)
+}
+
+// K8SHPAScaletargetrefAPIVersion returns an attribute KeyValue conforming to the
+// "k8s.hpa.scaletargetref.api_version" semantic conventions. It represents the
+// API version of the target resource to scale for the HorizontalPodAutoscaler.
+func K8SHPAScaletargetrefAPIVersion(val string) attribute.KeyValue {
+ return K8SHPAScaletargetrefAPIVersionKey.String(val)
+}
+
+// K8SHPAScaletargetrefKind returns an attribute KeyValue conforming to the
+// "k8s.hpa.scaletargetref.kind" semantic conventions. It represents the kind of
+// the target resource to scale for the HorizontalPodAutoscaler.
+func K8SHPAScaletargetrefKind(val string) attribute.KeyValue {
+ return K8SHPAScaletargetrefKindKey.String(val)
+}
+
+// K8SHPAScaletargetrefName returns an attribute KeyValue conforming to the
+// "k8s.hpa.scaletargetref.name" semantic conventions. It represents the name of
+// the target resource to scale for the HorizontalPodAutoscaler.
+func K8SHPAScaletargetrefName(val string) attribute.KeyValue {
+ return K8SHPAScaletargetrefNameKey.String(val)
+}
+
+// K8SHPAUID returns an attribute KeyValue conforming to the "k8s.hpa.uid"
+// semantic conventions. It represents the UID of the horizontal pod autoscaler.
+func K8SHPAUID(val string) attribute.KeyValue {
+ return K8SHPAUIDKey.String(val)
+}
+
+// K8SHugepageSize returns an attribute KeyValue conforming to the
+// "k8s.hugepage.size" semantic conventions. It represents the size (identifier)
+// of the K8s huge page.
+func K8SHugepageSize(val string) attribute.KeyValue {
+ return K8SHugepageSizeKey.String(val)
+}
+
+// K8SJobAnnotation returns an attribute KeyValue conforming to the
+// "k8s.job.annotation" semantic conventions. It represents the annotation placed
+// on the Job, the `<key>` being the annotation name, the value being the
+// annotation value, even if the value is empty.
+func K8SJobAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.job.annotation."+key, val)
+}
+
+// K8SJobLabel returns an attribute KeyValue conforming to the "k8s.job.label"
+// semantic conventions. It represents the label placed on the Job, the `<key>`
+// being the label name, the value being the label value, even if the value is
+// empty.
+func K8SJobLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.job.label."+key, val)
+}
+
+// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
+// semantic conventions. It represents the name of the Job.
+func K8SJobName(val string) attribute.KeyValue {
+ return K8SJobNameKey.String(val)
+}
+
+// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
+// semantic conventions. It represents the UID of the Job.
+func K8SJobUID(val string) attribute.KeyValue {
+ return K8SJobUIDKey.String(val)
+}
+
+// K8SNamespaceAnnotation returns an attribute KeyValue conforming to the
+// "k8s.namespace.annotation" semantic conventions. It represents the annotation
+// placed on the Namespace, the `<key>` being the annotation name, the value
+// being the annotation value, even if the value is empty.
+func K8SNamespaceAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.namespace.annotation."+key, val)
+}
+
+// K8SNamespaceLabel returns an attribute KeyValue conforming to the
+// "k8s.namespace.label" semantic conventions. It represents the label placed on
+// the Namespace, the `<key>` being the label name, the value being the label
+// value, even if the value is empty.
+func K8SNamespaceLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.namespace.label."+key, val)
+}
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+ return K8SNamespaceNameKey.String(val)
+}
+
+// K8SNodeAnnotation returns an attribute KeyValue conforming to the
+// "k8s.node.annotation" semantic conventions. It represents the annotation
+// placed on the Node, the `<key>` being the annotation name, the value being the
+// annotation value, even if the value is empty.
+func K8SNodeAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.node.annotation."+key, val)
+}
+
+// K8SNodeLabel returns an attribute KeyValue conforming to the "k8s.node.label"
+// semantic conventions. It represents the label placed on the Node, the `<key>`
+// being the label name, the value being the label value, even if the value is
+// empty.
+func K8SNodeLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.node.label."+key, val)
+}
+
+// K8SNodeName returns an attribute KeyValue conforming to the "k8s.node.name"
+// semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+ return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+ return K8SNodeUIDKey.String(val)
+}
+
+// K8SPodAnnotation returns an attribute KeyValue conforming to the
+// "k8s.pod.annotation" semantic conventions. It represents the annotation placed
+// on the Pod, the `<key>` being the annotation name, the value being the
+// annotation value.
+func K8SPodAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.pod.annotation."+key, val)
+}
+
+// K8SPodLabel returns an attribute KeyValue conforming to the "k8s.pod.label"
+// semantic conventions. It represents the label placed on the Pod, the `<key>`
+// being the label name, the value being the label value.
+func K8SPodLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.pod.label."+key, val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+ return K8SPodNameKey.String(val)
+}
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+ return K8SPodUIDKey.String(val)
+}
+
+// K8SReplicaSetAnnotation returns an attribute KeyValue conforming to the
+// "k8s.replicaset.annotation" semantic conventions. It represents the annotation
+// placed on the ReplicaSet, the `<key>` being the annotation name, the value
+// being the annotation value, even if the value is empty.
+func K8SReplicaSetAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.replicaset.annotation."+key, val)
+}
+
+// K8SReplicaSetLabel returns an attribute KeyValue conforming to the
+// "k8s.replicaset.label" semantic conventions. It represents the label placed on
+// the ReplicaSet, the `<key>` being the label name, the value being the label
+// value, even if the value is empty.
+func K8SReplicaSetLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.replicaset.label."+key, val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+ return K8SReplicaSetNameKey.String(val)
+}
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+ return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SReplicationControllerName returns an attribute KeyValue conforming to the
+// "k8s.replicationcontroller.name" semantic conventions. It represents the name
+// of the replication controller.
+func K8SReplicationControllerName(val string) attribute.KeyValue { + return K8SReplicationControllerNameKey.String(val) +} + +// K8SReplicationControllerUID returns an attribute KeyValue conforming to the +// "k8s.replicationcontroller.uid" semantic conventions. It represents the UID of +// the replication controller. +func K8SReplicationControllerUID(val string) attribute.KeyValue { + return K8SReplicationControllerUIDKey.String(val) +} + +// K8SResourceQuotaName returns an attribute KeyValue conforming to the +// "k8s.resourcequota.name" semantic conventions. It represents the name of the +// resource quota. +func K8SResourceQuotaName(val string) attribute.KeyValue { + return K8SResourceQuotaNameKey.String(val) +} + +// K8SResourceQuotaResourceName returns an attribute KeyValue conforming to the +// "k8s.resourcequota.resource_name" semantic conventions. It represents the name +// of the K8s resource a resource quota defines. +func K8SResourceQuotaResourceName(val string) attribute.KeyValue { + return K8SResourceQuotaResourceNameKey.String(val) +} + +// K8SResourceQuotaUID returns an attribute KeyValue conforming to the +// "k8s.resourcequota.uid" semantic conventions. It represents the UID of the +// resource quota. +func K8SResourceQuotaUID(val string) attribute.KeyValue { + return K8SResourceQuotaUIDKey.String(val) +} + +// K8SStatefulSetAnnotation returns an attribute KeyValue conforming to the +// "k8s.statefulset.annotation" semantic conventions. It represents the +// annotation placed on the StatefulSet, the `` being the annotation name, +// the value being the annotation value, even if the value is empty. +func K8SStatefulSetAnnotation(key string, val string) attribute.KeyValue { + return attribute.String("k8s.statefulset.annotation."+key, val) +} + +// K8SStatefulSetLabel returns an attribute KeyValue conforming to the +// "k8s.statefulset.label" semantic conventions. 
It represents the label placed +// on the StatefulSet, the `` being the label name, the value being the +// label value, even if the value is empty. +func K8SStatefulSetLabel(key string, val string) attribute.KeyValue { + return attribute.String("k8s.statefulset.label."+key, val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// K8SStorageclassName returns an attribute KeyValue conforming to the +// "k8s.storageclass.name" semantic conventions. It represents the name of K8s +// [StorageClass] object. +// +// [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io +func K8SStorageclassName(val string) attribute.KeyValue { + return K8SStorageclassNameKey.String(val) +} + +// K8SVolumeName returns an attribute KeyValue conforming to the +// "k8s.volume.name" semantic conventions. It represents the name of the K8s +// volume. +func K8SVolumeName(val string) attribute.KeyValue { + return K8SVolumeNameKey.String(val) +} + +// Enum values for k8s.container.status.reason +var ( + // The container is being created. + // Stability: development + K8SContainerStatusReasonContainerCreating = K8SContainerStatusReasonKey.String("ContainerCreating") + // The container is in a crash loop back off state. + // Stability: development + K8SContainerStatusReasonCrashLoopBackOff = K8SContainerStatusReasonKey.String("CrashLoopBackOff") + // There was an error creating the container configuration. 
+ // Stability: development + K8SContainerStatusReasonCreateContainerConfigError = K8SContainerStatusReasonKey.String("CreateContainerConfigError") + // There was an error pulling the container image. + // Stability: development + K8SContainerStatusReasonErrImagePull = K8SContainerStatusReasonKey.String("ErrImagePull") + // The container image pull is in back off state. + // Stability: development + K8SContainerStatusReasonImagePullBackOff = K8SContainerStatusReasonKey.String("ImagePullBackOff") + // The container was killed due to out of memory. + // Stability: development + K8SContainerStatusReasonOomKilled = K8SContainerStatusReasonKey.String("OOMKilled") + // The container has completed execution. + // Stability: development + K8SContainerStatusReasonCompleted = K8SContainerStatusReasonKey.String("Completed") + // There was an error with the container. + // Stability: development + K8SContainerStatusReasonError = K8SContainerStatusReasonKey.String("Error") + // The container cannot run. + // Stability: development + K8SContainerStatusReasonContainerCannotRun = K8SContainerStatusReasonKey.String("ContainerCannotRun") +) + +// Enum values for k8s.container.status.state +var ( + // The container has terminated. + // Stability: development + K8SContainerStatusStateTerminated = K8SContainerStatusStateKey.String("terminated") + // The container is running. + // Stability: development + K8SContainerStatusStateRunning = K8SContainerStatusStateKey.String("running") + // The container is waiting. 
+ // Stability: development + K8SContainerStatusStateWaiting = K8SContainerStatusStateKey.String("waiting") +) + +// Enum values for k8s.namespace.phase +var ( + // Active namespace phase as described by [K8s API] + // Stability: development + // + // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase + K8SNamespacePhaseActive = K8SNamespacePhaseKey.String("active") + // Terminating namespace phase as described by [K8s API] + // Stability: development + // + // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase + K8SNamespacePhaseTerminating = K8SNamespacePhaseKey.String("terminating") +) + +// Enum values for k8s.node.condition.status +var ( + // condition_true + // Stability: development + K8SNodeConditionStatusConditionTrue = K8SNodeConditionStatusKey.String("true") + // condition_false + // Stability: development + K8SNodeConditionStatusConditionFalse = K8SNodeConditionStatusKey.String("false") + // condition_unknown + // Stability: development + K8SNodeConditionStatusConditionUnknown = K8SNodeConditionStatusKey.String("unknown") +) + +// Enum values for k8s.node.condition.type +var ( + // The node is healthy and ready to accept pods + // Stability: development + K8SNodeConditionTypeReady = K8SNodeConditionTypeKey.String("Ready") + // Pressure exists on the disk size—that is, if the disk capacity is low + // Stability: development + K8SNodeConditionTypeDiskPressure = K8SNodeConditionTypeKey.String("DiskPressure") + // Pressure exists on the node memory—that is, if the node memory is low + // Stability: development + K8SNodeConditionTypeMemoryPressure = K8SNodeConditionTypeKey.String("MemoryPressure") + // Pressure exists on the processes—that is, if there are too many processes + // on the node + // Stability: development + K8SNodeConditionTypePIDPressure = K8SNodeConditionTypeKey.String("PIDPressure") + // The network for the node is not correctly configured + // Stability: development + 
K8SNodeConditionTypeNetworkUnavailable = K8SNodeConditionTypeKey.String("NetworkUnavailable") +) + +// Enum values for k8s.volume.type +var ( + // A [persistentVolumeClaim] volume + // Stability: development + // + // [persistentVolumeClaim]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim + K8SVolumeTypePersistentVolumeClaim = K8SVolumeTypeKey.String("persistentVolumeClaim") + // A [configMap] volume + // Stability: development + // + // [configMap]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#configmap + K8SVolumeTypeConfigMap = K8SVolumeTypeKey.String("configMap") + // A [downwardAPI] volume + // Stability: development + // + // [downwardAPI]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#downwardapi + K8SVolumeTypeDownwardAPI = K8SVolumeTypeKey.String("downwardAPI") + // An [emptyDir] volume + // Stability: development + // + // [emptyDir]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#emptydir + K8SVolumeTypeEmptyDir = K8SVolumeTypeKey.String("emptyDir") + // A [secret] volume + // Stability: development + // + // [secret]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#secret + K8SVolumeTypeSecret = K8SVolumeTypeKey.String("secret") + // A [local] volume + // Stability: development + // + // [local]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#local + K8SVolumeTypeLocal = K8SVolumeTypeKey.String("local") +) + +// Namespace: linux +const ( + // LinuxMemorySlabStateKey is the attribute Key conforming to the + // "linux.memory.slab.state" semantic conventions. It represents the Linux Slab + // memory state. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "reclaimable", "unreclaimable" + LinuxMemorySlabStateKey = attribute.Key("linux.memory.slab.state") +) + +// Enum values for linux.memory.slab.state +var ( + // reclaimable + // Stability: development + LinuxMemorySlabStateReclaimable = LinuxMemorySlabStateKey.String("reclaimable") + // unreclaimable + // Stability: development + LinuxMemorySlabStateUnreclaimable = LinuxMemorySlabStateKey.String("unreclaimable") +) + +// Namespace: log +const ( + // LogFileNameKey is the attribute Key conforming to the "log.file.name" + // semantic conventions. It represents the basename of the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "audit.log" + LogFileNameKey = attribute.Key("log.file.name") + + // LogFileNameResolvedKey is the attribute Key conforming to the + // "log.file.name_resolved" semantic conventions. It represents the basename of + // the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "uuid.log" + LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") + + // LogFilePathKey is the attribute Key conforming to the "log.file.path" + // semantic conventions. It represents the full path to the file. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/var/log/mysql/audit.log" + LogFilePathKey = attribute.Key("log.file.path") + + // LogFilePathResolvedKey is the attribute Key conforming to the + // "log.file.path_resolved" semantic conventions. It represents the full path to + // the file, with symlinks resolved. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/var/lib/docker/uuid.log" + LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") + + // LogIostreamKey is the attribute Key conforming to the "log.iostream" semantic + // conventions. It represents the stream associated with the log. See below for + // a list of well-known values. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + LogIostreamKey = attribute.Key("log.iostream") + + // LogRecordOriginalKey is the attribute Key conforming to the + // "log.record.original" semantic conventions. It represents the complete + // original Log Record. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "77 <86>1 2015-08-06T21:58:59.694Z 192.168.2.133 inactive - - - + // Something happened", "[INFO] 8/3/24 12:34:56 Something happened" + // Note: This value MAY be added when processing a Log Record which was + // originally transmitted as a string or equivalent data type AND the Body field + // of the Log Record does not contain the same value. (e.g. a syslog or a log + // record read from a file.) + LogRecordOriginalKey = attribute.Key("log.record.original") + + // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" + // semantic conventions. It represents a unique identifier for the Log Record. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "01ARZ3NDEKTSV4RRFFQ69G5FAV" + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. This means, that two + // distinguishable log records MUST have different values. + // The id MAY be an + // [Universally Unique Lexicographically Sortable Identifier (ULID)], but other + // identifiers (e.g. UUID) may be used as needed. 
+ // + // [Universally Unique Lexicographically Sortable Identifier (ULID)]: https://github.com/ulid/spec + LogRecordUIDKey = attribute.Key("log.record.uid") +) + +// LogFileName returns an attribute KeyValue conforming to the "log.file.name" +// semantic conventions. It represents the basename of the file. +func LogFileName(val string) attribute.KeyValue { + return LogFileNameKey.String(val) +} + +// LogFileNameResolved returns an attribute KeyValue conforming to the +// "log.file.name_resolved" semantic conventions. It represents the basename of +// the file, with symlinks resolved. +func LogFileNameResolved(val string) attribute.KeyValue { + return LogFileNameResolvedKey.String(val) +} + +// LogFilePath returns an attribute KeyValue conforming to the "log.file.path" +// semantic conventions. It represents the full path to the file. +func LogFilePath(val string) attribute.KeyValue { + return LogFilePathKey.String(val) +} + +// LogFilePathResolved returns an attribute KeyValue conforming to the +// "log.file.path_resolved" semantic conventions. It represents the full path to +// the file, with symlinks resolved. +func LogFilePathResolved(val string) attribute.KeyValue { + return LogFilePathResolvedKey.String(val) +} + +// LogRecordOriginal returns an attribute KeyValue conforming to the +// "log.record.original" semantic conventions. It represents the complete +// original Log Record. +func LogRecordOriginal(val string) attribute.KeyValue { + return LogRecordOriginalKey.String(val) +} + +// LogRecordUID returns an attribute KeyValue conforming to the "log.record.uid" +// semantic conventions. It represents a unique identifier for the Log Record. 
+func LogRecordUID(val string) attribute.KeyValue { + return LogRecordUIDKey.String(val) +} + +// Enum values for log.iostream +var ( + // Logs from stdout stream + // Stability: development + LogIostreamStdout = LogIostreamKey.String("stdout") + // Events from stderr stream + // Stability: development + LogIostreamStderr = LogIostreamKey.String("stderr") +) + +// Namespace: mainframe +const ( + // MainframeLparNameKey is the attribute Key conforming to the + // "mainframe.lpar.name" semantic conventions. It represents the name of the + // logical partition that hosts a systems with a mainframe operating system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "LPAR01" + MainframeLparNameKey = attribute.Key("mainframe.lpar.name") +) + +// MainframeLparName returns an attribute KeyValue conforming to the +// "mainframe.lpar.name" semantic conventions. It represents the name of the +// logical partition that hosts a systems with a mainframe operating system. +func MainframeLparName(val string) attribute.KeyValue { + return MainframeLparNameKey.String(val) +} + +// Namespace: messaging +const ( + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the batching + // operation. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client library + // supports both batch and single-message API for the same operation, + // instrumentations SHOULD use `messaging.batch.message_count` for batching APIs + // and SHOULD NOT use it for single-message APIs. 
+ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") + + // MessagingClientIDKey is the attribute Key conforming to the + // "messaging.client.id" semantic conventions. It represents a unique identifier + // for the client that consumes or produces a message. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "client-5", "myhost@8742@s8083jm" + MessagingClientIDKey = attribute.Key("messaging.client.id") + + // MessagingConsumerGroupNameKey is the attribute Key conforming to the + // "messaging.consumer.group.name" semantic conventions. It represents the name + // of the consumer group with which a consumer is associated. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-group", "indexer" + // Note: Semantic conventions for individual messaging systems SHOULD document + // whether `messaging.consumer.group.name` is applicable and what it means in + // the context of that system. + MessagingConsumerGroupNameKey = attribute.Key("messaging.consumer.group.name") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") + + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the message + // destination name. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MyQueue", "MyTopic" + // Note: Destination name SHOULD uniquely identify a specific queue, topic or + // other entity within the broker. If + // the broker doesn't have such notion, the destination name SHOULD uniquely + // identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationPartitionIDKey is the attribute Key conforming to the + // "messaging.destination.partition.id" semantic conventions. It represents the + // identifier of the partition messages are sent to or received from, unique + // within the `messaging.destination.name`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1 + MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id") + + // MessagingDestinationSubscriptionNameKey is the attribute Key conforming to + // the "messaging.destination.subscription.name" semantic conventions. It + // represents the name of the destination subscription from which a message is + // consumed. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "subscription-a" + // Note: Semantic conventions for individual messaging systems SHOULD document + // whether `messaging.destination.subscription.name` is applicable and what it + // means in the context of that system. + MessagingDestinationSubscriptionNameKey = attribute.Key("messaging.destination.subscription.name") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the low + // cardinality representation of the messaging destination name. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/customers/{customerId}" + // Note: Destination names could be constructed from templates. An example would + // be a destination name involving a user name or product id. Although the + // destination name in this case is of high cardinality, the underlying template + // is of low cardinality and can be effectively used for grouping and + // aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might not + // exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingEventHubsMessageEnqueuedTimeKey is the attribute Key conforming to + // the "messaging.eventhubs.message.enqueued_time" semantic conventions. It + // represents the UTC epoch seconds at which the message has been accepted and + // stored in the entity. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingEventHubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time") + + // MessagingGCPPubSubMessageAckDeadlineKey is the attribute Key conforming to + // the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It + // represents the ack deadline in seconds set for the modify ack deadline + // request. 
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingGCPPubSubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline") + + // MessagingGCPPubSubMessageAckIDKey is the attribute Key conforming to the + // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the + // ack id for a given message. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: ack_id + MessagingGCPPubSubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id") + + // MessagingGCPPubSubMessageDeliveryAttemptKey is the attribute Key conforming + // to the "messaging.gcp_pubsub.message.delivery_attempt" semantic conventions. + // It represents the delivery attempt for a given message. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingGCPPubSubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt") + + // MessagingGCPPubSubMessageOrderingKeyKey is the attribute Key conforming to + // the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It + // represents the ordering key for a given message. If the attribute is not + // present, the message does not have an ordering key. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: ordering_key + MessagingGCPPubSubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") + + // MessagingKafkaMessageKeyKey is the attribute Key conforming to the + // "messaging.kafka.message.key" semantic conventions. It represents the message + // keys in Kafka are used for grouping alike messages to ensure they're + // processed on the same partition. They differ from `messaging.message.id` in + // that they're not unique. If the key is `null`, the attribute MUST NOT be set. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: myKey + // Note: If the key type is not string, it's string representation has to be + // supplied for the attribute. If the key has no unambiguous, canonical string + // form, don't include its value. + MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") + + // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the + // "messaging.kafka.message.tombstone" semantic conventions. It represents a + // boolean that is true if the message is a tombstone. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") + + // MessagingKafkaOffsetKey is the attribute Key conforming to the + // "messaging.kafka.offset" semantic conventions. It represents the offset of a + // record in the corresponding Kafka partition. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingKafkaOffsetKey = attribute.Key("messaging.kafka.offset") + + // MessagingMessageBodySizeKey is the attribute Key conforming to the + // "messaging.message.body.size" semantic conventions. It represents the size of + // the message body in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Note: This can refer to both the compressed or uncompressed body size. If + // both sizes are known, the uncompressed + // body size should be used. + MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents the + // conversation ID identifying the conversation to which the message belongs, + // represented as a string. Sometimes called "Correlation ID". 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: MyConversationId + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the + // "messaging.message.envelope.size" semantic conventions. It represents the + // size of the message body and metadata in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Note: This can refer to both the compressed or uncompressed size. If both + // sizes are known, the uncompressed + // size should be used. + MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") + + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used by + // the messaging system as an identifier for the message, represented as a + // string. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 452a7c7c7c7048c2f887f61572b18fc2 + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingOperationNameKey is the attribute Key conforming to the + // "messaging.operation.name" semantic conventions. It represents the + // system-specific name of the messaging operation. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ack", "nack", "send" + MessagingOperationNameKey = attribute.Key("messaging.operation.name") + + // MessagingOperationTypeKey is the attribute Key conforming to the + // "messaging.operation.type" semantic conventions. It represents a string + // identifying the type of the messaging operation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: If a custom value is used, it MUST be of low cardinality. 
+ MessagingOperationTypeKey = attribute.Key("messaging.operation.type") + + // MessagingRabbitMQDestinationRoutingKeyKey is the attribute Key conforming to + // the "messaging.rabbitmq.destination.routing_key" semantic conventions. It + // represents the rabbitMQ message routing key. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: myKey + MessagingRabbitMQDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") + + // MessagingRabbitMQMessageDeliveryTagKey is the attribute Key conforming to the + // "messaging.rabbitmq.message.delivery_tag" semantic conventions. It represents + // the rabbitMQ message delivery tag. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingRabbitMQMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag") + + // MessagingRocketMQConsumptionModelKey is the attribute Key conforming to the + // "messaging.rocketmq.consumption_model" semantic conventions. It represents + // the model of message consumption. This only applies to consumer spans. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + MessagingRocketMQConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") + + // MessagingRocketMQMessageDelayTimeLevelKey is the attribute Key conforming to + // the "messaging.rocketmq.message.delay_time_level" semantic conventions. It + // represents the delay time level for delay message, which determines the + // message delay time. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingRocketMQMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") + + // MessagingRocketMQMessageDeliveryTimestampKey is the attribute Key conforming + // to the "messaging.rocketmq.message.delivery_timestamp" semantic conventions. 
+ // It represents the timestamp in milliseconds that the delay message is + // expected to be delivered to consumer. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingRocketMQMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") + + // MessagingRocketMQMessageGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.message.group" semantic conventions. It represents the it + // is essential for FIFO message. Messages that belong to the same message group + // are always processed one by one within the same consumer group. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: myMessageGroup + MessagingRocketMQMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") + + // MessagingRocketMQMessageKeysKey is the attribute Key conforming to the + // "messaging.rocketmq.message.keys" semantic conventions. It represents the + // key(s) of message, another way to mark message besides message id. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "keyA", "keyB" + MessagingRocketMQMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") + + // MessagingRocketMQMessageTagKey is the attribute Key conforming to the + // "messaging.rocketmq.message.tag" semantic conventions. It represents the + // secondary classifier of message besides topic. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: tagA + MessagingRocketMQMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") + + // MessagingRocketMQMessageTypeKey is the attribute Key conforming to the + // "messaging.rocketmq.message.type" semantic conventions. It represents the + // type of message. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + MessagingRocketMQMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") + + // MessagingRocketMQNamespaceKey is the attribute Key conforming to the + // "messaging.rocketmq.namespace" semantic conventions. It represents the + // namespace of RocketMQ resources, resources in different namespaces are + // individual. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: myNamespace + MessagingRocketMQNamespaceKey = attribute.Key("messaging.rocketmq.namespace") + + // MessagingServiceBusDispositionStatusKey is the attribute Key conforming to + // the "messaging.servicebus.disposition_status" semantic conventions. It + // represents the describes the [settlement type]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [settlement type]: https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock + MessagingServiceBusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status") + + // MessagingServiceBusMessageDeliveryCountKey is the attribute Key conforming to + // the "messaging.servicebus.message.delivery_count" semantic conventions. It + // represents the number of deliveries that have been attempted for this + // message. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingServiceBusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count") + + // MessagingServiceBusMessageEnqueuedTimeKey is the attribute Key conforming to + // the "messaging.servicebus.message.enqueued_time" semantic conventions. It + // represents the UTC epoch seconds at which the message has been accepted and + // stored in the entity. 
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + MessagingServiceBusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time") + + // MessagingSystemKey is the attribute Key conforming to the "messaging.system" + // semantic conventions. It represents the messaging system as identified by the + // client instrumentation. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The actual messaging system may differ from the one known by the + // client. For example, when using Kafka client libraries to communicate with + // Azure Event Hubs, the `messaging.system` is set to `kafka` based on the + // instrumentation's best knowledge. + MessagingSystemKey = attribute.Key("messaging.system") +) + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to the +// "messaging.batch.message_count" semantic conventions. It represents the number +// of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// MessagingClientID returns an attribute KeyValue conforming to the +// "messaging.client.id" semantic conventions. It represents a unique identifier +// for the client that consumes or produces a message. +func MessagingClientID(val string) attribute.KeyValue { + return MessagingClientIDKey.String(val) +} + +// MessagingConsumerGroupName returns an attribute KeyValue conforming to the +// "messaging.consumer.group.name" semantic conventions. It represents the name +// of the consumer group with which a consumer is associated. +func MessagingConsumerGroupName(val string) attribute.KeyValue { + return MessagingConsumerGroupNameKey.String(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to the +// "messaging.destination.anonymous" semantic conventions. 
It represents a +// boolean that is true if the message destination is anonymous (could be unnamed +// or have auto-generated name). +func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name. +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationPartitionID returns an attribute KeyValue conforming to +// the "messaging.destination.partition.id" semantic conventions. It represents +// the identifier of the partition messages are sent to or received from, unique +// within the `messaging.destination.name`. +func MessagingDestinationPartitionID(val string) attribute.KeyValue { + return MessagingDestinationPartitionIDKey.String(val) +} + +// MessagingDestinationSubscriptionName returns an attribute KeyValue conforming +// to the "messaging.destination.subscription.name" semantic conventions. It +// represents the name of the destination subscription from which a message is +// consumed. +func MessagingDestinationSubscriptionName(val string) attribute.KeyValue { + return MessagingDestinationSubscriptionNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to the +// "messaging.destination.template" semantic conventions. It represents the low +// cardinality representation of the messaging destination name. +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to the +// "messaging.destination.temporary" semantic conventions. 
It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingEventHubsMessageEnqueuedTime returns an attribute KeyValue conforming +// to the "messaging.eventhubs.message.enqueued_time" semantic conventions. It +// represents the UTC epoch seconds at which the message has been accepted and +// stored in the entity. +func MessagingEventHubsMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingEventHubsMessageEnqueuedTimeKey.Int(val) +} + +// MessagingGCPPubSubMessageAckDeadline returns an attribute KeyValue conforming +// to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It +// represents the ack deadline in seconds set for the modify ack deadline +// request. +func MessagingGCPPubSubMessageAckDeadline(val int) attribute.KeyValue { + return MessagingGCPPubSubMessageAckDeadlineKey.Int(val) +} + +// MessagingGCPPubSubMessageAckID returns an attribute KeyValue conforming to the +// "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the +// ack id for a given message. +func MessagingGCPPubSubMessageAckID(val string) attribute.KeyValue { + return MessagingGCPPubSubMessageAckIDKey.String(val) +} + +// MessagingGCPPubSubMessageDeliveryAttempt returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic +// conventions. It represents the delivery attempt for a given message. +func MessagingGCPPubSubMessageDeliveryAttempt(val int) attribute.KeyValue { + return MessagingGCPPubSubMessageDeliveryAttemptKey.Int(val) +} + +// MessagingGCPPubSubMessageOrderingKey returns an attribute KeyValue conforming +// to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It +// represents the ordering key for a given message. 
If the attribute is not +// present, the message does not have an ordering key. +func MessagingGCPPubSubMessageOrderingKey(val string) attribute.KeyValue { + return MessagingGCPPubSubMessageOrderingKeyKey.String(val) +} + +// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the +// "messaging.kafka.message.key" semantic conventions. It represents the message +// keys in Kafka are used for grouping alike messages to ensure they're processed +// on the same partition. They differ from `messaging.message.id` in that they're +// not unique. If the key is `null`, the attribute MUST NOT be set. +func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming to the +// "messaging.kafka.message.tombstone" semantic conventions. It represents a +// boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// MessagingKafkaOffset returns an attribute KeyValue conforming to the +// "messaging.kafka.offset" semantic conventions. It represents the offset of a +// record in the corresponding Kafka partition. +func MessagingKafkaOffset(val int) attribute.KeyValue { + return MessagingKafkaOffsetKey.Int(val) +} + +// MessagingMessageBodySize returns an attribute KeyValue conforming to the +// "messaging.message.body.size" semantic conventions. It represents the size of +// the message body in bytes. +func MessagingMessageBodySize(val int) attribute.KeyValue { + return MessagingMessageBodySizeKey.Int(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming to the +// "messaging.message.conversation_id" semantic conventions. It represents the +// conversation ID identifying the conversation to which the message belongs, +// represented as a string. Sometimes called "Correlation ID". 
+func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to the +// "messaging.message.envelope.size" semantic conventions. It represents the size +// of the message body and metadata in bytes. +func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { + return MessagingMessageEnvelopeSizeKey.Int(val) +} + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by the +// messaging system as an identifier for the message, represented as a string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingOperationName returns an attribute KeyValue conforming to the +// "messaging.operation.name" semantic conventions. It represents the +// system-specific name of the messaging operation. +func MessagingOperationName(val string) attribute.KeyValue { + return MessagingOperationNameKey.String(val) +} + +// MessagingRabbitMQDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the rabbitMQ message routing key. +func MessagingRabbitMQDestinationRoutingKey(val string) attribute.KeyValue { + return MessagingRabbitMQDestinationRoutingKeyKey.String(val) +} + +// MessagingRabbitMQMessageDeliveryTag returns an attribute KeyValue conforming +// to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. It +// represents the rabbitMQ message delivery tag. +func MessagingRabbitMQMessageDeliveryTag(val int) attribute.KeyValue { + return MessagingRabbitMQMessageDeliveryTagKey.Int(val) +} + +// MessagingRocketMQMessageDelayTimeLevel returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delay_time_level" semantic +// conventions. 
It represents the delay time level for delay message, which
+// determines the message delay time.
+func MessagingRocketMQMessageDelayTimeLevel(val int) attribute.KeyValue {
+	return MessagingRocketMQMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketMQMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to consumer.
+func MessagingRocketMQMessageDeliveryTimestamp(val int) attribute.KeyValue {
+	return MessagingRocketMQMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketMQMessageGroup returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.group" semantic conventions. It
+// is essential for FIFO message. Messages that belong to the same message group
+// are always processed one by one within the same consumer group.
+func MessagingRocketMQMessageGroup(val string) attribute.KeyValue {
+	return MessagingRocketMQMessageGroupKey.String(val)
+}
+
+// MessagingRocketMQMessageKeys returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.keys" semantic conventions. It represents the
+// key(s) of message, another way to mark message besides message id.
+func MessagingRocketMQMessageKeys(val ...string) attribute.KeyValue {
+	return MessagingRocketMQMessageKeysKey.StringSlice(val)
+}
+
+// MessagingRocketMQMessageTag returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of message besides topic.
+func MessagingRocketMQMessageTag(val string) attribute.KeyValue {
+	return MessagingRocketMQMessageTagKey.String(val)
+}
+
+// MessagingRocketMQNamespace returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.namespace" semantic conventions.
It represents the +// namespace of RocketMQ resources, resources in different namespaces are +// individual. +func MessagingRocketMQNamespace(val string) attribute.KeyValue { + return MessagingRocketMQNamespaceKey.String(val) +} + +// MessagingServiceBusMessageDeliveryCount returns an attribute KeyValue +// conforming to the "messaging.servicebus.message.delivery_count" semantic +// conventions. It represents the number of deliveries that have been attempted +// for this message. +func MessagingServiceBusMessageDeliveryCount(val int) attribute.KeyValue { + return MessagingServiceBusMessageDeliveryCountKey.Int(val) +} + +// MessagingServiceBusMessageEnqueuedTime returns an attribute KeyValue +// conforming to the "messaging.servicebus.message.enqueued_time" semantic +// conventions. It represents the UTC epoch seconds at which the message has been +// accepted and stored in the entity. +func MessagingServiceBusMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingServiceBusMessageEnqueuedTimeKey.Int(val) +} + +// Enum values for messaging.operation.type +var ( + // A message is created. "Create" spans always refer to a single message and are + // used to provide a unique creation context for messages in batch sending + // scenarios. + // + // Stability: development + MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create") + // One or more messages are provided for sending to an intermediary. If a single + // message is sent, the context of the "Send" span can be used as the creation + // context and no "Create" span needs to be created. + // + // Stability: development + MessagingOperationTypeSend = MessagingOperationTypeKey.String("send") + // One or more messages are requested by a consumer. This operation refers to + // pull-based scenarios, where consumers explicitly call methods of messaging + // SDKs to receive messages. 
+ // + // Stability: development + MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive") + // One or more messages are processed by a consumer. + // + // Stability: development + MessagingOperationTypeProcess = MessagingOperationTypeKey.String("process") + // One or more messages are settled. + // + // Stability: development + MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle") +) + +// Enum values for messaging.rocketmq.consumption_model +var ( + // Clustering consumption model + // Stability: development + MessagingRocketMQConsumptionModelClustering = MessagingRocketMQConsumptionModelKey.String("clustering") + // Broadcasting consumption model + // Stability: development + MessagingRocketMQConsumptionModelBroadcasting = MessagingRocketMQConsumptionModelKey.String("broadcasting") +) + +// Enum values for messaging.rocketmq.message.type +var ( + // Normal message + // Stability: development + MessagingRocketMQMessageTypeNormal = MessagingRocketMQMessageTypeKey.String("normal") + // FIFO message + // Stability: development + MessagingRocketMQMessageTypeFifo = MessagingRocketMQMessageTypeKey.String("fifo") + // Delay message + // Stability: development + MessagingRocketMQMessageTypeDelay = MessagingRocketMQMessageTypeKey.String("delay") + // Transaction message + // Stability: development + MessagingRocketMQMessageTypeTransaction = MessagingRocketMQMessageTypeKey.String("transaction") +) + +// Enum values for messaging.servicebus.disposition_status +var ( + // Message is completed + // Stability: development + MessagingServiceBusDispositionStatusComplete = MessagingServiceBusDispositionStatusKey.String("complete") + // Message is abandoned + // Stability: development + MessagingServiceBusDispositionStatusAbandon = MessagingServiceBusDispositionStatusKey.String("abandon") + // Message is sent to dead letter queue + // Stability: development + MessagingServiceBusDispositionStatusDeadLetter = 
MessagingServiceBusDispositionStatusKey.String("dead_letter") + // Message is deferred + // Stability: development + MessagingServiceBusDispositionStatusDefer = MessagingServiceBusDispositionStatusKey.String("defer") +) + +// Enum values for messaging.system +var ( + // Apache ActiveMQ + // Stability: development + MessagingSystemActiveMQ = MessagingSystemKey.String("activemq") + // Amazon Simple Notification Service (SNS) + // Stability: development + MessagingSystemAWSSNS = MessagingSystemKey.String("aws.sns") + // Amazon Simple Queue Service (SQS) + // Stability: development + MessagingSystemAWSSQS = MessagingSystemKey.String("aws_sqs") + // Azure Event Grid + // Stability: development + MessagingSystemEventGrid = MessagingSystemKey.String("eventgrid") + // Azure Event Hubs + // Stability: development + MessagingSystemEventHubs = MessagingSystemKey.String("eventhubs") + // Azure Service Bus + // Stability: development + MessagingSystemServiceBus = MessagingSystemKey.String("servicebus") + // Google Cloud Pub/Sub + // Stability: development + MessagingSystemGCPPubSub = MessagingSystemKey.String("gcp_pubsub") + // Java Message Service + // Stability: development + MessagingSystemJMS = MessagingSystemKey.String("jms") + // Apache Kafka + // Stability: development + MessagingSystemKafka = MessagingSystemKey.String("kafka") + // RabbitMQ + // Stability: development + MessagingSystemRabbitMQ = MessagingSystemKey.String("rabbitmq") + // Apache RocketMQ + // Stability: development + MessagingSystemRocketMQ = MessagingSystemKey.String("rocketmq") + // Apache Pulsar + // Stability: development + MessagingSystemPulsar = MessagingSystemKey.String("pulsar") +) + +// Namespace: network +const ( + // NetworkCarrierICCKey is the attribute Key conforming to the + // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 + // alpha-2 2-character country code associated with the mobile carrier network. 
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: DE
+	NetworkCarrierICCKey = attribute.Key("network.carrier.icc")
+
+	// NetworkCarrierMCCKey is the attribute Key conforming to the
+	// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+	// country code.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 310
+	NetworkCarrierMCCKey = attribute.Key("network.carrier.mcc")
+
+	// NetworkCarrierMNCKey is the attribute Key conforming to the
+	// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+	// network code.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: 001
+	NetworkCarrierMNCKey = attribute.Key("network.carrier.mnc")
+
+	// NetworkCarrierNameKey is the attribute Key conforming to the
+	// "network.carrier.name" semantic conventions. It represents the name of the
+	// mobile carrier.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: sprint
+	NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+	// NetworkConnectionStateKey is the attribute Key conforming to the
+	// "network.connection.state" semantic conventions. It represents the state of
+	// network connection.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "close_wait"
+	// Note: Connection states are defined as part of the [rfc9293]
+	//
+	// [rfc9293]: https://datatracker.ietf.org/doc/html/rfc9293#section-3.3.2
+	NetworkConnectionStateKey = attribute.Key("network.connection.state")
+
+	// NetworkConnectionSubtypeKey is the attribute Key conforming to the
+	// "network.connection.subtype" semantic conventions. This
+	// describes more details regarding the connection.type.
It may be the type of + // cell technology connection, but it could be used for describing details about + // a wifi connection. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: LTE + NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") + + // NetworkConnectionTypeKey is the attribute Key conforming to the + // "network.connection.type" semantic conventions. It represents the internet + // connection type. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: wifi + NetworkConnectionTypeKey = attribute.Key("network.connection.type") + + // NetworkInterfaceNameKey is the attribute Key conforming to the + // "network.interface.name" semantic conventions. It represents the network + // interface name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "lo", "eth0" + NetworkInterfaceNameKey = attribute.Key("network.interface.name") + + // NetworkIODirectionKey is the attribute Key conforming to the + // "network.io.direction" semantic conventions. It represents the network IO + // operation direction. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "transmit" + NetworkIODirectionKey = attribute.Key("network.io.direction") + + // NetworkLocalAddressKey is the attribute Key conforming to the + // "network.local.address" semantic conventions. It represents the local address + // of the network connection - IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "10.1.2.80", "/tmp/my.sock" + NetworkLocalAddressKey = attribute.Key("network.local.address") + + // NetworkLocalPortKey is the attribute Key conforming to the + // "network.local.port" semantic conventions. It represents the local port + // number of the network connection. 
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 65123 + NetworkLocalPortKey = attribute.Key("network.local.port") + + // NetworkPeerAddressKey is the attribute Key conforming to the + // "network.peer.address" semantic conventions. It represents the peer address + // of the network connection - IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "10.1.2.80", "/tmp/my.sock" + NetworkPeerAddressKey = attribute.Key("network.peer.address") + + // NetworkPeerPortKey is the attribute Key conforming to the "network.peer.port" + // semantic conventions. It represents the peer port number of the network + // connection. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 65123 + NetworkPeerPortKey = attribute.Key("network.peer.port") + + // NetworkProtocolNameKey is the attribute Key conforming to the + // "network.protocol.name" semantic conventions. It represents the + // [OSI application layer] or non-OSI equivalent. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "amqp", "http", "mqtt" + // Note: The value SHOULD be normalized to lowercase. + // + // [OSI application layer]: https://wikipedia.org/wiki/Application_layer + NetworkProtocolNameKey = attribute.Key("network.protocol.name") + + // NetworkProtocolVersionKey is the attribute Key conforming to the + // "network.protocol.version" semantic conventions. It represents the actual + // version of the protocol used for network communication. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "1.1", "2" + // Note: If protocol version is subject to negotiation (for example using [ALPN] + // ), this attribute SHOULD be set to the negotiated version. If the actual + // protocol version is not known, this attribute SHOULD NOT be set. 
+ // + // [ALPN]: https://www.rfc-editor.org/rfc/rfc7301.html + NetworkProtocolVersionKey = attribute.Key("network.protocol.version") + + // NetworkTransportKey is the attribute Key conforming to the + // "network.transport" semantic conventions. It represents the + // [OSI transport layer] or [inter-process communication method]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "tcp", "udp" + // Note: The value SHOULD be normalized to lowercase. + // + // Consider always setting the transport when setting a port number, since + // a port number is ambiguous without knowing the transport. For example + // different processes could be listening on TCP port 12345 and UDP port 12345. + // + // [OSI transport layer]: https://wikipedia.org/wiki/Transport_layer + // [inter-process communication method]: https://wikipedia.org/wiki/Inter-process_communication + NetworkTransportKey = attribute.Key("network.transport") + + // NetworkTypeKey is the attribute Key conforming to the "network.type" semantic + // conventions. It represents the [OSI network layer] or non-OSI equivalent. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "ipv4", "ipv6" + // Note: The value SHOULD be normalized to lowercase. + // + // [OSI network layer]: https://wikipedia.org/wiki/Network_layer + NetworkTypeKey = attribute.Key("network.type") +) + +// NetworkCarrierICC returns an attribute KeyValue conforming to the +// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetworkCarrierICC(val string) attribute.KeyValue { + return NetworkCarrierICCKey.String(val) +} + +// NetworkCarrierMCC returns an attribute KeyValue conforming to the +// "network.carrier.mcc" semantic conventions. It represents the mobile carrier +// country code. 
+func NetworkCarrierMCC(val string) attribute.KeyValue { + return NetworkCarrierMCCKey.String(val) +} + +// NetworkCarrierMNC returns an attribute KeyValue conforming to the +// "network.carrier.mnc" semantic conventions. It represents the mobile carrier +// network code. +func NetworkCarrierMNC(val string) attribute.KeyValue { + return NetworkCarrierMNCKey.String(val) +} + +// NetworkCarrierName returns an attribute KeyValue conforming to the +// "network.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetworkCarrierName(val string) attribute.KeyValue { + return NetworkCarrierNameKey.String(val) +} + +// NetworkInterfaceName returns an attribute KeyValue conforming to the +// "network.interface.name" semantic conventions. It represents the network +// interface name. +func NetworkInterfaceName(val string) attribute.KeyValue { + return NetworkInterfaceNameKey.String(val) +} + +// NetworkLocalAddress returns an attribute KeyValue conforming to the +// "network.local.address" semantic conventions. It represents the local address +// of the network connection - IP address or Unix domain socket name. +func NetworkLocalAddress(val string) attribute.KeyValue { + return NetworkLocalAddressKey.String(val) +} + +// NetworkLocalPort returns an attribute KeyValue conforming to the +// "network.local.port" semantic conventions. It represents the local port number +// of the network connection. +func NetworkLocalPort(val int) attribute.KeyValue { + return NetworkLocalPortKey.Int(val) +} + +// NetworkPeerAddress returns an attribute KeyValue conforming to the +// "network.peer.address" semantic conventions. It represents the peer address of +// the network connection - IP address or Unix domain socket name. +func NetworkPeerAddress(val string) attribute.KeyValue { + return NetworkPeerAddressKey.String(val) +} + +// NetworkPeerPort returns an attribute KeyValue conforming to the +// "network.peer.port" semantic conventions. 
It represents the peer port number +// of the network connection. +func NetworkPeerPort(val int) attribute.KeyValue { + return NetworkPeerPortKey.Int(val) +} + +// NetworkProtocolName returns an attribute KeyValue conforming to the +// "network.protocol.name" semantic conventions. It represents the +// [OSI application layer] or non-OSI equivalent. +// +// [OSI application layer]: https://wikipedia.org/wiki/Application_layer +func NetworkProtocolName(val string) attribute.KeyValue { + return NetworkProtocolNameKey.String(val) +} + +// NetworkProtocolVersion returns an attribute KeyValue conforming to the +// "network.protocol.version" semantic conventions. It represents the actual +// version of the protocol used for network communication. +func NetworkProtocolVersion(val string) attribute.KeyValue { + return NetworkProtocolVersionKey.String(val) +} + +// Enum values for network.connection.state +var ( + // closed + // Stability: development + NetworkConnectionStateClosed = NetworkConnectionStateKey.String("closed") + // close_wait + // Stability: development + NetworkConnectionStateCloseWait = NetworkConnectionStateKey.String("close_wait") + // closing + // Stability: development + NetworkConnectionStateClosing = NetworkConnectionStateKey.String("closing") + // established + // Stability: development + NetworkConnectionStateEstablished = NetworkConnectionStateKey.String("established") + // fin_wait_1 + // Stability: development + NetworkConnectionStateFinWait1 = NetworkConnectionStateKey.String("fin_wait_1") + // fin_wait_2 + // Stability: development + NetworkConnectionStateFinWait2 = NetworkConnectionStateKey.String("fin_wait_2") + // last_ack + // Stability: development + NetworkConnectionStateLastAck = NetworkConnectionStateKey.String("last_ack") + // listen + // Stability: development + NetworkConnectionStateListen = NetworkConnectionStateKey.String("listen") + // syn_received + // Stability: development + NetworkConnectionStateSynReceived = 
NetworkConnectionStateKey.String("syn_received") + // syn_sent + // Stability: development + NetworkConnectionStateSynSent = NetworkConnectionStateKey.String("syn_sent") + // time_wait + // Stability: development + NetworkConnectionStateTimeWait = NetworkConnectionStateKey.String("time_wait") +) + +// Enum values for network.connection.subtype +var ( + // GPRS + // Stability: development + NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") + // EDGE + // Stability: development + NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") + // UMTS + // Stability: development + NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") + // CDMA + // Stability: development + NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + // Stability: development + NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + // Stability: development + NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + // Stability: development + NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + // Stability: development + NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") + // HSUPA + // Stability: development + NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") + // HSPA + // Stability: development + NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") + // IDEN + // Stability: development + NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") + // EVDO Rev. 
B + // Stability: development + NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") + // LTE + // Stability: development + NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") + // EHRPD + // Stability: development + NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") + // HSPAP + // Stability: development + NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") + // GSM + // Stability: development + NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") + // TD-SCDMA + // Stability: development + NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") + // IWLAN + // Stability: development + NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + // Stability: development + NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + // Stability: development + NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") + // LTE CA + // Stability: development + NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") +) + +// Enum values for network.connection.type +var ( + // wifi + // Stability: development + NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") + // wired + // Stability: development + NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") + // cell + // Stability: development + NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") + // unavailable + // Stability: development + NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") + // unknown + // Stability: development + NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") +) + +// Enum values for network.io.direction +var ( + // transmit + // Stability: development + NetworkIODirectionTransmit = NetworkIODirectionKey.String("transmit") + // 
receive + // Stability: development + NetworkIODirectionReceive = NetworkIODirectionKey.String("receive") +) + +// Enum values for network.transport +var ( + // TCP + // Stability: stable + NetworkTransportTCP = NetworkTransportKey.String("tcp") + // UDP + // Stability: stable + NetworkTransportUDP = NetworkTransportKey.String("udp") + // Named or anonymous pipe. + // Stability: stable + NetworkTransportPipe = NetworkTransportKey.String("pipe") + // Unix domain socket + // Stability: stable + NetworkTransportUnix = NetworkTransportKey.String("unix") + // QUIC + // Stability: stable + NetworkTransportQUIC = NetworkTransportKey.String("quic") +) + +// Enum values for network.type +var ( + // IPv4 + // Stability: stable + NetworkTypeIPv4 = NetworkTypeKey.String("ipv4") + // IPv6 + // Stability: stable + NetworkTypeIPv6 = NetworkTypeKey.String("ipv6") +) + +// Namespace: oci +const ( + // OCIManifestDigestKey is the attribute Key conforming to the + // "oci.manifest.digest" semantic conventions. It represents the digest of the + // OCI image manifest. For container images specifically is the digest by which + // the container image is known. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4" + // Note: Follows [OCI Image Manifest Specification], and specifically the + // [Digest property]. + // An example can be found in [Example Image Manifest]. 
+ // + // [OCI Image Manifest Specification]: https://github.com/opencontainers/image-spec/blob/main/manifest.md + // [Digest property]: https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests + // [Example Image Manifest]: https://github.com/opencontainers/image-spec/blob/main/manifest.md#example-image-manifest + OCIManifestDigestKey = attribute.Key("oci.manifest.digest") +) + +// OCIManifestDigest returns an attribute KeyValue conforming to the +// "oci.manifest.digest" semantic conventions. It represents the digest of the +// OCI image manifest. For container images specifically is the digest by which +// the container image is known. +func OCIManifestDigest(val string) attribute.KeyValue { + return OCIManifestDigestKey.String(val) +} + +// Namespace: openai +const ( + // OpenAIRequestServiceTierKey is the attribute Key conforming to the + // "openai.request.service_tier" semantic conventions. It represents the service + // tier requested. May be a specific tier, default, or auto. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "auto", "default" + OpenAIRequestServiceTierKey = attribute.Key("openai.request.service_tier") + + // OpenAIResponseServiceTierKey is the attribute Key conforming to the + // "openai.response.service_tier" semantic conventions. It represents the + // service tier used for the response. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "scale", "default" + OpenAIResponseServiceTierKey = attribute.Key("openai.response.service_tier") + + // OpenAIResponseSystemFingerprintKey is the attribute Key conforming to the + // "openai.response.system_fingerprint" semantic conventions. It represents a + // fingerprint to track any eventual change in the Generative AI environment. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "fp_44709d6fcb" + OpenAIResponseSystemFingerprintKey = attribute.Key("openai.response.system_fingerprint") +) + +// OpenAIResponseServiceTier returns an attribute KeyValue conforming to the +// "openai.response.service_tier" semantic conventions. It represents the service +// tier used for the response. +func OpenAIResponseServiceTier(val string) attribute.KeyValue { + return OpenAIResponseServiceTierKey.String(val) +} + +// OpenAIResponseSystemFingerprint returns an attribute KeyValue conforming to +// the "openai.response.system_fingerprint" semantic conventions. It represents a +// fingerprint to track any eventual change in the Generative AI environment. +func OpenAIResponseSystemFingerprint(val string) attribute.KeyValue { + return OpenAIResponseSystemFingerprintKey.String(val) +} + +// Enum values for openai.request.service_tier +var ( + // The system will utilize scale tier credits until they are exhausted. + // Stability: development + OpenAIRequestServiceTierAuto = OpenAIRequestServiceTierKey.String("auto") + // The system will utilize the default scale tier. + // Stability: development + OpenAIRequestServiceTierDefault = OpenAIRequestServiceTierKey.String("default") +) + +// Namespace: opentracing +const ( + // OpenTracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the parent-child + // Reference type. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: The causal relationship between a child Span and a parent Span. 
+ OpenTracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +// Enum values for opentracing.ref_type +var ( + // The parent Span depends on the child Span in some capacity + // Stability: development + OpenTracingRefTypeChildOf = OpenTracingRefTypeKey.String("child_of") + // The parent Span doesn't depend in any way on the result of the child Span + // Stability: development + OpenTracingRefTypeFollowsFrom = OpenTracingRefTypeKey.String("follows_from") +) + +// Namespace: os +const ( + // OSBuildIDKey is the attribute Key conforming to the "os.build_id" semantic + // conventions. It represents the unique identifier for a particular build or + // compilation of the operating system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TQ3C.230805.001.B2", "20E247", "22621" + OSBuildIDKey = attribute.Key("os.build_id") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to be + // parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Microsoft Windows [Version 10.0.18363.778]", "Ubuntu 18.04.1 LTS" + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "iOS", "Android", "Ubuntu" + OSNameKey = attribute.Key("os.name") + + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + OSTypeKey = attribute.Key("os.type") + + // OSVersionKey is the attribute Key conforming to the "os.version" semantic + // conventions. It represents the version string of the operating system as + // defined in [Version Attributes]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "14.2.1", "18.04.1" + // + // [Version Attributes]: /docs/resource/README.md#version-attributes + OSVersionKey = attribute.Key("os.version") +) + +// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" +// semantic conventions. It represents the unique identifier for a particular +// build or compilation of the operating system. +func OSBuildID(val string) attribute.KeyValue { + return OSBuildIDKey.String(val) +} + +// OSDescription returns an attribute KeyValue conforming to the "os.description" +// semantic conventions. It represents the human readable (not intended to be +// parsed) OS version information, like e.g. reported by `ver` or +// `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating system +// as defined in [Version Attributes]. 
+// +// [Version Attributes]: /docs/resource/README.md#version-attributes +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// Enum values for os.type +var ( + // Microsoft Windows + // Stability: development + OSTypeWindows = OSTypeKey.String("windows") + // Linux + // Stability: development + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + // Stability: development + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + // Stability: development + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + // Stability: development + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + // Stability: development + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + // Stability: development + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + // Stability: development + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + // Stability: development + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + // Stability: development + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + // Stability: development + OSTypeZOS = OSTypeKey.String("zos") +) + +// Namespace: otel +const ( + // OTelComponentNameKey is the attribute Key conforming to the + // "otel.component.name" semantic conventions. It represents a name uniquely + // identifying the instance of the OpenTelemetry component within its containing + // SDK instance. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otlp_grpc_span_exporter/0", "custom-name" + // Note: Implementations SHOULD ensure a low cardinality for this attribute, + // even across application or SDK restarts. + // E.g. implementations MUST NOT use UUIDs as values for this attribute. + // + // Implementations MAY achieve these goals by following a + // `/` pattern, e.g. + // `batching_span_processor/0`. 
+ // Hereby `otel.component.type` refers to the corresponding attribute value of + // the component. + // + // The value of `instance-counter` MAY be automatically assigned by the + // component and uniqueness within the enclosing SDK instance MUST be + // guaranteed. + // For example, `` MAY be implemented by using a monotonically + // increasing counter (starting with `0`), which is incremented every time an + // instance of the given component type is started. + // + // With this implementation, for example the first Batching Span Processor would + // have `batching_span_processor/0` + // as `otel.component.name`, the second one `batching_span_processor/1` and so + // on. + // These values will therefore be reused in the case of an application restart. + OTelComponentNameKey = attribute.Key("otel.component.name") + + // OTelComponentTypeKey is the attribute Key conforming to the + // "otel.component.type" semantic conventions. It represents a name identifying + // the type of the OpenTelemetry component. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "batching_span_processor", "com.example.MySpanExporter" + // Note: If none of the standardized values apply, implementations SHOULD use + // the language-defined name of the type. + // E.g. for Java the fully qualified classname SHOULD be used in this case. + OTelComponentTypeKey = attribute.Key("otel.component.type") + + // OTelScopeNameKey is the attribute Key conforming to the "otel.scope.name" + // semantic conventions. It represents the name of the instrumentation scope - ( + // `InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "io.opentelemetry.contrib.mongodb" + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeSchemaURLKey is the attribute Key conforming to the + // "otel.scope.schema_url" semantic conventions. 
It represents the schema URL of + // the instrumentation scope. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://opentelemetry.io/schemas/1.31.0" + OTelScopeSchemaURLKey = attribute.Key("otel.scope.schema_url") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of the + // instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "1.0.0" + OTelScopeVersionKey = attribute.Key("otel.scope.version") + + // OTelSpanParentOriginKey is the attribute Key conforming to the + // "otel.span.parent.origin" semantic conventions. It represents the determines + // whether the span has a parent span, and if so, + // [whether it is a remote parent]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + OTelSpanParentOriginKey = attribute.Key("otel.span.parent.origin") + + // OTelSpanSamplingResultKey is the attribute Key conforming to the + // "otel.span.sampling_result" semantic conventions. It represents the result + // value of the sampler for this span. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + OTelSpanSamplingResultKey = attribute.Key("otel.span.sampling_result") + + // OTelStatusCodeKey is the attribute Key conforming to the "otel.status_code" + // semantic conventions. It represents the name of the code, either "OK" or + // "ERROR". MUST NOT be set if the status code is UNSET. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: + OTelStatusCodeKey = attribute.Key("otel.status_code") + + // OTelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the description + // of the Status if it has a value, otherwise not set. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "resource not found" + OTelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +// OTelComponentName returns an attribute KeyValue conforming to the +// "otel.component.name" semantic conventions. It represents a name uniquely +// identifying the instance of the OpenTelemetry component within its containing +// SDK instance. +func OTelComponentName(val string) attribute.KeyValue { + return OTelComponentNameKey.String(val) +} + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeSchemaURL returns an attribute KeyValue conforming to the +// "otel.scope.schema_url" semantic conventions. It represents the schema URL of +// the instrumentation scope. +func OTelScopeSchemaURL(val string) attribute.KeyValue { + return OTelScopeSchemaURLKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OTelScopeVersion(val string) attribute.KeyValue { + return OTelScopeVersionKey.String(val) +} + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. 
It represents the description +// of the Status if it has a value, otherwise not set. +func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// Enum values for otel.component.type +var ( + // The builtin SDK batching span processor + // + // Stability: development + OTelComponentTypeBatchingSpanProcessor = OTelComponentTypeKey.String("batching_span_processor") + // The builtin SDK simple span processor + // + // Stability: development + OTelComponentTypeSimpleSpanProcessor = OTelComponentTypeKey.String("simple_span_processor") + // The builtin SDK batching log record processor + // + // Stability: development + OTelComponentTypeBatchingLogProcessor = OTelComponentTypeKey.String("batching_log_processor") + // The builtin SDK simple log record processor + // + // Stability: development + OTelComponentTypeSimpleLogProcessor = OTelComponentTypeKey.String("simple_log_processor") + // OTLP span exporter over gRPC with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpGRPCSpanExporter = OTelComponentTypeKey.String("otlp_grpc_span_exporter") + // OTLP span exporter over HTTP with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPSpanExporter = OTelComponentTypeKey.String("otlp_http_span_exporter") + // OTLP span exporter over HTTP with JSON serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPJSONSpanExporter = OTelComponentTypeKey.String("otlp_http_json_span_exporter") + // Zipkin span exporter over HTTP + // + // Stability: development + OTelComponentTypeZipkinHTTPSpanExporter = OTelComponentTypeKey.String("zipkin_http_span_exporter") + // OTLP log record exporter over gRPC with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpGRPCLogExporter = OTelComponentTypeKey.String("otlp_grpc_log_exporter") + // OTLP log record exporter over HTTP with protobuf serialization + // + // Stability: development + 
OTelComponentTypeOtlpHTTPLogExporter = OTelComponentTypeKey.String("otlp_http_log_exporter") + // OTLP log record exporter over HTTP with JSON serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPJSONLogExporter = OTelComponentTypeKey.String("otlp_http_json_log_exporter") + // The builtin SDK periodically exporting metric reader + // + // Stability: development + OTelComponentTypePeriodicMetricReader = OTelComponentTypeKey.String("periodic_metric_reader") + // OTLP metric exporter over gRPC with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpGRPCMetricExporter = OTelComponentTypeKey.String("otlp_grpc_metric_exporter") + // OTLP metric exporter over HTTP with protobuf serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPMetricExporter = OTelComponentTypeKey.String("otlp_http_metric_exporter") + // OTLP metric exporter over HTTP with JSON serialization + // + // Stability: development + OTelComponentTypeOtlpHTTPJSONMetricExporter = OTelComponentTypeKey.String("otlp_http_json_metric_exporter") + // Prometheus metric exporter over HTTP with the default text-based format + // + // Stability: development + OTelComponentTypePrometheusHTTPTextMetricExporter = OTelComponentTypeKey.String("prometheus_http_text_metric_exporter") +) + +// Enum values for otel.span.parent.origin +var ( + // The span does not have a parent, it is a root span + // Stability: development + OTelSpanParentOriginNone = OTelSpanParentOriginKey.String("none") + // The span has a parent and the parent's span context [isRemote()] is false + // Stability: development + // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + OTelSpanParentOriginLocal = OTelSpanParentOriginKey.String("local") + // The span has a parent and the parent's span context [isRemote()] is true + // Stability: development + // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + 
OTelSpanParentOriginRemote = OTelSpanParentOriginKey.String("remote") +) + +// Enum values for otel.span.sampling_result +var ( + // The span is not sampled and not recording + // Stability: development + OTelSpanSamplingResultDrop = OTelSpanSamplingResultKey.String("DROP") + // The span is not sampled, but recording + // Stability: development + OTelSpanSamplingResultRecordOnly = OTelSpanSamplingResultKey.String("RECORD_ONLY") + // The span is sampled and recording + // Stability: development + OTelSpanSamplingResultRecordAndSample = OTelSpanSamplingResultKey.String("RECORD_AND_SAMPLE") +) + +// Enum values for otel.status_code +var ( + // The operation has been validated by an Application developer or Operator to + // have completed successfully. + // Stability: stable + OTelStatusCodeOk = OTelStatusCodeKey.String("OK") + // The operation contains an error. + // Stability: stable + OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") +) + +// Namespace: peer +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" semantic + // conventions. It represents the [`service.name`] of the remote service. SHOULD + // be equal to the actual `service.name` resource attribute of the remote + // service if any. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: AuthTokenCache + // + // [`service.name`]: /docs/resource/README.md#service + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the "peer.service" +// semantic conventions. It represents the [`service.name`] of the remote +// service. SHOULD be equal to the actual `service.name` resource attribute of +// the remote service if any. 
+// +// [`service.name`]: /docs/resource/README.md#service +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// Namespace: process +const ( + // ProcessArgsCountKey is the attribute Key conforming to the + // "process.args_count" semantic conventions. It represents the length of the + // process.command_args array. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 4 + // Note: This field can be useful for querying or performing bucket analysis on + // how many arguments were provided to start a process. More arguments may be an + // indication of suspicious activity. + ProcessArgsCountKey = attribute.Key("process.args_count") + + // ProcessCommandKey is the attribute Key conforming to the "process.command" + // semantic conventions. It represents the command used to launch the process + // (i.e. the command name). On Linux based systems, can be set to the zeroth + // string in `proc/[pid]/cmdline`. On Windows, can be set to the first parameter + // extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cmd/otelcol" + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) as received by + // the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, this + // would be the full argv vector passed to `main`. SHOULD NOT be collected by + // default unless there is sanitization that excludes sensitive data. 
+ // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cmd/otecol", "--config=config.yaml" + ProcessCommandArgsKey = attribute.Key("process.command_args") + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. It represents the full command + // used to launch the process as a single string representing the full command. + // On Windows, can be set to the result of `GetCommandLineW`. Do not set this if + // you have to assemble it just for monitoring; use `process.command_args` + // instead. SHOULD NOT be collected by default unless there is sanitization that + // excludes sensitive data. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "C:\cmd\otecol --config="my directory\config.yaml"" + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessContextSwitchTypeKey is the attribute Key conforming to the + // "process.context_switch_type" semantic conventions. It represents the + // specifies whether the context switches for this data point were voluntary or + // involuntary. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type") + + // ProcessCreationTimeKey is the attribute Key conforming to the + // "process.creation.time" semantic conventions. It represents the date and time + // the process was created, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2023-11-21T09:25:34.853Z" + ProcessCreationTimeKey = attribute.Key("process.creation.time") + + // ProcessExecutableBuildIDGNUKey is the attribute Key conforming to the + // "process.executable.build_id.gnu" semantic conventions. 
It represents the GNU + // build ID as found in the `.note.gnu.build-id` ELF section (hex string). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "c89b11207f6479603b0d49bf291c092c2b719293" + ProcessExecutableBuildIDGNUKey = attribute.Key("process.executable.build_id.gnu") + + // ProcessExecutableBuildIDGoKey is the attribute Key conforming to the + // "process.executable.build_id.go" semantic conventions. It represents the Go + // build ID as retrieved by `go tool buildid `. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "foh3mEXu7BLZjsN9pOwG/kATcXlYVCDEFouRMQed_/WwRFB1hPo9LBkekthSPG/x8hMC8emW2cCjXD0_1aY" + ProcessExecutableBuildIDGoKey = attribute.Key("process.executable.build_id.go") + + // ProcessExecutableBuildIDHtlhashKey is the attribute Key conforming to the + // "process.executable.build_id.htlhash" semantic conventions. It represents the + // profiling specific build ID for executables. See the OTel specification for + // Profiles for more information. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "600DCAFE4A110000F2BF38C493F5FB92" + ProcessExecutableBuildIDHtlhashKey = attribute.Key("process.executable.build_id.htlhash") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name of the + // process executable. On Linux based systems, this SHOULD be set to the base + // name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to + // the base name of `GetProcessImageFileNameW`. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "otelcol" + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full path + // to the process executable. On Linux based systems, can be set to the target + // of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/usr/bin/cmd/otelcol" + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessExitCodeKey is the attribute Key conforming to the "process.exit.code" + // semantic conventions. It represents the exit code of the process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 127 + ProcessExitCodeKey = attribute.Key("process.exit.code") + + // ProcessExitTimeKey is the attribute Key conforming to the "process.exit.time" + // semantic conventions. It represents the date and time the process exited, in + // ISO 8601 format. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2023-11-21T09:26:12.315Z" + ProcessExitTimeKey = attribute.Key("process.exit.time") + + // ProcessGroupLeaderPIDKey is the attribute Key conforming to the + // "process.group_leader.pid" semantic conventions. It represents the PID of the + // process's group leader. This is also the process group ID (PGID) of the + // process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 23 + ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid") + + // ProcessInteractiveKey is the attribute Key conforming to the + // "process.interactive" semantic conventions. 
It represents the whether the + // process is connected to an interactive shell. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + ProcessInteractiveKey = attribute.Key("process.interactive") + + // ProcessLinuxCgroupKey is the attribute Key conforming to the + // "process.linux.cgroup" semantic conventions. It represents the control group + // associated with the process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1:name=systemd:/user.slice/user-1000.slice/session-3.scope", + // "0::/user.slice/user-1000.slice/user@1000.service/tmux-spawn-0267755b-4639-4a27-90ed-f19f88e53748.scope" + // Note: Control groups (cgroups) are a kernel feature used to organize and + // manage process resources. This attribute provides the path(s) to the + // cgroup(s) associated with the process, which should match the contents of the + // [/proc/[PID]/cgroup] file. + // + // [/proc/[PID]/cgroup]: https://man7.org/linux/man-pages/man7/cgroups.7.html + ProcessLinuxCgroupKey = attribute.Key("process.linux.cgroup") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns the + // process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "root" + ProcessOwnerKey = attribute.Key("process.owner") + + // ProcessPagingFaultTypeKey is the attribute Key conforming to the + // "process.paging.fault_type" semantic conventions. It represents the type of + // page fault for this data point. Type `major` is for major/hard page faults, + // and `minor` is for minor/soft page faults. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent Process + // identifier (PPID). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessPIDKey is the attribute Key conforming to the "process.pid" semantic + // conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessRealUserIDKey is the attribute Key conforming to the + // "process.real_user.id" semantic conventions. It represents the real user ID + // (RUID) of the process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1000 + ProcessRealUserIDKey = attribute.Key("process.real_user.id") + + // ProcessRealUserNameKey is the attribute Key conforming to the + // "process.real_user.name" semantic conventions. It represents the username of + // the real user of the process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "operator" + ProcessRealUserNameKey = attribute.Key("process.real_user.name") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0 + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") + + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of the + // runtime of this process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "OpenJDK Runtime Environment" + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the version of + // the runtime of this process, as returned by the runtime without modification. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 14.0.2 + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessSavedUserIDKey is the attribute Key conforming to the + // "process.saved_user.id" semantic conventions. It represents the saved user ID + // (SUID) of the process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1002 + ProcessSavedUserIDKey = attribute.Key("process.saved_user.id") + + // ProcessSavedUserNameKey is the attribute Key conforming to the + // "process.saved_user.name" semantic conventions. It represents the username of + // the saved user. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "operator" + ProcessSavedUserNameKey = attribute.Key("process.saved_user.name") + + // ProcessSessionLeaderPIDKey is the attribute Key conforming to the + // "process.session_leader.pid" semantic conventions. It represents the PID of + // the process's session leader. This is also the session ID (SID) of the + // process. 
+ // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 14 + ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid") + + // ProcessTitleKey is the attribute Key conforming to the "process.title" + // semantic conventions. It represents the process title (proctitle). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cat /etc/hostname", "xfce4-session", "bash" + // Note: In many Unix-like systems, process title (proctitle), is the string + // that represents the name or command line of a running process, displayed by + // system monitoring tools like ps, top, and htop. + ProcessTitleKey = attribute.Key("process.title") + + // ProcessUserIDKey is the attribute Key conforming to the "process.user.id" + // semantic conventions. It represents the effective user ID (EUID) of the + // process. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1001 + ProcessUserIDKey = attribute.Key("process.user.id") + + // ProcessUserNameKey is the attribute Key conforming to the "process.user.name" + // semantic conventions. It represents the username of the effective user of the + // process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "root" + ProcessUserNameKey = attribute.Key("process.user.name") + + // ProcessVpidKey is the attribute Key conforming to the "process.vpid" semantic + // conventions. It represents the virtual process identifier. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 12 + // Note: The process ID within a PID namespace. This is not necessarily unique + // across all processes on the host but it is unique within the process + // namespace that the process exists within. 
+ ProcessVpidKey = attribute.Key("process.vpid") + + // ProcessWorkingDirectoryKey is the attribute Key conforming to the + // "process.working_directory" semantic conventions. It represents the working + // directory of the process. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/root" + ProcessWorkingDirectoryKey = attribute.Key("process.working_directory") +) + +// ProcessArgsCount returns an attribute KeyValue conforming to the +// "process.args_count" semantic conventions. It represents the length of the +// process.command_args array. +func ProcessArgsCount(val int) attribute.KeyValue { + return ProcessArgsCountKey.Int(val) +} + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be set +// to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to the +// first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents the all the command +// arguments (including the command/executable itself) as received by the +// process. On Linux-based systems (and some other Unixoid systems supporting +// procfs), can be set according to the list of null-delimited strings extracted +// from `proc/[pid]/cmdline`. For libc-based executables, this would be the full +// argv vector passed to `main`. SHOULD NOT be collected by default unless there +// is sanitization that excludes sensitive data. 
+func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`. Do not set this if +// you have to assemble it just for monitoring; use `process.command_args` +// instead. SHOULD NOT be collected by default unless there is sanitization that +// excludes sensitive data. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessCreationTime returns an attribute KeyValue conforming to the +// "process.creation.time" semantic conventions. It represents the date and time +// the process was created, in ISO 8601 format. +func ProcessCreationTime(val string) attribute.KeyValue { + return ProcessCreationTimeKey.String(val) +} + +// ProcessEnvironmentVariable returns an attribute KeyValue conforming to the +// "process.environment_variable" semantic conventions. It represents the process +// environment variables, `` being the environment variable name, the value +// being the environment variable value. +func ProcessEnvironmentVariable(key string, val string) attribute.KeyValue { + return attribute.String("process.environment_variable."+key, val) +} + +// ProcessExecutableBuildIDGNU returns an attribute KeyValue conforming to the +// "process.executable.build_id.gnu" semantic conventions. It represents the GNU +// build ID as found in the `.note.gnu.build-id` ELF section (hex string). +func ProcessExecutableBuildIDGNU(val string) attribute.KeyValue { + return ProcessExecutableBuildIDGNUKey.String(val) +} + +// ProcessExecutableBuildIDGo returns an attribute KeyValue conforming to the +// "process.executable.build_id.go" semantic conventions. 
It represents the Go +// build ID as retrieved by `go tool buildid `. +func ProcessExecutableBuildIDGo(val string) attribute.KeyValue { + return ProcessExecutableBuildIDGoKey.String(val) +} + +// ProcessExecutableBuildIDHtlhash returns an attribute KeyValue conforming to +// the "process.executable.build_id.htlhash" semantic conventions. It represents +// the profiling specific build ID for executables. See the OTel specification +// for Profiles for more information. +func ProcessExecutableBuildIDHtlhash(val string) attribute.KeyValue { + return ProcessExecutableBuildIDHtlhashKey.String(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of the +// process executable. On Linux based systems, this SHOULD be set to the base +// name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to the +// base name of `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions. It represents the full path to +// the process executable. On Linux based systems, can be set to the target of +// `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessExitCode returns an attribute KeyValue conforming to the +// "process.exit.code" semantic conventions. It represents the exit code of the +// process. +func ProcessExitCode(val int) attribute.KeyValue { + return ProcessExitCodeKey.Int(val) +} + +// ProcessExitTime returns an attribute KeyValue conforming to the +// "process.exit.time" semantic conventions. It represents the date and time the +// process exited, in ISO 8601 format. 
+func ProcessExitTime(val string) attribute.KeyValue { + return ProcessExitTimeKey.String(val) +} + +// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the +// "process.group_leader.pid" semantic conventions. It represents the PID of the +// process's group leader. This is also the process group ID (PGID) of the +// process. +func ProcessGroupLeaderPID(val int) attribute.KeyValue { + return ProcessGroupLeaderPIDKey.Int(val) +} + +// ProcessInteractive returns an attribute KeyValue conforming to the +// "process.interactive" semantic conventions. It represents the whether the +// process is connected to an interactive shell. +func ProcessInteractive(val bool) attribute.KeyValue { + return ProcessInteractiveKey.Bool(val) +} + +// ProcessLinuxCgroup returns an attribute KeyValue conforming to the +// "process.linux.cgroup" semantic conventions. It represents the control group +// associated with the process. +func ProcessLinuxCgroup(val string) attribute.KeyValue { + return ProcessLinuxCgroupKey.String(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the "process.owner" +// semantic conventions. It represents the username of the user that owns the +// process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent Process +// identifier (PPID). +func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) +} + +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) +} + +// ProcessRealUserID returns an attribute KeyValue conforming to the +// "process.real_user.id" semantic conventions. It represents the real user ID +// (RUID) of the process. 
+func ProcessRealUserID(val int) attribute.KeyValue { + return ProcessRealUserIDKey.Int(val) +} + +// ProcessRealUserName returns an attribute KeyValue conforming to the +// "process.real_user.name" semantic conventions. It represents the username of +// the real user of the process. +func ProcessRealUserName(val string) attribute.KeyValue { + return ProcessRealUserNameKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessSavedUserID returns an attribute KeyValue conforming to the +// "process.saved_user.id" semantic conventions. It represents the saved user ID +// (SUID) of the process. +func ProcessSavedUserID(val int) attribute.KeyValue { + return ProcessSavedUserIDKey.Int(val) +} + +// ProcessSavedUserName returns an attribute KeyValue conforming to the +// "process.saved_user.name" semantic conventions. It represents the username of +// the saved user. 
+func ProcessSavedUserName(val string) attribute.KeyValue { + return ProcessSavedUserNameKey.String(val) +} + +// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the +// "process.session_leader.pid" semantic conventions. It represents the PID of +// the process's session leader. This is also the session ID (SID) of the +// process. +func ProcessSessionLeaderPID(val int) attribute.KeyValue { + return ProcessSessionLeaderPIDKey.Int(val) +} + +// ProcessTitle returns an attribute KeyValue conforming to the "process.title" +// semantic conventions. It represents the process title (proctitle). +func ProcessTitle(val string) attribute.KeyValue { + return ProcessTitleKey.String(val) +} + +// ProcessUserID returns an attribute KeyValue conforming to the +// "process.user.id" semantic conventions. It represents the effective user ID +// (EUID) of the process. +func ProcessUserID(val int) attribute.KeyValue { + return ProcessUserIDKey.Int(val) +} + +// ProcessUserName returns an attribute KeyValue conforming to the +// "process.user.name" semantic conventions. It represents the username of the +// effective user of the process. +func ProcessUserName(val string) attribute.KeyValue { + return ProcessUserNameKey.String(val) +} + +// ProcessVpid returns an attribute KeyValue conforming to the "process.vpid" +// semantic conventions. It represents the virtual process identifier. +func ProcessVpid(val int) attribute.KeyValue { + return ProcessVpidKey.Int(val) +} + +// ProcessWorkingDirectory returns an attribute KeyValue conforming to the +// "process.working_directory" semantic conventions. It represents the working +// directory of the process. 
+func ProcessWorkingDirectory(val string) attribute.KeyValue { + return ProcessWorkingDirectoryKey.String(val) +} + +// Enum values for process.context_switch_type +var ( + // voluntary + // Stability: development + ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary") + // involuntary + // Stability: development + ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary") +) + +// Enum values for process.paging.fault_type +var ( + // major + // Stability: development + ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major") + // minor + // Stability: development + ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor") +) + +// Namespace: profile +const ( + // ProfileFrameTypeKey is the attribute Key conforming to the + // "profile.frame.type" semantic conventions. It represents the describes the + // interpreter or compiler of a single frame. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "cpython" + ProfileFrameTypeKey = attribute.Key("profile.frame.type") +) + +// Enum values for profile.frame.type +var ( + // [.NET] + // + // Stability: development + // + // [.NET]: https://wikipedia.org/wiki/.NET + ProfileFrameTypeDotnet = ProfileFrameTypeKey.String("dotnet") + // [JVM] + // + // Stability: development + // + // [JVM]: https://wikipedia.org/wiki/Java_virtual_machine + ProfileFrameTypeJVM = ProfileFrameTypeKey.String("jvm") + // [Kernel] + // + // Stability: development + // + // [Kernel]: https://wikipedia.org/wiki/Kernel_(operating_system) + ProfileFrameTypeKernel = ProfileFrameTypeKey.String("kernel") + // Can be one of but not limited to [C], [C++], [Go] or [Rust]. If possible, a + // more precise value MUST be used. 
+ // + // Stability: development + // + // [C]: https://wikipedia.org/wiki/C_(programming_language) + // [C++]: https://wikipedia.org/wiki/C%2B%2B + // [Go]: https://wikipedia.org/wiki/Go_(programming_language) + // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language) + ProfileFrameTypeNative = ProfileFrameTypeKey.String("native") + // [Perl] + // + // Stability: development + // + // [Perl]: https://wikipedia.org/wiki/Perl + ProfileFrameTypePerl = ProfileFrameTypeKey.String("perl") + // [PHP] + // + // Stability: development + // + // [PHP]: https://wikipedia.org/wiki/PHP + ProfileFrameTypePHP = ProfileFrameTypeKey.String("php") + // [Python] + // + // Stability: development + // + // [Python]: https://wikipedia.org/wiki/Python_(programming_language) + ProfileFrameTypeCpython = ProfileFrameTypeKey.String("cpython") + // [Ruby] + // + // Stability: development + // + // [Ruby]: https://wikipedia.org/wiki/Ruby_(programming_language) + ProfileFrameTypeRuby = ProfileFrameTypeKey.String("ruby") + // [V8JS] + // + // Stability: development + // + // [V8JS]: https://wikipedia.org/wiki/V8_(JavaScript_engine) + ProfileFrameTypeV8JS = ProfileFrameTypeKey.String("v8js") + // [Erlang] + // + // Stability: development + // + // [Erlang]: https://en.wikipedia.org/wiki/BEAM_(Erlang_virtual_machine) + ProfileFrameTypeBeam = ProfileFrameTypeKey.String("beam") + // [Go], + // + // Stability: development + // + // [Go]: https://wikipedia.org/wiki/Go_(programming_language) + ProfileFrameTypeGo = ProfileFrameTypeKey.String("go") + // [Rust] + // + // Stability: development + // + // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language) + ProfileFrameTypeRust = ProfileFrameTypeKey.String("rust") +) + +// Namespace: rpc +const ( + // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.connect_rpc.error_code" semantic conventions. It represents the + // [error codes] of the Connect request. Error codes are always string values. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [error codes]: https://connectrpc.com//docs/protocol/#error-codes + RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") + + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the + // [numeric status code] of the gRPC request. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [numeric status code]: https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") + + // RPCJSONRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code` + // property of response if it is an error response. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: -32700, 100 + RPCJSONRPCErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJSONRPCErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Parse error", "User already exists" + RPCJSONRPCErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") + + // RPCJSONRPCRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, string, + // `null` or missing (for notifications), value is expected to be cast to string + // for simplicity. Use empty string in case of `null` value. Omit entirely if + // this is a notification. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "10", "request-7", "" + RPCJSONRPCRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJSONRPCVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // doesn't specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2.0", "1.0" + RPCJSONRPCVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCMessageCompressedSizeKey is the attribute Key conforming to the + // "rpc.message.compressed_size" semantic conventions. It represents the + // compressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size") + + // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id" + // semantic conventions. It MUST be calculated as two different counters + // starting from `1` one for sent messages and one for received message.. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // Note: This way we guarantee that the values will be consistent between + // different implementations. + RPCMessageIDKey = attribute.Key("rpc.message.id") + + // RPCMessageTypeKey is the attribute Key conforming to the "rpc.message.type" + // semantic conventions. It represents the whether this is a received or sent + // message. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + RPCMessageTypeKey = attribute.Key("rpc.message.type") + + // RPCMessageUncompressedSizeKey is the attribute Key conforming to the + // "rpc.message.uncompressed_size" semantic conventions. 
It represents the + // uncompressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" semantic + // conventions. It represents the name of the (logical) method being called, + // must be equal to the $method part in the span name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: exampleMethod + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function.name` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" semantic + // conventions. It represents the full (logical) name of the service being + // called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: myservice.EchoService + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing class. + // The `code.namespace` attribute may be used to store the latter (despite the + // attribute name, it may include a class name; e.g., class with method actually + // executing the call on the server side, RPC client stub class on the client + // side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCSystemKey is the attribute Key conforming to the "rpc.system" semantic + // conventions. It represents a string identifying the remoting system. See + // below for a list of well-known identifiers. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + RPCSystemKey = attribute.Key("rpc.system") +) + +// RPCConnectRPCRequestMetadata returns an attribute KeyValue conforming to the +// "rpc.connect_rpc.request.metadata" semantic conventions. It represents the +// connect request metadata, `` being the normalized Connect Metadata key +// (lowercase), the value being the metadata values. +func RPCConnectRPCRequestMetadata(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("rpc.connect_rpc.request.metadata."+key, val) +} + +// RPCConnectRPCResponseMetadata returns an attribute KeyValue conforming to the +// "rpc.connect_rpc.response.metadata" semantic conventions. It represents the +// connect response metadata, `` being the normalized Connect Metadata key +// (lowercase), the value being the metadata values. +func RPCConnectRPCResponseMetadata(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("rpc.connect_rpc.response.metadata."+key, val) +} + +// RPCGRPCRequestMetadata returns an attribute KeyValue conforming to the +// "rpc.grpc.request.metadata" semantic conventions. It represents the gRPC +// request metadata, `` being the normalized gRPC Metadata key (lowercase), +// the value being the metadata values. +func RPCGRPCRequestMetadata(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("rpc.grpc.request.metadata."+key, val) +} + +// RPCGRPCResponseMetadata returns an attribute KeyValue conforming to the +// "rpc.grpc.response.metadata" semantic conventions. It represents the gRPC +// response metadata, `` being the normalized gRPC Metadata key (lowercase), +// the value being the metadata values. 
+func RPCGRPCResponseMetadata(key string, val ...string) attribute.KeyValue { + return attribute.StringSlice("rpc.grpc.response.metadata."+key, val) +} + +// RPCJSONRPCErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code` +// property of response if it is an error response. +func RPCJSONRPCErrorCode(val int) attribute.KeyValue { + return RPCJSONRPCErrorCodeKey.Int(val) +} + +// RPCJSONRPCErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJSONRPCErrorMessage(val string) attribute.KeyValue { + return RPCJSONRPCErrorMessageKey.String(val) +} + +// RPCJSONRPCRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` property +// of request or response. Since protocol allows id to be int, string, `null` or +// missing (for notifications), value is expected to be cast to string for +// simplicity. Use empty string in case of `null` value. Omit entirely if this is +// a notification. +func RPCJSONRPCRequestID(val string) attribute.KeyValue { + return RPCJSONRPCRequestIDKey.String(val) +} + +// RPCJSONRPCVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol version +// as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 doesn't +// specify this, the value can be omitted. +func RPCJSONRPCVersion(val string) attribute.KeyValue { + return RPCJSONRPCVersionKey.String(val) +} + +// RPCMessageCompressedSize returns an attribute KeyValue conforming to the +// "rpc.message.compressed_size" semantic conventions. It represents the +// compressed size of the message in bytes. 
+func RPCMessageCompressedSize(val int) attribute.KeyValue { + return RPCMessageCompressedSizeKey.Int(val) +} + +// RPCMessageID returns an attribute KeyValue conforming to the "rpc.message.id" +// semantic conventions. It MUST be calculated as two different counters starting +// from `1` one for sent messages and one for received message.. +func RPCMessageID(val int) attribute.KeyValue { + return RPCMessageIDKey.Int(val) +} + +// RPCMessageUncompressedSize returns an attribute KeyValue conforming to the +// "rpc.message.uncompressed_size" semantic conventions. It represents the +// uncompressed size of the message in bytes. +func RPCMessageUncompressedSize(val int) attribute.KeyValue { + return RPCMessageUncompressedSizeKey.Int(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. 
+func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// Enum values for rpc.connect_rpc.error_code +var ( + // cancelled + // Stability: development + RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") + // unknown + // Stability: development + RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") + // invalid_argument + // Stability: development + RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") + // deadline_exceeded + // Stability: development + RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") + // not_found + // Stability: development + RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") + // already_exists + // Stability: development + RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") + // permission_denied + // Stability: development + RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") + // resource_exhausted + // Stability: development + RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") + // failed_precondition + // Stability: development + RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") + // aborted + // Stability: development + RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") + // out_of_range + // Stability: development + RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") + // unimplemented + // Stability: development + RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") + // internal + // Stability: development + RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") + // unavailable + // Stability: development + RPCConnectRPCErrorCodeUnavailable = 
RPCConnectRPCErrorCodeKey.String("unavailable") + // data_loss + // Stability: development + RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") + // unauthenticated + // Stability: development + RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") +) + +// Enum values for rpc.grpc.status_code +var ( + // OK + // Stability: development + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + // Stability: development + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + // Stability: development + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + // Stability: development + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + // Stability: development + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + // Stability: development + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + // Stability: development + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + // Stability: development + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + // Stability: development + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + // Stability: development + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + // Stability: development + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + // Stability: development + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + // Stability: development + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + // Stability: development + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + // Stability: development + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + 
// Stability: development + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + // Stability: development + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Enum values for rpc.message.type +var ( + // sent + // Stability: development + RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") + // received + // Stability: development + RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") +) + +// Enum values for rpc.system +var ( + // gRPC + // Stability: development + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + // Stability: development + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + // Stability: development + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + // Stability: development + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") + // Connect RPC + // Stability: development + RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") +) + +// Namespace: security_rule +const ( + // SecurityRuleCategoryKey is the attribute Key conforming to the + // "security_rule.category" semantic conventions. It represents a categorization + // value keyword used by the entity using the rule for detection of this event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Attempted Information Leak" + SecurityRuleCategoryKey = attribute.Key("security_rule.category") + + // SecurityRuleDescriptionKey is the attribute Key conforming to the + // "security_rule.description" semantic conventions. It represents the + // description of the rule generating the event. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Block requests to public DNS over HTTPS / TLS protocols" + SecurityRuleDescriptionKey = attribute.Key("security_rule.description") + + // SecurityRuleLicenseKey is the attribute Key conforming to the + // "security_rule.license" semantic conventions. It represents the name of the + // license under which the rule used to generate this event is made available. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Apache 2.0" + SecurityRuleLicenseKey = attribute.Key("security_rule.license") + + // SecurityRuleNameKey is the attribute Key conforming to the + // "security_rule.name" semantic conventions. It represents the name of the rule + // or signature generating the event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "BLOCK_DNS_over_TLS" + SecurityRuleNameKey = attribute.Key("security_rule.name") + + // SecurityRuleReferenceKey is the attribute Key conforming to the + // "security_rule.reference" semantic conventions. It represents the reference + // URL to additional information about the rule used to generate this event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://en.wikipedia.org/wiki/DNS_over_TLS" + // Note: The URL can point to the vendor’s documentation about the rule. If + // that’s not available, it can also be a link to a more general page + // describing this type of alert. + SecurityRuleReferenceKey = attribute.Key("security_rule.reference") + + // SecurityRuleRulesetNameKey is the attribute Key conforming to the + // "security_rule.ruleset.name" semantic conventions. It represents the name of + // the ruleset, policy, group, or parent category in which the rule used to + // generate this event is a member. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Standard_Protocol_Filters" + SecurityRuleRulesetNameKey = attribute.Key("security_rule.ruleset.name") + + // SecurityRuleUUIDKey is the attribute Key conforming to the + // "security_rule.uuid" semantic conventions. It represents a rule ID that is + // unique within the scope of a set or group of agents, observers, or other + // entities using the rule for detection of this event. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "550e8400-e29b-41d4-a716-446655440000", "1100110011" + SecurityRuleUUIDKey = attribute.Key("security_rule.uuid") + + // SecurityRuleVersionKey is the attribute Key conforming to the + // "security_rule.version" semantic conventions. It represents the version / + // revision of the rule being used for analysis. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.0.0" + SecurityRuleVersionKey = attribute.Key("security_rule.version") +) + +// SecurityRuleCategory returns an attribute KeyValue conforming to the +// "security_rule.category" semantic conventions. It represents a categorization +// value keyword used by the entity using the rule for detection of this event. +func SecurityRuleCategory(val string) attribute.KeyValue { + return SecurityRuleCategoryKey.String(val) +} + +// SecurityRuleDescription returns an attribute KeyValue conforming to the +// "security_rule.description" semantic conventions. It represents the +// description of the rule generating the event. +func SecurityRuleDescription(val string) attribute.KeyValue { + return SecurityRuleDescriptionKey.String(val) +} + +// SecurityRuleLicense returns an attribute KeyValue conforming to the +// "security_rule.license" semantic conventions. It represents the name of the +// license under which the rule used to generate this event is made available. 
+func SecurityRuleLicense(val string) attribute.KeyValue { + return SecurityRuleLicenseKey.String(val) +} + +// SecurityRuleName returns an attribute KeyValue conforming to the +// "security_rule.name" semantic conventions. It represents the name of the rule +// or signature generating the event. +func SecurityRuleName(val string) attribute.KeyValue { + return SecurityRuleNameKey.String(val) +} + +// SecurityRuleReference returns an attribute KeyValue conforming to the +// "security_rule.reference" semantic conventions. It represents the reference +// URL to additional information about the rule used to generate this event. +func SecurityRuleReference(val string) attribute.KeyValue { + return SecurityRuleReferenceKey.String(val) +} + +// SecurityRuleRulesetName returns an attribute KeyValue conforming to the +// "security_rule.ruleset.name" semantic conventions. It represents the name of +// the ruleset, policy, group, or parent category in which the rule used to +// generate this event is a member. +func SecurityRuleRulesetName(val string) attribute.KeyValue { + return SecurityRuleRulesetNameKey.String(val) +} + +// SecurityRuleUUID returns an attribute KeyValue conforming to the +// "security_rule.uuid" semantic conventions. It represents a rule ID that is +// unique within the scope of a set or group of agents, observers, or other +// entities using the rule for detection of this event. +func SecurityRuleUUID(val string) attribute.KeyValue { + return SecurityRuleUUIDKey.String(val) +} + +// SecurityRuleVersion returns an attribute KeyValue conforming to the +// "security_rule.version" semantic conventions. It represents the version / +// revision of the rule being used for analysis. +func SecurityRuleVersion(val string) attribute.KeyValue { + return SecurityRuleVersionKey.String(val) +} + +// Namespace: server +const ( + // ServerAddressKey is the attribute Key conforming to the "server.address" + // semantic conventions. 
It represents the server domain name if available + // without reverse DNS lookup; otherwise, IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "example.com", "10.1.2.80", "/tmp/my.sock" + // Note: When observed from the client side, and when communicating through an + // intermediary, `server.address` SHOULD represent the server address behind any + // intermediaries, for example proxies, if it's available. + ServerAddressKey = attribute.Key("server.address") + + // ServerPortKey is the attribute Key conforming to the "server.port" semantic + // conventions. It represents the server port number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: 80, 8080, 443 + // Note: When observed from the client side, and when communicating through an + // intermediary, `server.port` SHOULD represent the server port behind any + // intermediaries, for example proxies, if it's available. + ServerPortKey = attribute.Key("server.port") +) + +// ServerAddress returns an attribute KeyValue conforming to the "server.address" +// semantic conventions. It represents the server domain name if available +// without reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func ServerAddress(val string) attribute.KeyValue { + return ServerAddressKey.String(val) +} + +// ServerPort returns an attribute KeyValue conforming to the "server.port" +// semantic conventions. It represents the server port number. +func ServerPort(val int) attribute.KeyValue { + return ServerPortKey.Int(val) +} + +// Namespace: service +const ( + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID of + // the service instance. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "627cc493-f310-47de-96bd-71410b7dec09" + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be globally + // unique). The ID helps to + // distinguish instances of the same service that exist at the same time (e.g. + // instances of a horizontally scaled + // service). + // + // Implementations, such as SDKs, are recommended to generate a random Version 1 + // or Version 4 [RFC + // 4122] UUID, but are free to use an inherent unique ID as + // the source of + // this value if stability is desirable. In that case, the ID SHOULD be used as + // source of a UUID Version 5 and + // SHOULD use the following UUID as the namespace: + // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`. + // + // UUIDs are typically recommended, as only an opaque value for the purposes of + // identifying a service instance is + // needed. Similar to what can be seen in the man page for the + // [`/etc/machine-id`] file, the underlying + // data, such as pod name and namespace should be treated as confidential, being + // the user's choice to expose it + // or not via another resource attribute. + // + // For applications running behind an application server (like unicorn), we do + // not recommend using one identifier + // for all processes participating in the application. Instead, it's recommended + // each division (e.g. a worker + // thread in unicorn) to have its own instance.id. + // + // It's not recommended for a Collector to set `service.instance.id` if it can't + // unambiguously determine the + // service instance that is generating that telemetry. For instance, creating an + // UUID based on `pod.name` will + // likely be wrong, as the Collector might not know from which container within + // that pod the telemetry originated. 
+ // However, Collectors can set the `service.instance.id` if they can + // unambiguously determine the service instance + // for that telemetry. This is typically the case for scraping receivers, as + // they know the target address and + // port. + // + // [RFC + // 4122]: https://www.ietf.org/rfc/rfc4122.txt + // [`/etc/machine-id`]: https://www.freedesktop.org/software/systemd/man/latest/machine-id.html + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceNameKey is the attribute Key conforming to the "service.name" semantic + // conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "shoppingcart" + // Note: MUST be the same for all instances of horizontally scaled services. If + // the value was not specified, SDKs MUST fallback to `unknown_service:` + // concatenated with [`process.executable.name`], e.g. `unknown_service:bash`. + // If `process.executable.name` is not available, the value MUST be set to + // `unknown_service`. + // + // [`process.executable.name`]: process.md + ServiceNameKey = attribute.Key("service.name") + + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Shop" + // Note: A string value having a meaning that helps to distinguish a group of + // services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` is + // expected to be unique for all services that have no explicit namespace + // defined (so the empty/unspecified namespace is simply one more valid + // namespace). 
Zero-length namespace string is assumed equal to unspecified + // namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceVersionKey is the attribute Key conforming to the "service.version" + // semantic conventions. It represents the version string of the service API or + // implementation. The format is not defined by these conventions. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "2.0.0", "a01dbef8a" + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of the +// service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceName returns an attribute KeyValue conforming to the "service.name" +// semantic conventions. It represents the logical name of the service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. The format is not defined by these +// conventions. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// Namespace: session +const ( + // SessionIDKey is the attribute Key conforming to the "session.id" semantic + // conventions. It represents a unique id to identify a session. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 00112233-4455-6677-8899-aabbccddeeff + SessionIDKey = attribute.Key("session.id") + + // SessionPreviousIDKey is the attribute Key conforming to the + // "session.previous_id" semantic conventions. It represents the previous + // `session.id` for this user, when known. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 00112233-4455-6677-8899-aabbccddeeff + SessionPreviousIDKey = attribute.Key("session.previous_id") +) + +// SessionID returns an attribute KeyValue conforming to the "session.id" +// semantic conventions. It represents a unique id to identify a session. +func SessionID(val string) attribute.KeyValue { + return SessionIDKey.String(val) +} + +// SessionPreviousID returns an attribute KeyValue conforming to the +// "session.previous_id" semantic conventions. It represents the previous +// `session.id` for this user, when known. +func SessionPreviousID(val string) attribute.KeyValue { + return SessionPreviousIDKey.String(val) +} + +// Namespace: signalr +const ( + // SignalRConnectionStatusKey is the attribute Key conforming to the + // "signalr.connection.status" semantic conventions. It represents the signalR + // HTTP connection closure status. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "app_shutdown", "timeout" + SignalRConnectionStatusKey = attribute.Key("signalr.connection.status") + + // SignalRTransportKey is the attribute Key conforming to the + // "signalr.transport" semantic conventions. It represents the + // [SignalR transport type]. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "web_sockets", "long_polling" + // + // [SignalR transport type]: https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md + SignalRTransportKey = attribute.Key("signalr.transport") +) + +// Enum values for signalr.connection.status +var ( + // The connection was closed normally. + // Stability: stable + SignalRConnectionStatusNormalClosure = SignalRConnectionStatusKey.String("normal_closure") + // The connection was closed due to a timeout. + // Stability: stable + SignalRConnectionStatusTimeout = SignalRConnectionStatusKey.String("timeout") + // The connection was closed because the app is shutting down. + // Stability: stable + SignalRConnectionStatusAppShutdown = SignalRConnectionStatusKey.String("app_shutdown") +) + +// Enum values for signalr.transport +var ( + // ServerSentEvents protocol + // Stability: stable + SignalRTransportServerSentEvents = SignalRTransportKey.String("server_sent_events") + // LongPolling protocol + // Stability: stable + SignalRTransportLongPolling = SignalRTransportKey.String("long_polling") + // WebSockets protocol + // Stability: stable + SignalRTransportWebSockets = SignalRTransportKey.String("web_sockets") +) + +// Namespace: source +const ( + // SourceAddressKey is the attribute Key conforming to the "source.address" + // semantic conventions. It represents the source address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix domain + // socket name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "source.example.com", "10.1.2.80", "/tmp/my.sock" + // Note: When observed from the destination side, and when communicating through + // an intermediary, `source.address` SHOULD represent the source address behind + // any intermediaries, for example proxies, if it's available. 
+ SourceAddressKey = attribute.Key("source.address") + + // SourcePortKey is the attribute Key conforming to the "source.port" semantic + // conventions. It represents the source port number. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 3389, 2888 + SourcePortKey = attribute.Key("source.port") +) + +// SourceAddress returns an attribute KeyValue conforming to the "source.address" +// semantic conventions. It represents the source address - domain name if +// available without reverse DNS lookup; otherwise, IP address or Unix domain +// socket name. +func SourceAddress(val string) attribute.KeyValue { + return SourceAddressKey.String(val) +} + +// SourcePort returns an attribute KeyValue conforming to the "source.port" +// semantic conventions. It represents the source port number. +func SourcePort(val int) attribute.KeyValue { + return SourcePortKey.Int(val) +} + +// Namespace: system +const ( + // SystemCPULogicalNumberKey is the attribute Key conforming to the + // "system.cpu.logical_number" semantic conventions. It is + // deprecated; use `cpu.logical_number` instead. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 1 + SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") + + // SystemDeviceKey is the attribute Key conforming to the "system.device" + // semantic conventions. It represents the device identifier. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "(identifier)" + SystemDeviceKey = attribute.Key("system.device") + + // SystemFilesystemModeKey is the attribute Key conforming to the + // "system.filesystem.mode" semantic conventions. It represents the filesystem + // mode. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "rw, ro" + SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") + + // SystemFilesystemMountpointKey is the attribute Key conforming to the + // "system.filesystem.mountpoint" semantic conventions. It represents the + // filesystem mount path. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/mnt/data" + SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") + + // SystemFilesystemStateKey is the attribute Key conforming to the + // "system.filesystem.state" semantic conventions. It represents the filesystem + // state. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "used" + SystemFilesystemStateKey = attribute.Key("system.filesystem.state") + + // SystemFilesystemTypeKey is the attribute Key conforming to the + // "system.filesystem.type" semantic conventions. It represents the filesystem + // type. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ext4" + SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") + + // SystemMemoryStateKey is the attribute Key conforming to the + // "system.memory.state" semantic conventions. It represents the memory state. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "free", "cached" + SystemMemoryStateKey = attribute.Key("system.memory.state") + + // SystemPagingDirectionKey is the attribute Key conforming to the + // "system.paging.direction" semantic conventions. It represents the paging + // access direction. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "in" + SystemPagingDirectionKey = attribute.Key("system.paging.direction") + + // SystemPagingStateKey is the attribute Key conforming to the + // "system.paging.state" semantic conventions. It represents the memory paging + // state. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "free" + SystemPagingStateKey = attribute.Key("system.paging.state") + + // SystemPagingTypeKey is the attribute Key conforming to the + // "system.paging.type" semantic conventions. It represents the memory paging + // type. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "minor" + SystemPagingTypeKey = attribute.Key("system.paging.type") + + // SystemProcessStatusKey is the attribute Key conforming to the + // "system.process.status" semantic conventions. It represents the process + // state, e.g., [Linux Process State Codes]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "running" + // + // [Linux Process State Codes]: https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES + SystemProcessStatusKey = attribute.Key("system.process.status") +) + +// SystemCPULogicalNumber returns an attribute KeyValue conforming to the +// "system.cpu.logical_number" semantic conventions. It is +// deprecated; use `cpu.logical_number` instead. +func SystemCPULogicalNumber(val int) attribute.KeyValue { + return SystemCPULogicalNumberKey.Int(val) +} + +// SystemDevice returns an attribute KeyValue conforming to the "system.device" +// semantic conventions. It represents the device identifier. +func SystemDevice(val string) attribute.KeyValue { + return SystemDeviceKey.String(val) +} + +// SystemFilesystemMode returns an attribute KeyValue conforming to the +// "system.filesystem.mode" semantic conventions. 
It represents the filesystem +// mode. +func SystemFilesystemMode(val string) attribute.KeyValue { + return SystemFilesystemModeKey.String(val) +} + +// SystemFilesystemMountpoint returns an attribute KeyValue conforming to the +// "system.filesystem.mountpoint" semantic conventions. It represents the +// filesystem mount path. +func SystemFilesystemMountpoint(val string) attribute.KeyValue { + return SystemFilesystemMountpointKey.String(val) +} + +// Enum values for system.filesystem.state +var ( + // used + // Stability: development + SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") + // free + // Stability: development + SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") + // reserved + // Stability: development + SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") +) + +// Enum values for system.filesystem.type +var ( + // fat32 + // Stability: development + SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") + // exfat + // Stability: development + SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") + // ntfs + // Stability: development + SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") + // refs + // Stability: development + SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") + // hfsplus + // Stability: development + SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") + // ext4 + // Stability: development + SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") +) + +// Enum values for system.memory.state +var ( + // Actual used virtual memory in bytes. 
+ // Stability: development + SystemMemoryStateUsed = SystemMemoryStateKey.String("used") + // free + // Stability: development + SystemMemoryStateFree = SystemMemoryStateKey.String("free") + // buffers + // Stability: development + SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") + // cached + // Stability: development + SystemMemoryStateCached = SystemMemoryStateKey.String("cached") +) + +// Enum values for system.paging.direction +var ( + // in + // Stability: development + SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") + // out + // Stability: development + SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") +) + +// Enum values for system.paging.state +var ( + // used + // Stability: development + SystemPagingStateUsed = SystemPagingStateKey.String("used") + // free + // Stability: development + SystemPagingStateFree = SystemPagingStateKey.String("free") +) + +// Enum values for system.paging.type +var ( + // major + // Stability: development + SystemPagingTypeMajor = SystemPagingTypeKey.String("major") + // minor + // Stability: development + SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") +) + +// Enum values for system.process.status +var ( + // running + // Stability: development + SystemProcessStatusRunning = SystemProcessStatusKey.String("running") + // sleeping + // Stability: development + SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping") + // stopped + // Stability: development + SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped") + // defunct + // Stability: development + SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct") +) + +// Namespace: telemetry +const ( + // TelemetryDistroNameKey is the attribute Key conforming to the + // "telemetry.distro.name" semantic conventions. It represents the name of the + // auto instrumentation agent or distribution, if used. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "parts-unlimited-java" + // Note: Official auto instrumentation agents and distributions SHOULD set the + // `telemetry.distro.name` attribute to + // a string starting with `opentelemetry-`, e.g. + // `opentelemetry-java-instrumentation`. + TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") + + // TelemetryDistroVersionKey is the attribute Key conforming to the + // "telemetry.distro.version" semantic conventions. It represents the version + // string of the auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.2.3" + TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") + + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the language of + // the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "opentelemetry" + // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute to + // `opentelemetry`. + // If another SDK, like a fork or a vendor-provided implementation, is used, + // this SDK MUST set the + // `telemetry.sdk.name` attribute to the fully-qualified class or module name of + // this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier `opentelemetry` is reserved and MUST NOT be used in this case. 
+ // All custom identifiers SHOULD be stable across different versions of an + // implementation. + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "1.2.3" + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") +) + +// TelemetryDistroName returns an attribute KeyValue conforming to the +// "telemetry.distro.name" semantic conventions. It represents the name of the +// auto instrumentation agent or distribution, if used. +func TelemetryDistroName(val string) attribute.KeyValue { + return TelemetryDistroNameKey.String(val) +} + +// TelemetryDistroVersion returns an attribute KeyValue conforming to the +// "telemetry.distro.version" semantic conventions. It represents the version +// string of the auto instrumentation agent or distribution, if used. +func TelemetryDistroVersion(val string) attribute.KeyValue { + return TelemetryDistroVersionKey.String(val) +} + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version string +// of the telemetry SDK. 
+func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// Enum values for telemetry.sdk.language +var ( + // cpp + // Stability: stable + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + // Stability: stable + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + // Stability: stable + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + // Stability: stable + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + // Stability: stable + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + // Stability: stable + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + // Stability: stable + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + // Stability: stable + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + // Stability: stable + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // rust + // Stability: stable + TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") + // swift + // Stability: stable + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") + // webjs + // Stability: stable + TelemetrySDKLanguageWebJS = TelemetrySDKLanguageKey.String("webjs") +) + +// Namespace: test +const ( + // TestCaseNameKey is the attribute Key conforming to the "test.case.name" + // semantic conventions. It represents the fully qualified human readable name + // of the [test case]. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "org.example.TestCase1.test1", "example/tests/TestCase1.test1", + // "ExampleTestCase1_test1" + // + // [test case]: https://wikipedia.org/wiki/Test_case + TestCaseNameKey = attribute.Key("test.case.name") + + // TestCaseResultStatusKey is the attribute Key conforming to the + // "test.case.result.status" semantic conventions. It represents the status of + // the actual test case result from test execution. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "pass", "fail" + TestCaseResultStatusKey = attribute.Key("test.case.result.status") + + // TestSuiteNameKey is the attribute Key conforming to the "test.suite.name" + // semantic conventions. It represents the human readable name of a [test suite] + // . + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TestSuite1" + // + // [test suite]: https://wikipedia.org/wiki/Test_suite + TestSuiteNameKey = attribute.Key("test.suite.name") + + // TestSuiteRunStatusKey is the attribute Key conforming to the + // "test.suite.run.status" semantic conventions. It represents the status of the + // test suite run. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "success", "failure", "skipped", "aborted", "timed_out", + // "in_progress" + TestSuiteRunStatusKey = attribute.Key("test.suite.run.status") +) + +// TestCaseName returns an attribute KeyValue conforming to the "test.case.name" +// semantic conventions. It represents the fully qualified human readable name of +// the [test case]. +// +// [test case]: https://wikipedia.org/wiki/Test_case +func TestCaseName(val string) attribute.KeyValue { + return TestCaseNameKey.String(val) +} + +// TestSuiteName returns an attribute KeyValue conforming to the +// "test.suite.name" semantic conventions. 
It represents the human readable name +// of a [test suite]. +// +// [test suite]: https://wikipedia.org/wiki/Test_suite +func TestSuiteName(val string) attribute.KeyValue { + return TestSuiteNameKey.String(val) +} + +// Enum values for test.case.result.status +var ( + // pass + // Stability: development + TestCaseResultStatusPass = TestCaseResultStatusKey.String("pass") + // fail + // Stability: development + TestCaseResultStatusFail = TestCaseResultStatusKey.String("fail") +) + +// Enum values for test.suite.run.status +var ( + // success + // Stability: development + TestSuiteRunStatusSuccess = TestSuiteRunStatusKey.String("success") + // failure + // Stability: development + TestSuiteRunStatusFailure = TestSuiteRunStatusKey.String("failure") + // skipped + // Stability: development + TestSuiteRunStatusSkipped = TestSuiteRunStatusKey.String("skipped") + // aborted + // Stability: development + TestSuiteRunStatusAborted = TestSuiteRunStatusKey.String("aborted") + // timed_out + // Stability: development + TestSuiteRunStatusTimedOut = TestSuiteRunStatusKey.String("timed_out") + // in_progress + // Stability: development + TestSuiteRunStatusInProgress = TestSuiteRunStatusKey.String("in_progress") +) + +// Namespace: thread +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed to OS + // thread ID). + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + ThreadIDKey = attribute.Key("thread.id") + + // ThreadNameKey is the attribute Key conforming to the "thread.name" semantic + // conventions. It represents the current thread name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: main + ThreadNameKey = attribute.Key("thread.name") +) + +// ThreadID returns an attribute KeyValue conforming to the "thread.id" semantic +// conventions. 
It represents the current "managed" thread ID (as opposed to OS
+// thread ID).
+func ThreadID(val int) attribute.KeyValue {
+	return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+	return ThreadNameKey.String(val)
+}
+
+// Namespace: tls
+const (
+	// TLSCipherKey is the attribute Key conforming to the "tls.cipher" semantic
+	// conventions. It represents the string indicating the [cipher] used during the
+	// current connection.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
+	// "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"
+	// Note: The values allowed for `tls.cipher` MUST be one of the `Descriptions`
+	// of the [registered TLS Cipher Suites].
+	//
+	// [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5
+	// [registered TLS Cipher Suites]: https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4
+	TLSCipherKey = attribute.Key("tls.cipher")
+
+	// TLSClientCertificateKey is the attribute Key conforming to the
+	// "tls.client.certificate" semantic conventions. It represents the PEM-encoded
+	// stand-alone certificate offered by the client. This is usually
+	// mutually-exclusive of `client.certificate_chain` since this value also exists
+	// in that list.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "MII..."
+	TLSClientCertificateKey = attribute.Key("tls.client.certificate")
+
+	// TLSClientCertificateChainKey is the attribute Key conforming to the
+	// "tls.client.certificate_chain" semantic conventions. It represents the array
+	// of PEM-encoded certificates that make up the certificate chain offered by the
+	// client.
This is usually mutually-exclusive of `client.certificate` since that + // value should be the first certificate in the chain. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MII...", "MI..." + TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") + + // TLSClientHashMd5Key is the attribute Key conforming to the + // "tls.client.hash.md5" semantic conventions. It represents the certificate + // fingerprint using the MD5 digest of DER-encoded version of certificate + // offered by the client. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC" + TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") + + // TLSClientHashSha1Key is the attribute Key conforming to the + // "tls.client.hash.sha1" semantic conventions. It represents the certificate + // fingerprint using the SHA1 digest of DER-encoded version of certificate + // offered by the client. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A" + TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") + + // TLSClientHashSha256Key is the attribute Key conforming to the + // "tls.client.hash.sha256" semantic conventions. It represents the certificate + // fingerprint using the SHA256 digest of DER-encoded version of certificate + // offered by the client. For consistency with other hash values, this value + // should be formatted as an uppercase hash. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0" + TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") + + // TLSClientIssuerKey is the attribute Key conforming to the "tls.client.issuer" + // semantic conventions. It represents the distinguished name of [subject] of + // the issuer of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com" + // + // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 + TLSClientIssuerKey = attribute.Key("tls.client.issuer") + + // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" + // semantic conventions. It represents a hash that identifies clients based on + // how they perform an SSL/TLS handshake. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "d4e5b18d6b55c71272893221c96ba240" + TLSClientJa3Key = attribute.Key("tls.client.ja3") + + // TLSClientNotAfterKey is the attribute Key conforming to the + // "tls.client.not_after" semantic conventions. It represents the date/Time + // indicating when client certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T00:00:00.000Z" + TLSClientNotAfterKey = attribute.Key("tls.client.not_after") + + // TLSClientNotBeforeKey is the attribute Key conforming to the + // "tls.client.not_before" semantic conventions. It represents the date/Time + // indicating when client certificate is first considered valid. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1970-01-01T00:00:00.000Z" + TLSClientNotBeforeKey = attribute.Key("tls.client.not_before") + + // TLSClientSubjectKey is the attribute Key conforming to the + // "tls.client.subject" semantic conventions. It represents the distinguished + // name of subject of the x.509 certificate presented by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=myclient, OU=Documentation Team, DC=example, DC=com" + TLSClientSubjectKey = attribute.Key("tls.client.subject") + + // TLSClientSupportedCiphersKey is the attribute Key conforming to the + // "tls.client.supported_ciphers" semantic conventions. It represents the array + // of ciphers offered by the client during the client hello. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" + TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") + + // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic + // conventions. It represents the string indicating the curve used for the given + // cipher, when applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "secp256r1" + TLSCurveKey = attribute.Key("tls.curve") + + // TLSEstablishedKey is the attribute Key conforming to the "tls.established" + // semantic conventions. It represents the boolean flag indicating if the TLS + // negotiation was successful and transitioned to an encrypted tunnel. + // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: true + TLSEstablishedKey = attribute.Key("tls.established") + + // TLSNextProtocolKey is the attribute Key conforming to the "tls.next_protocol" + // semantic conventions. 
It represents the string indicating the protocol being + // tunneled. Per the values in the [IANA registry], this string should be lower + // case. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "http/1.1" + // + // [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids + TLSNextProtocolKey = attribute.Key("tls.next_protocol") + + // TLSProtocolNameKey is the attribute Key conforming to the "tls.protocol.name" + // semantic conventions. It represents the normalized lowercase protocol name + // parsed from original string of the negotiated [SSL/TLS protocol version]. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // + // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values + TLSProtocolNameKey = attribute.Key("tls.protocol.name") + + // TLSProtocolVersionKey is the attribute Key conforming to the + // "tls.protocol.version" semantic conventions. It represents the numeric part + // of the version parsed from the original string of the negotiated + // [SSL/TLS protocol version]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1.2", "3" + // + // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values + TLSProtocolVersionKey = attribute.Key("tls.protocol.version") + + // TLSResumedKey is the attribute Key conforming to the "tls.resumed" semantic + // conventions. It represents the boolean flag indicating if this TLS connection + // was resumed from an existing TLS negotiation. 
+ // + // Type: boolean + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: true + TLSResumedKey = attribute.Key("tls.resumed") + + // TLSServerCertificateKey is the attribute Key conforming to the + // "tls.server.certificate" semantic conventions. It represents the PEM-encoded + // stand-alone certificate offered by the server. This is usually + // mutually-exclusive of `server.certificate_chain` since this value also exists + // in that list. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MII..." + TLSServerCertificateKey = attribute.Key("tls.server.certificate") + + // TLSServerCertificateChainKey is the attribute Key conforming to the + // "tls.server.certificate_chain" semantic conventions. It represents the array + // of PEM-encoded certificates that make up the certificate chain offered by the + // server. This is usually mutually-exclusive of `server.certificate` since that + // value should be the first certificate in the chain. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "MII...", "MI..." + TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain") + + // TLSServerHashMd5Key is the attribute Key conforming to the + // "tls.server.hash.md5" semantic conventions. It represents the certificate + // fingerprint using the MD5 digest of DER-encoded version of certificate + // offered by the server. For consistency with other hash values, this value + // should be formatted as an uppercase hash. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC" + TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5") + + // TLSServerHashSha1Key is the attribute Key conforming to the + // "tls.server.hash.sha1" semantic conventions. 
It represents the certificate
+	// fingerprint using the SHA1 digest of DER-encoded version of certificate
+	// offered by the server. For consistency with other hash values, this value
+	// should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A"
+	TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
+
+	// TLSServerHashSha256Key is the attribute Key conforming to the
+	// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+	// fingerprint using the SHA256 digest of DER-encoded version of certificate
+	// offered by the server. For consistency with other hash values, this value
+	// should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0"
+	TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
+
+	// TLSServerIssuerKey is the attribute Key conforming to the "tls.server.issuer"
+	// semantic conventions. It represents the distinguished name of [subject] of
+	// the issuer of the x.509 certificate presented by the server.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com"
+	//
+	// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+	TLSServerIssuerKey = attribute.Key("tls.server.issuer")
+
+	// TLSServerJa3sKey is the attribute Key conforming to the "tls.server.ja3s"
+	// semantic conventions. It represents a hash that identifies servers based on
+	// how they perform an SSL/TLS handshake.
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "d4e5b18d6b55c71272893221c96ba240" + TLSServerJa3sKey = attribute.Key("tls.server.ja3s") + + // TLSServerNotAfterKey is the attribute Key conforming to the + // "tls.server.not_after" semantic conventions. It represents the date/Time + // indicating when server certificate is no longer considered valid. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "2021-01-01T00:00:00.000Z" + TLSServerNotAfterKey = attribute.Key("tls.server.not_after") + + // TLSServerNotBeforeKey is the attribute Key conforming to the + // "tls.server.not_before" semantic conventions. It represents the date/Time + // indicating when server certificate is first considered valid. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "1970-01-01T00:00:00.000Z" + TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") + + // TLSServerSubjectKey is the attribute Key conforming to the + // "tls.server.subject" semantic conventions. It represents the distinguished + // name of subject of the x.509 certificate presented by the server. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "CN=myserver, OU=Documentation Team, DC=example, DC=com" + TLSServerSubjectKey = attribute.Key("tls.server.subject") +) + +// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" +// semantic conventions. It represents the string indicating the [cipher] used +// during the current connection. +// +// [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5 +func TLSCipher(val string) attribute.KeyValue { + return TLSCipherKey.String(val) +} + +// TLSClientCertificate returns an attribute KeyValue conforming to the +// "tls.client.certificate" semantic conventions. 
It represents the PEM-encoded +// stand-alone certificate offered by the client. This is usually +// mutually-exclusive of `client.certificate_chain` since this value also exists +// in that list. +func TLSClientCertificate(val string) attribute.KeyValue { + return TLSClientCertificateKey.String(val) +} + +// TLSClientCertificateChain returns an attribute KeyValue conforming to the +// "tls.client.certificate_chain" semantic conventions. It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by the +// client. This is usually mutually-exclusive of `client.certificate` since that +// value should be the first certificate in the chain. +func TLSClientCertificateChain(val ...string) attribute.KeyValue { + return TLSClientCertificateChainKey.StringSlice(val) +} + +// TLSClientHashMd5 returns an attribute KeyValue conforming to the +// "tls.client.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of DER-encoded version of certificate offered +// by the client. For consistency with other hash values, this value should be +// formatted as an uppercase hash. +func TLSClientHashMd5(val string) attribute.KeyValue { + return TLSClientHashMd5Key.String(val) +} + +// TLSClientHashSha1 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha1" semantic conventions. It represents the certificate +// fingerprint using the SHA1 digest of DER-encoded version of certificate +// offered by the client. For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha1(val string) attribute.KeyValue { + return TLSClientHashSha1Key.String(val) +} + +// TLSClientHashSha256 returns an attribute KeyValue conforming to the +// "tls.client.hash.sha256" semantic conventions. It represents the certificate +// fingerprint using the SHA256 digest of DER-encoded version of certificate +// offered by the client. 
For consistency with other hash values, this value +// should be formatted as an uppercase hash. +func TLSClientHashSha256(val string) attribute.KeyValue { + return TLSClientHashSha256Key.String(val) +} + +// TLSClientIssuer returns an attribute KeyValue conforming to the +// "tls.client.issuer" semantic conventions. It represents the distinguished name +// of [subject] of the issuer of the x.509 certificate presented by the client. +// +// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6 +func TLSClientIssuer(val string) attribute.KeyValue { + return TLSClientIssuerKey.String(val) +} + +// TLSClientJa3 returns an attribute KeyValue conforming to the "tls.client.ja3" +// semantic conventions. It represents a hash that identifies clients based on +// how they perform an SSL/TLS handshake. +func TLSClientJa3(val string) attribute.KeyValue { + return TLSClientJa3Key.String(val) +} + +// TLSClientNotAfter returns an attribute KeyValue conforming to the +// "tls.client.not_after" semantic conventions. It represents the date/Time +// indicating when client certificate is no longer considered valid. +func TLSClientNotAfter(val string) attribute.KeyValue { + return TLSClientNotAfterKey.String(val) +} + +// TLSClientNotBefore returns an attribute KeyValue conforming to the +// "tls.client.not_before" semantic conventions. It represents the date/Time +// indicating when client certificate is first considered valid. +func TLSClientNotBefore(val string) attribute.KeyValue { + return TLSClientNotBeforeKey.String(val) +} + +// TLSClientSubject returns an attribute KeyValue conforming to the +// "tls.client.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the client. 
+func TLSClientSubject(val string) attribute.KeyValue { + return TLSClientSubjectKey.String(val) +} + +// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the +// "tls.client.supported_ciphers" semantic conventions. It represents the array +// of ciphers offered by the client during the client hello. +func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { + return TLSClientSupportedCiphersKey.StringSlice(val) +} + +// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" semantic +// conventions. It represents the string indicating the curve used for the given +// cipher, when applicable. +func TLSCurve(val string) attribute.KeyValue { + return TLSCurveKey.String(val) +} + +// TLSEstablished returns an attribute KeyValue conforming to the +// "tls.established" semantic conventions. It represents the boolean flag +// indicating if the TLS negotiation was successful and transitioned to an +// encrypted tunnel. +func TLSEstablished(val bool) attribute.KeyValue { + return TLSEstablishedKey.Bool(val) +} + +// TLSNextProtocol returns an attribute KeyValue conforming to the +// "tls.next_protocol" semantic conventions. It represents the string indicating +// the protocol being tunneled. Per the values in the [IANA registry], this +// string should be lower case. +// +// [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids +func TLSNextProtocol(val string) attribute.KeyValue { + return TLSNextProtocolKey.String(val) +} + +// TLSProtocolVersion returns an attribute KeyValue conforming to the +// "tls.protocol.version" semantic conventions. It represents the numeric part of +// the version parsed from the original string of the negotiated +// [SSL/TLS protocol version]. 
+// +// [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values +func TLSProtocolVersion(val string) attribute.KeyValue { + return TLSProtocolVersionKey.String(val) +} + +// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed" +// semantic conventions. It represents the boolean flag indicating if this TLS +// connection was resumed from an existing TLS negotiation. +func TLSResumed(val bool) attribute.KeyValue { + return TLSResumedKey.Bool(val) +} + +// TLSServerCertificate returns an attribute KeyValue conforming to the +// "tls.server.certificate" semantic conventions. It represents the PEM-encoded +// stand-alone certificate offered by the server. This is usually +// mutually-exclusive of `server.certificate_chain` since this value also exists +// in that list. +func TLSServerCertificate(val string) attribute.KeyValue { + return TLSServerCertificateKey.String(val) +} + +// TLSServerCertificateChain returns an attribute KeyValue conforming to the +// "tls.server.certificate_chain" semantic conventions. It represents the array +// of PEM-encoded certificates that make up the certificate chain offered by the +// server. This is usually mutually-exclusive of `server.certificate` since that +// value should be the first certificate in the chain. +func TLSServerCertificateChain(val ...string) attribute.KeyValue { + return TLSServerCertificateChainKey.StringSlice(val) +} + +// TLSServerHashMd5 returns an attribute KeyValue conforming to the +// "tls.server.hash.md5" semantic conventions. It represents the certificate +// fingerprint using the MD5 digest of DER-encoded version of certificate offered +// by the server. For consistency with other hash values, this value should be +// formatted as an uppercase hash. 
+func TLSServerHashMd5(val string) attribute.KeyValue {
+	return TLSServerHashMd5Key.String(val)
+}
+
+// TLSServerHashSha1 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha1(val string) attribute.KeyValue {
+	return TLSServerHashSha1Key.String(val)
+}
+
+// TLSServerHashSha256 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha256(val string) attribute.KeyValue {
+	return TLSServerHashSha256Key.String(val)
+}
+
+// TLSServerIssuer returns an attribute KeyValue conforming to the
+// "tls.server.issuer" semantic conventions. It represents the distinguished name
+// of [subject] of the issuer of the x.509 certificate presented by the server.
+//
+// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+func TLSServerIssuer(val string) attribute.KeyValue {
+	return TLSServerIssuerKey.String(val)
+}
+
+// TLSServerJa3s returns an attribute KeyValue conforming to the
+// "tls.server.ja3s" semantic conventions. It represents a hash that identifies
+// servers based on how they perform an SSL/TLS handshake.
+func TLSServerJa3s(val string) attribute.KeyValue {
+	return TLSServerJa3sKey.String(val)
+}
+
+// TLSServerNotAfter returns an attribute KeyValue conforming to the
+// "tls.server.not_after" semantic conventions. It represents the date/Time
+// indicating when server certificate is no longer considered valid.
+func TLSServerNotAfter(val string) attribute.KeyValue { + return TLSServerNotAfterKey.String(val) +} + +// TLSServerNotBefore returns an attribute KeyValue conforming to the +// "tls.server.not_before" semantic conventions. It represents the date/Time +// indicating when server certificate is first considered valid. +func TLSServerNotBefore(val string) attribute.KeyValue { + return TLSServerNotBeforeKey.String(val) +} + +// TLSServerSubject returns an attribute KeyValue conforming to the +// "tls.server.subject" semantic conventions. It represents the distinguished +// name of subject of the x.509 certificate presented by the server. +func TLSServerSubject(val string) attribute.KeyValue { + return TLSServerSubjectKey.String(val) +} + +// Enum values for tls.protocol.name +var ( + // ssl + // Stability: development + TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") + // tls + // Stability: development + TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") +) + +// Namespace: url +const ( + // URLDomainKey is the attribute Key conforming to the "url.domain" semantic + // conventions. It represents the domain extracted from the `url.full`, such as + // "opentelemetry.io". + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "www.foo.bar", "opentelemetry.io", "3.12.167.2", + // "[1080:0:0:0:8:800:200C:417A]" + // Note: In some cases a URL may refer to an IP and/or port directly, without a + // domain name. In this case, the IP address would go to the domain field. If + // the URL contains a [literal IPv6 address] enclosed by `[` and `]`, the `[` + // and `]` characters should also be captured in the domain field. + // + // [literal IPv6 address]: https://www.rfc-editor.org/rfc/rfc2732#section-2 + URLDomainKey = attribute.Key("url.domain") + + // URLExtensionKey is the attribute Key conforming to the "url.extension" + // semantic conventions. 
It represents the file extension extracted from the + // `url.full`, excluding the leading dot. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "png", "gz" + // Note: The file extension is only set if it exists, as not every url has a + // file extension. When the file name has multiple extensions `example.tar.gz`, + // only the last one should be captured `gz`, not `tar.gz`. + URLExtensionKey = attribute.Key("url.extension") + + // URLFragmentKey is the attribute Key conforming to the "url.fragment" semantic + // conventions. It represents the [URI fragment] component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "SemConv" + // + // [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5 + URLFragmentKey = attribute.Key("url.fragment") + + // URLFullKey is the attribute Key conforming to the "url.full" semantic + // conventions. It represents the absolute URL describing a network resource + // according to [RFC3986]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", "//localhost" + // Note: For network calls, URL usually has + // `scheme://host[:port][path][?query][#fragment]` format, where the fragment + // is not transmitted over HTTP, but if it is known, it SHOULD be included + // nevertheless. + // + // `url.full` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. + // In such case username and password SHOULD be redacted and attribute's value + // SHOULD be `https://REDACTED:REDACTED@www.example.com/`. + // + // `url.full` SHOULD capture the absolute URL when it is available (or can be + // reconstructed). + // + // Sensitive content provided in `url.full` SHOULD be scrubbed when + // instrumentations can identify it. 
+ // + // + // Query string values for the following keys SHOULD be redacted by default and + // replaced by the + // value `REDACTED`: + // + // - [`AWSAccessKeyId`] + // - [`Signature`] + // - [`sig`] + // - [`X-Goog-Signature`] + // + // This list is subject to change over time. + // + // When a query string value is redacted, the query string key SHOULD still be + // preserved, e.g. + // `https://www.example.com/path?color=blue&sig=REDACTED`. + // + // [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986 + // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token + // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls + URLFullKey = attribute.Key("url.full") + + // URLOriginalKey is the attribute Key conforming to the "url.original" semantic + // conventions. It represents the unmodified original URL as seen in the event + // source. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", + // "search?q=OpenTelemetry" + // Note: In network monitoring, the observed URL may be a full URL, whereas in + // access logs, the URL is often just represented as a path. This field is meant + // to represent the URL as it was observed, complete or not. + // `url.original` might contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case password and + // username SHOULD NOT be redacted and attribute's value SHOULD remain the same. + URLOriginalKey = attribute.Key("url.original") + + // URLPathKey is the attribute Key conforming to the "url.path" semantic + // conventions. 
It represents the [URI path] component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "/search" + // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when + // instrumentations can identify it. + // + // [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3 + URLPathKey = attribute.Key("url.path") + + // URLPortKey is the attribute Key conforming to the "url.port" semantic + // conventions. It represents the port extracted from the `url.full`. + // + // Type: int + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: 443 + URLPortKey = attribute.Key("url.port") + + // URLQueryKey is the attribute Key conforming to the "url.query" semantic + // conventions. It represents the [URI query] component. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "q=OpenTelemetry" + // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when + // instrumentations can identify it. + // + // + // Query string values for the following keys SHOULD be redacted by default and + // replaced by the value `REDACTED`: + // + // - [`AWSAccessKeyId`] + // - [`Signature`] + // - [`sig`] + // - [`X-Goog-Signature`] + // + // This list is subject to change over time. + // + // When a query string value is redacted, the query string key SHOULD still be + // preserved, e.g. + // `q=OpenTelemetry&sig=REDACTED`. 
+ // + // [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4 + // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth + // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token + // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls + URLQueryKey = attribute.Key("url.query") + + // URLRegisteredDomainKey is the attribute Key conforming to the + // "url.registered_domain" semantic conventions. It represents the highest + // registered url domain, stripped of the subdomain. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "example.com", "foo.co.uk" + // Note: This value can be determined precisely with the [public suffix list]. + // For example, the registered domain for `foo.example.com` is `example.com`. + // Trying to approximate this by simply taking the last two labels will not work + // well for TLDs such as `co.uk`. + // + // [public suffix list]: https://publicsuffix.org/ + URLRegisteredDomainKey = attribute.Key("url.registered_domain") + + // URLSchemeKey is the attribute Key conforming to the "url.scheme" semantic + // conventions. It represents the [URI scheme] component identifying the used + // protocol. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "https", "ftp", "telnet" + // + // [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 + URLSchemeKey = attribute.Key("url.scheme") + + // URLSubdomainKey is the attribute Key conforming to the "url.subdomain" + // semantic conventions. It represents the subdomain portion of a fully + // qualified domain name includes all of the names except the host name under + // the registered_domain. 
In a partially qualified domain, or if the + // qualification level of the full name cannot be determined, subdomain contains + // all of the names below the registered domain. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "east", "sub2.sub1" + // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If the + // domain has multiple levels of subdomain, such as `sub2.sub1.example.com`, the + // subdomain field should contain `sub2.sub1`, with no trailing period. + URLSubdomainKey = attribute.Key("url.subdomain") + + // URLTemplateKey is the attribute Key conforming to the "url.template" semantic + // conventions. It represents the low-cardinality template of an + // [absolute path reference]. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "/users/{id}", "/users/:id", "/users?id={id}" + // + // [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 + URLTemplateKey = attribute.Key("url.template") + + // URLTopLevelDomainKey is the attribute Key conforming to the + // "url.top_level_domain" semantic conventions. It represents the effective top + // level domain (eTLD), also known as the domain suffix, is the last part of the + // domain name. For example, the top level domain for example.com is `com`. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "com", "co.uk" + // Note: This value can be determined precisely with the [public suffix list]. + // + // [public suffix list]: https://publicsuffix.org/ + URLTopLevelDomainKey = attribute.Key("url.top_level_domain") +) + +// URLDomain returns an attribute KeyValue conforming to the "url.domain" +// semantic conventions. It represents the domain extracted from the `url.full`, +// such as "opentelemetry.io". 
+func URLDomain(val string) attribute.KeyValue { + return URLDomainKey.String(val) +} + +// URLExtension returns an attribute KeyValue conforming to the "url.extension" +// semantic conventions. It represents the file extension extracted from the +// `url.full`, excluding the leading dot. +func URLExtension(val string) attribute.KeyValue { + return URLExtensionKey.String(val) +} + +// URLFragment returns an attribute KeyValue conforming to the "url.fragment" +// semantic conventions. It represents the [URI fragment] component. +// +// [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5 +func URLFragment(val string) attribute.KeyValue { + return URLFragmentKey.String(val) +} + +// URLFull returns an attribute KeyValue conforming to the "url.full" semantic +// conventions. It represents the absolute URL describing a network resource +// according to [RFC3986]. +// +// [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986 +func URLFull(val string) attribute.KeyValue { + return URLFullKey.String(val) +} + +// URLOriginal returns an attribute KeyValue conforming to the "url.original" +// semantic conventions. It represents the unmodified original URL as seen in the +// event source. +func URLOriginal(val string) attribute.KeyValue { + return URLOriginalKey.String(val) +} + +// URLPath returns an attribute KeyValue conforming to the "url.path" semantic +// conventions. It represents the [URI path] component. +// +// [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3 +func URLPath(val string) attribute.KeyValue { + return URLPathKey.String(val) +} + +// URLPort returns an attribute KeyValue conforming to the "url.port" semantic +// conventions. It represents the port extracted from the `url.full`. +func URLPort(val int) attribute.KeyValue { + return URLPortKey.Int(val) +} + +// URLQuery returns an attribute KeyValue conforming to the "url.query" semantic +// conventions. It represents the [URI query] component. 
+// +// [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4 +func URLQuery(val string) attribute.KeyValue { + return URLQueryKey.String(val) +} + +// URLRegisteredDomain returns an attribute KeyValue conforming to the +// "url.registered_domain" semantic conventions. It represents the highest +// registered url domain, stripped of the subdomain. +func URLRegisteredDomain(val string) attribute.KeyValue { + return URLRegisteredDomainKey.String(val) +} + +// URLScheme returns an attribute KeyValue conforming to the "url.scheme" +// semantic conventions. It represents the [URI scheme] component identifying the +// used protocol. +// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +func URLScheme(val string) attribute.KeyValue { + return URLSchemeKey.String(val) +} + +// URLSubdomain returns an attribute KeyValue conforming to the "url.subdomain" +// semantic conventions. It represents the subdomain portion of a fully qualified +// domain name includes all of the names except the host name under the +// registered_domain. In a partially qualified domain, or if the qualification +// level of the full name cannot be determined, subdomain contains all of the +// names below the registered domain. +func URLSubdomain(val string) attribute.KeyValue { + return URLSubdomainKey.String(val) +} + +// URLTemplate returns an attribute KeyValue conforming to the "url.template" +// semantic conventions. It represents the low-cardinality template of an +// [absolute path reference]. +// +// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 +func URLTemplate(val string) attribute.KeyValue { + return URLTemplateKey.String(val) +} + +// URLTopLevelDomain returns an attribute KeyValue conforming to the +// "url.top_level_domain" semantic conventions. It represents the effective top +// level domain (eTLD), also known as the domain suffix, is the last part of the +// domain name. 
For example, the top level domain for example.com is `com`. +func URLTopLevelDomain(val string) attribute.KeyValue { + return URLTopLevelDomainKey.String(val) +} + +// Namespace: user +const ( + // UserEmailKey is the attribute Key conforming to the "user.email" semantic + // conventions. It represents the user email address. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "a.einstein@example.com" + UserEmailKey = attribute.Key("user.email") + + // UserFullNameKey is the attribute Key conforming to the "user.full_name" + // semantic conventions. It represents the user's full name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Albert Einstein" + UserFullNameKey = attribute.Key("user.full_name") + + // UserHashKey is the attribute Key conforming to the "user.hash" semantic + // conventions. It represents the unique user hash to correlate information for + // a user in anonymized form. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "364fc68eaf4c8acec74a4e52d7d1feaa" + // Note: Useful if `user.id` or `user.name` contain confidential information and + // cannot be used. + UserHashKey = attribute.Key("user.hash") + + // UserIDKey is the attribute Key conforming to the "user.id" semantic + // conventions. It represents the unique identifier of the user. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "S-1-5-21-202424912787-2692429404-2351956786-1000" + UserIDKey = attribute.Key("user.id") + + // UserNameKey is the attribute Key conforming to the "user.name" semantic + // conventions. It represents the short name or login/username of the user. 
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "a.einstein" + UserNameKey = attribute.Key("user.name") + + // UserRolesKey is the attribute Key conforming to the "user.roles" semantic + // conventions. It represents the array of user roles at the time of the event. + // + // Type: string[] + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "admin", "reporting_user" + UserRolesKey = attribute.Key("user.roles") +) + +// UserEmail returns an attribute KeyValue conforming to the "user.email" +// semantic conventions. It represents the user email address. +func UserEmail(val string) attribute.KeyValue { + return UserEmailKey.String(val) +} + +// UserFullName returns an attribute KeyValue conforming to the "user.full_name" +// semantic conventions. It represents the user's full name. +func UserFullName(val string) attribute.KeyValue { + return UserFullNameKey.String(val) +} + +// UserHash returns an attribute KeyValue conforming to the "user.hash" semantic +// conventions. It represents the unique user hash to correlate information for a +// user in anonymized form. +func UserHash(val string) attribute.KeyValue { + return UserHashKey.String(val) +} + +// UserID returns an attribute KeyValue conforming to the "user.id" semantic +// conventions. It represents the unique identifier of the user. +func UserID(val string) attribute.KeyValue { + return UserIDKey.String(val) +} + +// UserName returns an attribute KeyValue conforming to the "user.name" semantic +// conventions. It represents the short name or login/username of the user. +func UserName(val string) attribute.KeyValue { + return UserNameKey.String(val) +} + +// UserRoles returns an attribute KeyValue conforming to the "user.roles" +// semantic conventions. It represents the array of user roles at the time of the +// event. 
+func UserRoles(val ...string) attribute.KeyValue { + return UserRolesKey.StringSlice(val) +} + +// Namespace: user_agent +const ( + // UserAgentNameKey is the attribute Key conforming to the "user_agent.name" + // semantic conventions. It represents the name of the user-agent extracted from + // original. Usually refers to the browser's name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Safari", "YourApp" + // Note: [Example] of extracting browser's name from original string. In the + // case of using a user-agent for non-browser products, such as microservices + // with multiple names/versions inside the `user_agent.original`, the most + // significant name SHOULD be selected. In such a scenario it should align with + // `user_agent.version` + // + // [Example]: https://www.whatsmyua.info + UserAgentNameKey = attribute.Key("user_agent.name") + + // UserAgentOriginalKey is the attribute Key conforming to the + // "user_agent.original" semantic conventions. It represents the value of the + // [HTTP User-Agent] header sent by the client. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Stable + // + // Examples: "CERN-LineMode/2.15 libwww/2.17b3", "Mozilla/5.0 (iPhone; CPU + // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) + // Version/14.1.2 Mobile/15E148 Safari/604.1", "YourApp/1.0.0 + // grpc-java-okhttp/1.27.2" + // + // [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent + UserAgentOriginalKey = attribute.Key("user_agent.original") + + // UserAgentOSNameKey is the attribute Key conforming to the + // "user_agent.os.name" semantic conventions. It represents the human readable + // operating system name. 
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "iOS", "Android", "Ubuntu"
+	// Note: For mapping user agent strings to OS names, libraries such as
+	// [ua-parser] can be utilized.
+	//
+	// [ua-parser]: https://github.com/ua-parser
+	UserAgentOSNameKey = attribute.Key("user_agent.os.name")
+
+	// UserAgentOSVersionKey is the attribute Key conforming to the
+	// "user_agent.os.version" semantic conventions. It represents the version
+	// string of the operating system as defined in [Version Attributes].
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "14.2.1", "18.04.1"
+	// Note: For mapping user agent strings to OS versions, libraries such as
+	// [ua-parser] can be utilized.
+	//
+	// [Version Attributes]: /docs/resource/README.md#version-attributes
+	// [ua-parser]: https://github.com/ua-parser
+	UserAgentOSVersionKey = attribute.Key("user_agent.os.version")
+
+	// UserAgentSyntheticTypeKey is the attribute Key conforming to the
+	// "user_agent.synthetic.type" semantic conventions. It represents the
+	// category of synthetic traffic, such as tests or bots.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples:
+	// Note: This attribute MAY be derived from the contents of the
+	// `user_agent.original` attribute. Components that populate the attribute are
+	// responsible for determining what they consider to be synthetic bot or test
+	// traffic. This attribute can either be set for self-identification purposes,
+	// or on telemetry detected to be generated as a result of a synthetic request.
+	// This attribute is useful for distinguishing between genuine client traffic
+	// and synthetic traffic generated by bots or tests.
+ UserAgentSyntheticTypeKey = attribute.Key("user_agent.synthetic.type") + + // UserAgentVersionKey is the attribute Key conforming to the + // "user_agent.version" semantic conventions. It represents the version of the + // user-agent extracted from original. Usually refers to the browser's version. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "14.1.2", "1.0.0" + // Note: [Example] of extracting browser's version from original string. In the + // case of using a user-agent for non-browser products, such as microservices + // with multiple names/versions inside the `user_agent.original`, the most + // significant version SHOULD be selected. In such a scenario it should align + // with `user_agent.name` + // + // [Example]: https://www.whatsmyua.info + UserAgentVersionKey = attribute.Key("user_agent.version") +) + +// UserAgentName returns an attribute KeyValue conforming to the +// "user_agent.name" semantic conventions. It represents the name of the +// user-agent extracted from original. Usually refers to the browser's name. +func UserAgentName(val string) attribute.KeyValue { + return UserAgentNameKey.String(val) +} + +// UserAgentOriginal returns an attribute KeyValue conforming to the +// "user_agent.original" semantic conventions. It represents the value of the +// [HTTP User-Agent] header sent by the client. +// +// [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent +func UserAgentOriginal(val string) attribute.KeyValue { + return UserAgentOriginalKey.String(val) +} + +// UserAgentOSName returns an attribute KeyValue conforming to the +// "user_agent.os.name" semantic conventions. It represents the human readable +// operating system name. +func UserAgentOSName(val string) attribute.KeyValue { + return UserAgentOSNameKey.String(val) +} + +// UserAgentOSVersion returns an attribute KeyValue conforming to the +// "user_agent.os.version" semantic conventions. 
It represents the version string +// of the operating system as defined in [Version Attributes]. +// +// [Version Attributes]: /docs/resource/README.md#version-attributes +func UserAgentOSVersion(val string) attribute.KeyValue { + return UserAgentOSVersionKey.String(val) +} + +// UserAgentVersion returns an attribute KeyValue conforming to the +// "user_agent.version" semantic conventions. It represents the version of the +// user-agent extracted from original. Usually refers to the browser's version. +func UserAgentVersion(val string) attribute.KeyValue { + return UserAgentVersionKey.String(val) +} + +// Enum values for user_agent.synthetic.type +var ( + // Bot source. + // Stability: development + UserAgentSyntheticTypeBot = UserAgentSyntheticTypeKey.String("bot") + // Synthetic test source. + // Stability: development + UserAgentSyntheticTypeTest = UserAgentSyntheticTypeKey.String("test") +) + +// Namespace: vcs +const ( + // VCSChangeIDKey is the attribute Key conforming to the "vcs.change.id" + // semantic conventions. It represents the ID of the change (pull request/merge + // request/changelist) if applicable. This is usually a unique (within + // repository) identifier generated by the VCS system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "123" + VCSChangeIDKey = attribute.Key("vcs.change.id") + + // VCSChangeStateKey is the attribute Key conforming to the "vcs.change.state" + // semantic conventions. It represents the state of the change (pull + // request/merge request/changelist). + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "open", "closed", "merged" + VCSChangeStateKey = attribute.Key("vcs.change.state") + + // VCSChangeTitleKey is the attribute Key conforming to the "vcs.change.title" + // semantic conventions. It represents the human readable title of the change + // (pull request/merge request/changelist). 
This title is often a brief summary + // of the change and may get merged in to a ref as the commit summary. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "Fixes broken thing", "feat: add my new feature", "[chore] update + // dependency" + VCSChangeTitleKey = attribute.Key("vcs.change.title") + + // VCSLineChangeTypeKey is the attribute Key conforming to the + // "vcs.line_change.type" semantic conventions. It represents the type of line + // change being measured on a branch or change. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "added", "removed" + VCSLineChangeTypeKey = attribute.Key("vcs.line_change.type") + + // VCSOwnerNameKey is the attribute Key conforming to the "vcs.owner.name" + // semantic conventions. It represents the group owner within the version + // control system. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-org", "myteam", "business-unit" + VCSOwnerNameKey = attribute.Key("vcs.owner.name") + + // VCSProviderNameKey is the attribute Key conforming to the "vcs.provider.name" + // semantic conventions. It represents the name of the version control system + // provider. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "github", "gitlab", "gitea", "bitbucket" + VCSProviderNameKey = attribute.Key("vcs.provider.name") + + // VCSRefBaseNameKey is the attribute Key conforming to the "vcs.ref.base.name" + // semantic conventions. It represents the name of the [reference] such as + // **branch** or **tag** in the repository. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-feature-branch", "tag-1-test" + // Note: `base` refers to the starting point of a change. 
For example, `main` + // would be the base reference of type branch if you've created a new + // reference of type branch from it and created new commits. + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefBaseNameKey = attribute.Key("vcs.ref.base.name") + + // VCSRefBaseRevisionKey is the attribute Key conforming to the + // "vcs.ref.base.revision" semantic conventions. It represents the revision, + // literally [revised version], The revision most often refers to a commit + // object in Git, or a revision number in SVN. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc", + // "main", "123", "HEAD" + // Note: `base` refers to the starting point of a change. For example, `main` + // would be the base reference of type branch if you've created a new + // reference of type branch from it and created new commits. The + // revision can be a full [hash value (see + // glossary)], + // of the recorded change to a ref within a repository pointing to a + // commit [commit] object. It does + // not necessarily have to be a hash; it can simply define a [revision + // number] + // which is an integer that is monotonically increasing. In cases where + // it is identical to the `ref.base.name`, it SHOULD still be included. + // It is up to the implementer to decide which value to set as the + // revision based on the VCS system and situational context. 
+ // + // [revised version]: https://www.merriam-webster.com/dictionary/revision + // [hash value (see + // glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf + // [commit]: https://git-scm.com/docs/git-commit + // [revision + // number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html + VCSRefBaseRevisionKey = attribute.Key("vcs.ref.base.revision") + + // VCSRefBaseTypeKey is the attribute Key conforming to the "vcs.ref.base.type" + // semantic conventions. It represents the type of the [reference] in the + // repository. + // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "branch", "tag" + // Note: `base` refers to the starting point of a change. For example, `main` + // would be the base reference of type branch if you've created a new + // reference of type branch from it and created new commits. + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefBaseTypeKey = attribute.Key("vcs.ref.base.type") + + // VCSRefHeadNameKey is the attribute Key conforming to the "vcs.ref.head.name" + // semantic conventions. It represents the name of the [reference] such as + // **branch** or **tag** in the repository. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "my-feature-branch", "tag-1-test" + // Note: `head` refers to where you are right now; the current reference at a + // given time. + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefHeadNameKey = attribute.Key("vcs.ref.head.name") + + // VCSRefHeadRevisionKey is the attribute Key conforming to the + // "vcs.ref.head.revision" semantic conventions. It represents the revision, + // literally [revised version], The revision most often refers to a commit + // object in Git, or a revision number in SVN. 
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc",
+	// "main", "123", "HEAD"
+	// Note: `head` refers to where you are right now; the current reference at a
+	// given time. The revision can be a full [hash value (see
+	// glossary)],
+	// of the recorded change to a ref within a repository pointing to a
+	// commit [commit] object. It does
+	// not necessarily have to be a hash; it can simply define a [revision
+	// number]
+	// which is an integer that is monotonically increasing. In cases where
+	// it is identical to the `ref.head.name`, it SHOULD still be included.
+	// It is up to the implementer to decide which value to set as the
+	// revision based on the VCS system and situational context.
+	//
+	// [revised version]: https://www.merriam-webster.com/dictionary/revision
+	// [hash value (see
+	// glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+	// [commit]: https://git-scm.com/docs/git-commit
+	// [revision
+	// number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html
+	VCSRefHeadRevisionKey = attribute.Key("vcs.ref.head.revision")
+
+	// VCSRefHeadTypeKey is the attribute Key conforming to the "vcs.ref.head.type"
+	// semantic conventions. It represents the type of the [reference] in the
+	// repository.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: Development
+	//
+	// Examples: "branch", "tag"
+	// Note: `head` refers to where you are right now; the current reference at a
+	// given time.
+	//
+	// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+	VCSRefHeadTypeKey = attribute.Key("vcs.ref.head.type")
+
+	// VCSRefTypeKey is the attribute Key conforming to the "vcs.ref.type" semantic
+	// conventions. It represents the type of the [reference] in the repository.
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "branch", "tag" + // + // [reference]: https://git-scm.com/docs/gitglossary#def_ref + VCSRefTypeKey = attribute.Key("vcs.ref.type") + + // VCSRepositoryNameKey is the attribute Key conforming to the + // "vcs.repository.name" semantic conventions. It represents the human readable + // name of the repository. It SHOULD NOT include any additional identifier like + // Group/SubGroup in GitLab or organization in GitHub. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "semantic-conventions", "my-cool-repo" + // Note: Due to it only being the name, it can clash with forks of the same + // repository if collecting telemetry across multiple orgs or groups in + // the same backends. + VCSRepositoryNameKey = attribute.Key("vcs.repository.name") + + // VCSRepositoryURLFullKey is the attribute Key conforming to the + // "vcs.repository.url.full" semantic conventions. It represents the + // [canonical URL] of the repository providing the complete HTTP(S) address in + // order to locate and identify the repository through a browser. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: + // "https://github.com/opentelemetry/open-telemetry-collector-contrib", + // "https://gitlab.com/my-org/my-project/my-projects-project/repo" + // Note: In Git Version Control Systems, the canonical URL SHOULD NOT include + // the `.git` extension. + // + // [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. + VCSRepositoryURLFullKey = attribute.Key("vcs.repository.url.full") + + // VCSRevisionDeltaDirectionKey is the attribute Key conforming to the + // "vcs.revision_delta.direction" semantic conventions. It represents the type + // of revision comparison. 
+ // + // Type: Enum + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "ahead", "behind" + VCSRevisionDeltaDirectionKey = attribute.Key("vcs.revision_delta.direction") +) + +// VCSChangeID returns an attribute KeyValue conforming to the "vcs.change.id" +// semantic conventions. It represents the ID of the change (pull request/merge +// request/changelist) if applicable. This is usually a unique (within +// repository) identifier generated by the VCS system. +func VCSChangeID(val string) attribute.KeyValue { + return VCSChangeIDKey.String(val) +} + +// VCSChangeTitle returns an attribute KeyValue conforming to the +// "vcs.change.title" semantic conventions. It represents the human readable +// title of the change (pull request/merge request/changelist). This title is +// often a brief summary of the change and may get merged in to a ref as the +// commit summary. +func VCSChangeTitle(val string) attribute.KeyValue { + return VCSChangeTitleKey.String(val) +} + +// VCSOwnerName returns an attribute KeyValue conforming to the "vcs.owner.name" +// semantic conventions. It represents the group owner within the version control +// system. +func VCSOwnerName(val string) attribute.KeyValue { + return VCSOwnerNameKey.String(val) +} + +// VCSRefBaseName returns an attribute KeyValue conforming to the +// "vcs.ref.base.name" semantic conventions. It represents the name of the +// [reference] such as **branch** or **tag** in the repository. +// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +func VCSRefBaseName(val string) attribute.KeyValue { + return VCSRefBaseNameKey.String(val) +} + +// VCSRefBaseRevision returns an attribute KeyValue conforming to the +// "vcs.ref.base.revision" semantic conventions. It represents the revision, +// literally [revised version], The revision most often refers to a commit object +// in Git, or a revision number in SVN. 
+// +// [revised version]: https://www.merriam-webster.com/dictionary/revision +func VCSRefBaseRevision(val string) attribute.KeyValue { + return VCSRefBaseRevisionKey.String(val) +} + +// VCSRefHeadName returns an attribute KeyValue conforming to the +// "vcs.ref.head.name" semantic conventions. It represents the name of the +// [reference] such as **branch** or **tag** in the repository. +// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +func VCSRefHeadName(val string) attribute.KeyValue { + return VCSRefHeadNameKey.String(val) +} + +// VCSRefHeadRevision returns an attribute KeyValue conforming to the +// "vcs.ref.head.revision" semantic conventions. It represents the revision, +// literally [revised version], The revision most often refers to a commit object +// in Git, or a revision number in SVN. +// +// [revised version]: https://www.merriam-webster.com/dictionary/revision +func VCSRefHeadRevision(val string) attribute.KeyValue { + return VCSRefHeadRevisionKey.String(val) +} + +// VCSRepositoryName returns an attribute KeyValue conforming to the +// "vcs.repository.name" semantic conventions. It represents the human readable +// name of the repository. It SHOULD NOT include any additional identifier like +// Group/SubGroup in GitLab or organization in GitHub. +func VCSRepositoryName(val string) attribute.KeyValue { + return VCSRepositoryNameKey.String(val) +} + +// VCSRepositoryURLFull returns an attribute KeyValue conforming to the +// "vcs.repository.url.full" semantic conventions. It represents the +// [canonical URL] of the repository providing the complete HTTP(S) address in +// order to locate and identify the repository through a browser. +// +// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. 
+func VCSRepositoryURLFull(val string) attribute.KeyValue { + return VCSRepositoryURLFullKey.String(val) +} + +// Enum values for vcs.change.state +var ( + // Open means the change is currently active and under review. It hasn't been + // merged into the target branch yet, and it's still possible to make changes or + // add comments. + // Stability: development + VCSChangeStateOpen = VCSChangeStateKey.String("open") + // WIP (work-in-progress, draft) means the change is still in progress and not + // yet ready for a full review. It might still undergo significant changes. + // Stability: development + VCSChangeStateWip = VCSChangeStateKey.String("wip") + // Closed means the merge request has been closed without merging. This can + // happen for various reasons, such as the changes being deemed unnecessary, the + // issue being resolved in another way, or the author deciding to withdraw the + // request. + // Stability: development + VCSChangeStateClosed = VCSChangeStateKey.String("closed") + // Merged indicates that the change has been successfully integrated into the + // target codebase. + // Stability: development + VCSChangeStateMerged = VCSChangeStateKey.String("merged") +) + +// Enum values for vcs.line_change.type +var ( + // How many lines were added. + // Stability: development + VCSLineChangeTypeAdded = VCSLineChangeTypeKey.String("added") + // How many lines were removed. 
+ // Stability: development + VCSLineChangeTypeRemoved = VCSLineChangeTypeKey.String("removed") +) + +// Enum values for vcs.provider.name +var ( + // [GitHub] + // Stability: development + // + // [GitHub]: https://github.com + VCSProviderNameGithub = VCSProviderNameKey.String("github") + // [GitLab] + // Stability: development + // + // [GitLab]: https://gitlab.com + VCSProviderNameGitlab = VCSProviderNameKey.String("gitlab") + // [Gitea] + // Stability: development + // + // [Gitea]: https://gitea.io + VCSProviderNameGitea = VCSProviderNameKey.String("gitea") + // [Bitbucket] + // Stability: development + // + // [Bitbucket]: https://bitbucket.org + VCSProviderNameBitbucket = VCSProviderNameKey.String("bitbucket") +) + +// Enum values for vcs.ref.base.type +var ( + // [branch] + // Stability: development + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + VCSRefBaseTypeBranch = VCSRefBaseTypeKey.String("branch") + // [tag] + // Stability: development + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + VCSRefBaseTypeTag = VCSRefBaseTypeKey.String("tag") +) + +// Enum values for vcs.ref.head.type +var ( + // [branch] + // Stability: development + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + VCSRefHeadTypeBranch = VCSRefHeadTypeKey.String("branch") + // [tag] + // Stability: development + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + VCSRefHeadTypeTag = VCSRefHeadTypeKey.String("tag") +) + +// Enum values for vcs.ref.type +var ( + // [branch] + // Stability: development + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + VCSRefTypeBranch = VCSRefTypeKey.String("branch") + // [tag] + // Stability: development + // + // [tag]: 
https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + VCSRefTypeTag = VCSRefTypeKey.String("tag") +) + +// Enum values for vcs.revision_delta.direction +var ( + // How many revisions the change is behind the target ref. + // Stability: development + VCSRevisionDeltaDirectionBehind = VCSRevisionDeltaDirectionKey.String("behind") + // How many revisions the change is ahead of the target ref. + // Stability: development + VCSRevisionDeltaDirectionAhead = VCSRevisionDeltaDirectionKey.String("ahead") +) + +// Namespace: webengine +const ( + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the additional + // description of the web engine (e.g. detailed version and edition + // information). + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final" + WebEngineDescriptionKey = attribute.Key("webengine.description") + + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "WildFly" + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of the + // web engine. + // + // Type: string + // RequirementLevel: Recommended + // Stability: Development + // + // Examples: "21.0.0" + WebEngineVersionKey = attribute.Key("webengine.version") +) + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition information). 
func WebEngineDescription(val string) attribute.KeyValue {
	return WebEngineDescriptionKey.String(val)
}

// WebEngineName returns an attribute KeyValue conforming to the "webengine.name"
// semantic conventions. It represents the name of the web engine.
func WebEngineName(val string) attribute.KeyValue {
	return WebEngineNameKey.String(val)
}

// WebEngineVersion returns an attribute KeyValue conforming to the
// "webengine.version" semantic conventions. It represents the version of the web
// engine.
func WebEngineVersion(val string) attribute.KeyValue {
	return WebEngineVersionKey.String(val)
}

// Namespace: zos
const (
	// ZOSSmfIDKey is the attribute Key conforming to the "zos.smf.id" semantic
	// conventions. It represents the System Management Facility (SMF) Identifier
	// that uniquely identifies a z/OS system within a SYSPLEX or mainframe
	// environment and is used for system and performance analysis.
	//
	// Type: string
	// RequirementLevel: Recommended
	// Stability: Development
	//
	// Examples: "SYS1"
	ZOSSmfIDKey = attribute.Key("zos.smf.id")

	// ZOSSysplexNameKey is the attribute Key conforming to the "zos.sysplex.name"
	// semantic conventions. It represents the name of the SYSPLEX to which the z/OS
	// system belongs.
	//
	// Type: string
	// RequirementLevel: Recommended
	// Stability: Development
	//
	// Examples: "SYSPLEX1"
	ZOSSysplexNameKey = attribute.Key("zos.sysplex.name")
)

// ZOSSmfID returns an attribute KeyValue conforming to the "zos.smf.id" semantic
// conventions. It represents the System Management Facility (SMF) Identifier
// that uniquely identifies a z/OS system within a SYSPLEX or mainframe
// environment and is used for system and performance analysis.
func ZOSSmfID(val string) attribute.KeyValue {
	return ZOSSmfIDKey.String(val)
}

// ZOSSysplexName returns an attribute KeyValue conforming to the
// "zos.sysplex.name" semantic conventions.
It represents the name of the SYSPLEX
// to which the z/OS system belongs.
func ZOSSysplexName(val string) attribute.KeyValue {
	return ZOSSysplexNameKey.String(val)
}
\ No newline at end of file
diff --git a/semconv/v1.37.0/azureconv/metric.go b/semconv/v1.37.0/azureconv/metric.go
new file mode 100644
index 00000000000..7a1b132e6f1
--- /dev/null
+++ b/semconv/v1.37.0/azureconv/metric.go
@@ -0,0 +1,333 @@
// Code generated from semantic convention specification. DO NOT EDIT.

// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

// Package azureconv provides types and functionality for OpenTelemetry semantic
// conventions in the "azure" namespace.
package azureconv

import (
	"context"
	"sync"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/noop"
)

// Pools of reusable option slices, shared by all instruments in this package,
// so the hot Add/Record paths do not allocate a fresh slice per call.
var (
	addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }}
	recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }}
)

// CosmosDBConsistencyLevelAttr is an attribute conforming to the
// azure.cosmosdb.consistency.level semantic conventions. It represents the
// account or request [consistency level].
//
// [consistency level]: https://learn.microsoft.com/azure/cosmos-db/consistency-levels
type CosmosDBConsistencyLevelAttr string

var (
	// CosmosDBConsistencyLevelStrong is the strong.
	CosmosDBConsistencyLevelStrong CosmosDBConsistencyLevelAttr = "Strong"
	// CosmosDBConsistencyLevelBoundedStaleness is the bounded Staleness.
	CosmosDBConsistencyLevelBoundedStaleness CosmosDBConsistencyLevelAttr = "BoundedStaleness"
	// CosmosDBConsistencyLevelSession is the session.
	CosmosDBConsistencyLevelSession CosmosDBConsistencyLevelAttr = "Session"
	// CosmosDBConsistencyLevelEventual is the eventual.
	CosmosDBConsistencyLevelEventual CosmosDBConsistencyLevelAttr = "Eventual"
	// CosmosDBConsistencyLevelConsistentPrefix is the consistent Prefix.
+ CosmosDBConsistencyLevelConsistentPrefix CosmosDBConsistencyLevelAttr = "ConsistentPrefix" +) + +// ErrorTypeAttr is an attribute conforming to the error.type semantic +// conventions. It represents the describes a class of error the operation ended +// with. +type ErrorTypeAttr string + +var ( + // ErrorTypeOther is a fallback error value to be used when the instrumentation + // doesn't define a custom value. + ErrorTypeOther ErrorTypeAttr = "_OTHER" +) + +// CosmosDBClientActiveInstanceCount is an instrument used to record metric +// values conforming to the "azure.cosmosdb.client.active_instance.count" +// semantic conventions. It represents the number of active client instances. +type CosmosDBClientActiveInstanceCount struct { + metric.Int64UpDownCounter +} + +// NewCosmosDBClientActiveInstanceCount returns a new +// CosmosDBClientActiveInstanceCount instrument. +func NewCosmosDBClientActiveInstanceCount( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (CosmosDBClientActiveInstanceCount, error) { + // Check if the meter is nil. + if m == nil { + return CosmosDBClientActiveInstanceCount{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "azure.cosmosdb.client.active_instance.count", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of active client instances."), + metric.WithUnit("{instance}"), + }, opt...)..., + ) + if err != nil { + return CosmosDBClientActiveInstanceCount{noop.Int64UpDownCounter{}}, err + } + return CosmosDBClientActiveInstanceCount{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m CosmosDBClientActiveInstanceCount) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
func (CosmosDBClientActiveInstanceCount) Name() string {
	return "azure.cosmosdb.client.active_instance.count"
}

// Unit returns the semantic convention unit of the instrument.
func (CosmosDBClientActiveInstanceCount) Unit() string {
	return "{instance}"
}

// Description returns the semantic convention description of the instrument.
func (CosmosDBClientActiveInstanceCount) Description() string {
	return "Number of active client instances."
}

// Add adds incr to the existing count for attrs.
//
// All additional attrs passed are included in the recorded value.
func (m CosmosDBClientActiveInstanceCount) Add(
	ctx context.Context,
	incr int64,
	attrs ...attribute.KeyValue,
) {
	// Fast path: every attribute of this instrument is optional, so when no
	// attrs are supplied nothing needs to be attached and the option-pool
	// round trip can be skipped entirely.
	if len(attrs) == 0 {
		m.Int64UpDownCounter.Add(ctx, incr)
		return
	}

	o := addOptPool.Get().(*[]metric.AddOption)
	defer func() {
		// Reset length (keeping capacity) before returning the slice to the pool.
		*o = (*o)[:0]
		addOptPool.Put(o)
	}()

	*o = append(
		*o,
		metric.WithAttributes(
			attrs...,
		),
	)

	m.Int64UpDownCounter.Add(ctx, incr, *o...)
}

// AddSet adds incr to the existing count for set.
func (m CosmosDBClientActiveInstanceCount) AddSet(ctx context.Context, incr int64, set attribute.Set) {
	// An empty set carries no attributes, so record directly.
	if set.Len() == 0 {
		m.Int64UpDownCounter.Add(ctx, incr)
		return
	}

	o := addOptPool.Get().(*[]metric.AddOption)
	defer func() {
		*o = (*o)[:0]
		addOptPool.Put(o)
	}()

	*o = append(*o, metric.WithAttributeSet(set))
	m.Int64UpDownCounter.Add(ctx, incr, *o...)
}

// AttrServerPort returns an optional attribute for the "server.port" semantic
// convention. It represents the server port number.
func (CosmosDBClientActiveInstanceCount) AttrServerPort(val int) attribute.KeyValue {
	return attribute.Int("server.port", val)
}

// AttrServerAddress returns an optional attribute for the "server.address"
// semantic convention. It represents the name of the database host.
+func (CosmosDBClientActiveInstanceCount) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// CosmosDBClientOperationRequestCharge is an instrument used to record metric +// values conforming to the "azure.cosmosdb.client.operation.request_charge" +// semantic conventions. It represents the [Request units] consumed by the +// operation. +// +// [Request units]: https://learn.microsoft.com/azure/cosmos-db/request-units +type CosmosDBClientOperationRequestCharge struct { + metric.Int64Histogram +} + +// NewCosmosDBClientOperationRequestCharge returns a new +// CosmosDBClientOperationRequestCharge instrument. +func NewCosmosDBClientOperationRequestCharge( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (CosmosDBClientOperationRequestCharge, error) { + // Check if the meter is nil. + if m == nil { + return CosmosDBClientOperationRequestCharge{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "azure.cosmosdb.client.operation.request_charge", + append([]metric.Int64HistogramOption{ + metric.WithDescription("[Request units](https://learn.microsoft.com/azure/cosmos-db/request-units) consumed by the operation."), + metric.WithUnit("{request_unit}"), + }, opt...)..., + ) + if err != nil { + return CosmosDBClientOperationRequestCharge{noop.Int64Histogram{}}, err + } + return CosmosDBClientOperationRequestCharge{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m CosmosDBClientOperationRequestCharge) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. 
+func (CosmosDBClientOperationRequestCharge) Name() string { + return "azure.cosmosdb.client.operation.request_charge" +} + +// Unit returns the semantic convention unit of the instrument +func (CosmosDBClientOperationRequestCharge) Unit() string { + return "{request_unit}" +} + +// Description returns the semantic convention description of the instrument +func (CosmosDBClientOperationRequestCharge) Description() string { + return "[Request units](https://learn.microsoft.com/azure/cosmos-db/request-units) consumed by the operation." +} + +// Record records val to the current distribution for attrs. +// +// The dbOperationName is the the name of the operation or command being +// executed. +// +// All additional attrs passed are included in the recorded value. +func (m CosmosDBClientOperationRequestCharge) Record( + ctx context.Context, + val int64, + dbOperationName string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("db.operation.name", dbOperationName), + )..., + ), + ) + + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +func (m CosmosDBClientOperationRequestCharge) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// AttrCosmosDBConsistencyLevel returns an optional attribute for the +// "azure.cosmosdb.consistency.level" semantic convention. It represents the +// account or request [consistency level]. 
+// +// [consistency level]: https://learn.microsoft.com/azure/cosmos-db/consistency-levels +func (CosmosDBClientOperationRequestCharge) AttrCosmosDBConsistencyLevel(val CosmosDBConsistencyLevelAttr) attribute.KeyValue { + return attribute.String("azure.cosmosdb.consistency.level", string(val)) +} + +// AttrCosmosDBResponseSubStatusCode returns an optional attribute for the +// "azure.cosmosdb.response.sub_status_code" semantic convention. It represents +// the cosmos DB sub status code. +func (CosmosDBClientOperationRequestCharge) AttrCosmosDBResponseSubStatusCode(val int) attribute.KeyValue { + return attribute.Int("azure.cosmosdb.response.sub_status_code", val) +} + +// AttrDBCollectionName returns an optional attribute for the +// "db.collection.name" semantic convention. It represents the cosmos DB +// container name. +func (CosmosDBClientOperationRequestCharge) AttrDBCollectionName(val string) attribute.KeyValue { + return attribute.String("db.collection.name", val) +} + +// AttrDBNamespace returns an optional attribute for the "db.namespace" semantic +// convention. It represents the name of the database, fully qualified within the +// server address and port. +func (CosmosDBClientOperationRequestCharge) AttrDBNamespace(val string) attribute.KeyValue { + return attribute.String("db.namespace", val) +} + +// AttrDBResponseStatusCode returns an optional attribute for the +// "db.response.status_code" semantic convention. It represents the database +// response status code. +func (CosmosDBClientOperationRequestCharge) AttrDBResponseStatusCode(val string) attribute.KeyValue { + return attribute.String("db.response.status_code", val) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. 
func (CosmosDBClientOperationRequestCharge) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
	return attribute.String("error.type", string(val))
}

// AttrServerPort returns an optional attribute for the "server.port" semantic
// convention. It represents the server port number.
func (CosmosDBClientOperationRequestCharge) AttrServerPort(val int) attribute.KeyValue {
	return attribute.Int("server.port", val)
}

// AttrCosmosDBOperationContactedRegions returns an optional attribute for the
// "azure.cosmosdb.operation.contacted_regions" semantic convention. It
// represents the list of regions contacted during operation in the order that
// they were contacted. If there is more than one region listed, it indicates
// that the operation was performed on multiple regions i.e. cross-regional call.
func (CosmosDBClientOperationRequestCharge) AttrCosmosDBOperationContactedRegions(val ...string) attribute.KeyValue {
	return attribute.StringSlice("azure.cosmosdb.operation.contacted_regions", val)
}

// AttrServerAddress returns an optional attribute for the "server.address"
// semantic convention. It represents the name of the database host.
func (CosmosDBClientOperationRequestCharge) AttrServerAddress(val string) attribute.KeyValue {
	return attribute.String("server.address", val)
}
\ No newline at end of file
diff --git a/semconv/v1.37.0/cicdconv/metric.go b/semconv/v1.37.0/cicdconv/metric.go
new file mode 100644
index 00000000000..2e156d394c1
--- /dev/null
+++ b/semconv/v1.37.0/cicdconv/metric.go
@@ -0,0 +1,643 @@
// Code generated from semantic convention specification. DO NOT EDIT.

// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

// Package cicdconv provides types and functionality for OpenTelemetry semantic
// conventions in the "cicd" namespace.
+package cicdconv + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" +) + +var ( + addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }} + recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }} +) + +// PipelineResultAttr is an attribute conforming to the cicd.pipeline.result +// semantic conventions. It represents the result of a pipeline run. +type PipelineResultAttr string + +var ( + // PipelineResultSuccess is the pipeline run finished successfully. + PipelineResultSuccess PipelineResultAttr = "success" + // PipelineResultFailure is the pipeline run did not finish successfully, eg. + // due to a compile error or a failing test. Such failures are usually detected + // by non-zero exit codes of the tools executed in the pipeline run. + PipelineResultFailure PipelineResultAttr = "failure" + // PipelineResultError is the pipeline run failed due to an error in the CICD + // system, eg. due to the worker being killed. + PipelineResultError PipelineResultAttr = "error" + // PipelineResultTimeout is a timeout caused the pipeline run to be interrupted. + PipelineResultTimeout PipelineResultAttr = "timeout" + // PipelineResultCancellation is the pipeline run was cancelled, eg. by a user + // manually cancelling the pipeline run. + PipelineResultCancellation PipelineResultAttr = "cancellation" + // PipelineResultSkip is the pipeline run was skipped, eg. due to a precondition + // not being met. + PipelineResultSkip PipelineResultAttr = "skip" +) + +// PipelineRunStateAttr is an attribute conforming to the cicd.pipeline.run.state +// semantic conventions. It represents the pipeline run goes through these states +// during its lifecycle. +type PipelineRunStateAttr string + +var ( + // PipelineRunStatePending is the run pending state spans from the event + // triggering the pipeline run until the execution of the run starts (eg. 
time + // spent in a queue, provisioning agents, creating run resources). + PipelineRunStatePending PipelineRunStateAttr = "pending" + // PipelineRunStateExecuting is the executing state spans the execution of any + // run tasks (eg. build, test). + PipelineRunStateExecuting PipelineRunStateAttr = "executing" + // PipelineRunStateFinalizing is the finalizing state spans from when the run + // has finished executing (eg. cleanup of run resources). + PipelineRunStateFinalizing PipelineRunStateAttr = "finalizing" +) + +// WorkerStateAttr is an attribute conforming to the cicd.worker.state semantic +// conventions. It represents the state of a CICD worker / agent. +type WorkerStateAttr string + +var ( + // WorkerStateAvailable is the worker is not performing work for the CICD + // system. It is available to the CICD system to perform work on (online / + // idle). + WorkerStateAvailable WorkerStateAttr = "available" + // WorkerStateBusy is the worker is performing work for the CICD system. + WorkerStateBusy WorkerStateAttr = "busy" + // WorkerStateOffline is the worker is not available to the CICD system + // (disconnected / down). + WorkerStateOffline WorkerStateAttr = "offline" +) + +// ErrorTypeAttr is an attribute conforming to the error.type semantic +// conventions. It represents the describes a class of error the operation ended +// with. +type ErrorTypeAttr string + +var ( + // ErrorTypeOther is a fallback error value to be used when the instrumentation + // doesn't define a custom value. + ErrorTypeOther ErrorTypeAttr = "_OTHER" +) + +// PipelineRunActive is an instrument used to record metric values conforming to +// the "cicd.pipeline.run.active" semantic conventions. It represents the number +// of pipeline runs currently active in the system by state. +type PipelineRunActive struct { + metric.Int64UpDownCounter +} + +// NewPipelineRunActive returns a new PipelineRunActive instrument. 
func NewPipelineRunActive(
	m metric.Meter,
	opt ...metric.Int64UpDownCounterOption,
) (PipelineRunActive, error) {
	// A nil meter yields a no-op instrument rather than an error so callers
	// can use the zero configuration safely.
	if m == nil {
		return PipelineRunActive{noop.Int64UpDownCounter{}}, nil
	}

	i, err := m.Int64UpDownCounter(
		"cicd.pipeline.run.active",
		append([]metric.Int64UpDownCounterOption{
			metric.WithDescription("The number of pipeline runs currently active in the system by state."),
			metric.WithUnit("{run}"),
		}, opt...)...,
	)
	if err != nil {
		return PipelineRunActive{noop.Int64UpDownCounter{}}, err
	}
	return PipelineRunActive{i}, nil
}

// Inst returns the underlying metric instrument.
func (m PipelineRunActive) Inst() metric.Int64UpDownCounter {
	return m.Int64UpDownCounter
}

// Name returns the semantic convention name of the instrument.
func (PipelineRunActive) Name() string {
	return "cicd.pipeline.run.active"
}

// Unit returns the semantic convention unit of the instrument.
func (PipelineRunActive) Unit() string {
	return "{run}"
}

// Description returns the semantic convention description of the instrument.
func (PipelineRunActive) Description() string {
	return "The number of pipeline runs currently active in the system by state."
}

// Add adds incr to the existing count for attrs.
//
// The pipelineName is the human readable name of the pipeline within a CI/CD
// system.
//
// The pipelineRunState is the state the pipeline run goes through during
// its lifecycle.
+func (m PipelineRunActive) Add( + ctx context.Context, + incr int64, + pipelineName string, + pipelineRunState PipelineRunStateAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("cicd.pipeline.name", pipelineName), + attribute.String("cicd.pipeline.run.state", string(pipelineRunState)), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m PipelineRunActive) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// PipelineRunDuration is an instrument used to record metric values conforming +// to the "cicd.pipeline.run.duration" semantic conventions. It represents the +// duration of a pipeline run grouped by pipeline, state and result. +type PipelineRunDuration struct { + metric.Float64Histogram +} + +// NewPipelineRunDuration returns a new PipelineRunDuration instrument. +func NewPipelineRunDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (PipelineRunDuration, error) { + // Check if the meter is nil. 
	if m == nil {
		return PipelineRunDuration{noop.Float64Histogram{}}, nil
	}

	i, err := m.Float64Histogram(
		"cicd.pipeline.run.duration",
		append([]metric.Float64HistogramOption{
			metric.WithDescription("Duration of a pipeline run grouped by pipeline, state and result."),
			metric.WithUnit("s"),
		}, opt...)...,
	)
	if err != nil {
		return PipelineRunDuration{noop.Float64Histogram{}}, err
	}
	return PipelineRunDuration{i}, nil
}

// Inst returns the underlying metric instrument.
func (m PipelineRunDuration) Inst() metric.Float64Histogram {
	return m.Float64Histogram
}

// Name returns the semantic convention name of the instrument.
func (PipelineRunDuration) Name() string {
	return "cicd.pipeline.run.duration"
}

// Unit returns the semantic convention unit of the instrument.
func (PipelineRunDuration) Unit() string {
	return "s"
}

// Description returns the semantic convention description of the instrument.
func (PipelineRunDuration) Description() string {
	return "Duration of a pipeline run grouped by pipeline, state and result."
}

// Record records val to the current distribution for attrs.
//
// The pipelineName is the human readable name of the pipeline within a CI/CD
// system.
//
// The pipelineRunState is the state the pipeline run goes through during
// its lifecycle.
//
// All additional attrs passed are included in the recorded value.
+func (m PipelineRunDuration) Record( + ctx context.Context, + val float64, + pipelineName string, + pipelineRunState PipelineRunStateAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("cicd.pipeline.name", pipelineName), + attribute.String("cicd.pipeline.run.state", string(pipelineRunState)), + )..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +func (m PipelineRunDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// AttrPipelineResult returns an optional attribute for the +// "cicd.pipeline.result" semantic convention. It represents the result of a +// pipeline run. +func (PipelineRunDuration) AttrPipelineResult(val PipelineResultAttr) attribute.KeyValue { + return attribute.String("cicd.pipeline.result", string(val)) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (PipelineRunDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// PipelineRunErrors is an instrument used to record metric values conforming to +// the "cicd.pipeline.run.errors" semantic conventions. It represents the number +// of errors encountered in pipeline runs (eg. compile, test failures). 
+type PipelineRunErrors struct {
+	metric.Int64Counter
+}
+
+// NewPipelineRunErrors returns a new PipelineRunErrors instrument.
+func NewPipelineRunErrors(
+	m metric.Meter,
+	opt ...metric.Int64CounterOption,
+) (PipelineRunErrors, error) {
+	// Check if the meter is nil.
+	if m == nil {
+		return PipelineRunErrors{noop.Int64Counter{}}, nil
+	}
+
+	i, err := m.Int64Counter(
+		"cicd.pipeline.run.errors",
+		append([]metric.Int64CounterOption{
+			metric.WithDescription("The number of errors encountered in pipeline runs (eg. compile, test failures)."),
+			metric.WithUnit("{error}"),
+		}, opt...)...,
+	)
+	if err != nil {
+		return PipelineRunErrors{noop.Int64Counter{}}, err
+	}
+	return PipelineRunErrors{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m PipelineRunErrors) Inst() metric.Int64Counter {
+	return m.Int64Counter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (PipelineRunErrors) Name() string {
+	return "cicd.pipeline.run.errors"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (PipelineRunErrors) Unit() string {
+	return "{error}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (PipelineRunErrors) Description() string {
+	return "The number of errors encountered in pipeline runs (eg. compile, test failures)."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// The pipelineName is the human readable name of the pipeline within a CI/CD
+// system.
+//
+// The errorType describes a class of error the operation ended with.
+//
+// There might be errors in a pipeline run that are non fatal (eg. they are
+// suppressed) or in a parallel stage multiple stages could have a fatal error.
+// This means that this error count might not be the same as the count of metric
+// `cicd.pipeline.run.duration` with run result `failure`.
+func (m PipelineRunErrors) Add( + ctx context.Context, + incr int64, + pipelineName string, + errorType ErrorTypeAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("cicd.pipeline.name", pipelineName), + attribute.String("error.type", string(errorType)), + )..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// There might be errors in a pipeline run that are non fatal (eg. they are +// suppressed) or in a parallel stage multiple stages could have a fatal error. +// This means that this error count might not be the same as the count of metric +// `cicd.pipeline.run.duration` with run result `failure`. +func (m PipelineRunErrors) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// SystemErrors is an instrument used to record metric values conforming to the +// "cicd.system.errors" semantic conventions. It represents the number of errors +// in a component of the CICD system (eg. controller, scheduler, agent). +type SystemErrors struct { + metric.Int64Counter +} + +// NewSystemErrors returns a new SystemErrors instrument. +func NewSystemErrors( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SystemErrors, error) { + // Check if the meter is nil. 
+	if m == nil {
+		return SystemErrors{noop.Int64Counter{}}, nil
+	}
+
+	i, err := m.Int64Counter(
+		"cicd.system.errors",
+		append([]metric.Int64CounterOption{
+			metric.WithDescription("The number of errors in a component of the CICD system (eg. controller, scheduler, agent)."),
+			metric.WithUnit("{error}"),
+		}, opt...)...,
+	)
+	if err != nil {
+		return SystemErrors{noop.Int64Counter{}}, err
+	}
+	return SystemErrors{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SystemErrors) Inst() metric.Int64Counter {
+	return m.Int64Counter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SystemErrors) Name() string {
+	return "cicd.system.errors"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SystemErrors) Unit() string {
+	return "{error}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SystemErrors) Description() string {
+	return "The number of errors in a component of the CICD system (eg. controller, scheduler, agent)."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// The systemComponent is the name of a component of the CICD system.
+//
+// The errorType describes a class of error the operation ended with.
+//
+// Errors in pipeline run execution are explicitly excluded. Ie a test failure is
+// not counted in this metric.
+func (m SystemErrors) Add(
+	ctx context.Context,
+	incr int64,
+	systemComponent string,
+	errorType ErrorTypeAttr,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64Counter.Add(ctx, incr)
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("cicd.system.component", systemComponent),
+				attribute.String("error.type", string(errorType)),
+			)...,
+		),
+	)
+
+	m.Int64Counter.Add(ctx, incr, *o...)
+} + +// AddSet adds incr to the existing count for set. +// +// Errors in pipeline run execution are explicitly excluded. Ie a test failure is +// not counted in this metric. +func (m SystemErrors) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// WorkerCount is an instrument used to record metric values conforming to the +// "cicd.worker.count" semantic conventions. It represents the number of workers +// on the CICD system by state. +type WorkerCount struct { + metric.Int64UpDownCounter +} + +// NewWorkerCount returns a new WorkerCount instrument. +func NewWorkerCount( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (WorkerCount, error) { + // Check if the meter is nil. + if m == nil { + return WorkerCount{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "cicd.worker.count", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of workers on the CICD system by state."), + metric.WithUnit("{count}"), + }, opt...)..., + ) + if err != nil { + return WorkerCount{noop.Int64UpDownCounter{}}, err + } + return WorkerCount{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m WorkerCount) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (WorkerCount) Name() string { + return "cicd.worker.count" +} + +// Unit returns the semantic convention unit of the instrument +func (WorkerCount) Unit() string { + return "{count}" +} + +// Description returns the semantic convention description of the instrument +func (WorkerCount) Description() string { + return "The number of workers on the CICD system by state." 
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// The workerState is the state of a CICD worker / agent.
+func (m WorkerCount) Add(
+	ctx context.Context,
+	incr int64,
+	workerState WorkerStateAttr,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64UpDownCounter.Add(ctx, incr)
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("cicd.worker.state", string(workerState)),
+			)...,
+		),
+	)
+
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+func (m WorkerCount) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64UpDownCounter.Add(ctx, incr)
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
\ No newline at end of file
diff --git a/semconv/v1.37.0/containerconv/metric.go b/semconv/v1.37.0/containerconv/metric.go
new file mode 100644
index 00000000000..a480f96e512
--- /dev/null
+++ b/semconv/v1.37.0/containerconv/metric.go
@@ -0,0 +1,1000 @@
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package containerconv provides types and functionality for OpenTelemetry
+// semantic conventions in the "container" namespace.
+package containerconv + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" +) + +var ( + addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }} + recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }} +) + +// CPUModeAttr is an attribute conforming to the cpu.mode semantic conventions. +// It represents the CPU mode for this data point. A container's CPU metric +// SHOULD be characterized *either* by data points with no `mode` labels, +// *or only* data points with `mode` labels. +type CPUModeAttr string + +var ( + // CPUModeUser is the user. + CPUModeUser CPUModeAttr = "user" + // CPUModeSystem is the system. + CPUModeSystem CPUModeAttr = "system" + // CPUModeNice is the nice. + CPUModeNice CPUModeAttr = "nice" + // CPUModeIdle is the idle. + CPUModeIdle CPUModeAttr = "idle" + // CPUModeIOWait is the IO Wait. + CPUModeIOWait CPUModeAttr = "iowait" + // CPUModeInterrupt is the interrupt. + CPUModeInterrupt CPUModeAttr = "interrupt" + // CPUModeSteal is the steal. + CPUModeSteal CPUModeAttr = "steal" + // CPUModeKernel is the kernel. + CPUModeKernel CPUModeAttr = "kernel" +) + +// DiskIODirectionAttr is an attribute conforming to the disk.io.direction +// semantic conventions. It represents the disk IO operation direction. +type DiskIODirectionAttr string + +var ( + // DiskIODirectionRead is the standardized value "read" of DiskIODirectionAttr. + DiskIODirectionRead DiskIODirectionAttr = "read" + // DiskIODirectionWrite is the standardized value "write" of + // DiskIODirectionAttr. + DiskIODirectionWrite DiskIODirectionAttr = "write" +) + +// NetworkIODirectionAttr is an attribute conforming to the network.io.direction +// semantic conventions. It represents the network IO operation direction. 
+type NetworkIODirectionAttr string + +var ( + // NetworkIODirectionTransmit is the standardized value "transmit" of + // NetworkIODirectionAttr. + NetworkIODirectionTransmit NetworkIODirectionAttr = "transmit" + // NetworkIODirectionReceive is the standardized value "receive" of + // NetworkIODirectionAttr. + NetworkIODirectionReceive NetworkIODirectionAttr = "receive" +) + +// CPUTime is an instrument used to record metric values conforming to the +// "container.cpu.time" semantic conventions. It represents the total CPU time +// consumed. +type CPUTime struct { + metric.Float64Counter +} + +// NewCPUTime returns a new CPUTime instrument. +func NewCPUTime( + m metric.Meter, + opt ...metric.Float64CounterOption, +) (CPUTime, error) { + // Check if the meter is nil. + if m == nil { + return CPUTime{noop.Float64Counter{}}, nil + } + + i, err := m.Float64Counter( + "container.cpu.time", + append([]metric.Float64CounterOption{ + metric.WithDescription("Total CPU time consumed."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return CPUTime{noop.Float64Counter{}}, err + } + return CPUTime{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m CPUTime) Inst() metric.Float64Counter { + return m.Float64Counter +} + +// Name returns the semantic convention name of the instrument. +func (CPUTime) Name() string { + return "container.cpu.time" +} + +// Unit returns the semantic convention unit of the instrument +func (CPUTime) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (CPUTime) Description() string { + return "Total CPU time consumed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. 
+// +// Total CPU time consumed by the specific container on all available CPU cores +func (m CPUTime) Add( + ctx context.Context, + incr float64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Float64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// Total CPU time consumed by the specific container on all available CPU cores +func (m CPUTime) AddSet(ctx context.Context, incr float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Counter.Add(ctx, incr, *o...) +} + +// AttrCPUMode returns an optional attribute for the "cpu.mode" semantic +// convention. It represents the CPU mode for this data point. A container's CPU +// metric SHOULD be characterized *either* by data points with no `mode` labels, +// *or only* data points with `mode` labels. +func (CPUTime) AttrCPUMode(val CPUModeAttr) attribute.KeyValue { + return attribute.String("cpu.mode", string(val)) +} + +// CPUUsage is an instrument used to record metric values conforming to the +// "container.cpu.usage" semantic conventions. It represents the container's CPU +// usage, measured in cpus. Range from 0 to the number of allocatable CPUs. +type CPUUsage struct { + metric.Int64Gauge +} + +// NewCPUUsage returns a new CPUUsage instrument. +func NewCPUUsage( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (CPUUsage, error) { + // Check if the meter is nil. 
+ if m == nil { + return CPUUsage{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "container.cpu.usage", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Container's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs."), + metric.WithUnit("{cpu}"), + }, opt...)..., + ) + if err != nil { + return CPUUsage{noop.Int64Gauge{}}, err + } + return CPUUsage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m CPUUsage) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (CPUUsage) Name() string { + return "container.cpu.usage" +} + +// Unit returns the semantic convention unit of the instrument +func (CPUUsage) Unit() string { + return "{cpu}" +} + +// Description returns the semantic convention description of the instrument +func (CPUUsage) Description() string { + return "Container's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs." +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// CPU usage of the specific container on all available CPU cores, averaged over +// the sample window +func (m CPUUsage) Record( + ctx context.Context, + val int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+// +// CPU usage of the specific container on all available CPU cores, averaged over +// the sample window +func (m CPUUsage) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// AttrCPUMode returns an optional attribute for the "cpu.mode" semantic +// convention. It represents the CPU mode for this data point. A container's CPU +// metric SHOULD be characterized *either* by data points with no `mode` labels, +// *or only* data points with `mode` labels. +func (CPUUsage) AttrCPUMode(val CPUModeAttr) attribute.KeyValue { + return attribute.String("cpu.mode", string(val)) +} + +// DiskIO is an instrument used to record metric values conforming to the +// "container.disk.io" semantic conventions. It represents the disk bytes for the +// container. +type DiskIO struct { + metric.Int64Counter +} + +// NewDiskIO returns a new DiskIO instrument. +func NewDiskIO( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (DiskIO, error) { + // Check if the meter is nil. + if m == nil { + return DiskIO{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "container.disk.io", + append([]metric.Int64CounterOption{ + metric.WithDescription("Disk bytes for the container."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return DiskIO{noop.Int64Counter{}}, err + } + return DiskIO{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m DiskIO) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. 
+func (DiskIO) Name() string { + return "container.disk.io" +} + +// Unit returns the semantic convention unit of the instrument +func (DiskIO) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (DiskIO) Description() string { + return "Disk bytes for the container." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// The total number of bytes read/written successfully (aggregated from all +// disks). +func (m DiskIO) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// The total number of bytes read/written successfully (aggregated from all +// disks). +func (m DiskIO) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrDiskIODirection returns an optional attribute for the "disk.io.direction" +// semantic convention. It represents the disk IO operation direction. +func (DiskIO) AttrDiskIODirection(val DiskIODirectionAttr) attribute.KeyValue { + return attribute.String("disk.io.direction", string(val)) +} + +// AttrSystemDevice returns an optional attribute for the "system.device" +// semantic convention. It represents the device identifier. 
+func (DiskIO) AttrSystemDevice(val string) attribute.KeyValue { + return attribute.String("system.device", val) +} + +// FilesystemAvailable is an instrument used to record metric values conforming +// to the "container.filesystem.available" semantic conventions. It represents +// the container filesystem available bytes. +type FilesystemAvailable struct { + metric.Int64UpDownCounter +} + +// NewFilesystemAvailable returns a new FilesystemAvailable instrument. +func NewFilesystemAvailable( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (FilesystemAvailable, error) { + // Check if the meter is nil. + if m == nil { + return FilesystemAvailable{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "container.filesystem.available", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Container filesystem available bytes."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return FilesystemAvailable{noop.Int64UpDownCounter{}}, err + } + return FilesystemAvailable{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m FilesystemAvailable) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (FilesystemAvailable) Name() string { + return "container.filesystem.available" +} + +// Unit returns the semantic convention unit of the instrument +func (FilesystemAvailable) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (FilesystemAvailable) Description() string { + return "Container filesystem available bytes." +} + +// Add adds incr to the existing count for attrs. +// +// In K8s, this metric is derived from the +// [FsStats.AvailableBytes] field +// of the [ContainerStats.Rootfs] +// of the Kubelet's stats API. 
+// +// [FsStats.AvailableBytes]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats +// [ContainerStats.Rootfs]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#ContainerStats +func (m FilesystemAvailable) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// In K8s, this metric is derived from the +// [FsStats.AvailableBytes] field +// of the [ContainerStats.Rootfs] +// of the Kubelet's stats API. +// +// [FsStats.AvailableBytes]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats +// [ContainerStats.Rootfs]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#ContainerStats +func (m FilesystemAvailable) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// FilesystemCapacity is an instrument used to record metric values conforming to +// the "container.filesystem.capacity" semantic conventions. It represents the +// container filesystem capacity. +type FilesystemCapacity struct { + metric.Int64UpDownCounter +} + +// NewFilesystemCapacity returns a new FilesystemCapacity instrument. +func NewFilesystemCapacity( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (FilesystemCapacity, error) { + // Check if the meter is nil. 
+ if m == nil { + return FilesystemCapacity{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "container.filesystem.capacity", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Container filesystem capacity."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return FilesystemCapacity{noop.Int64UpDownCounter{}}, err + } + return FilesystemCapacity{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m FilesystemCapacity) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (FilesystemCapacity) Name() string { + return "container.filesystem.capacity" +} + +// Unit returns the semantic convention unit of the instrument +func (FilesystemCapacity) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (FilesystemCapacity) Description() string { + return "Container filesystem capacity." +} + +// Add adds incr to the existing count for attrs. +// +// In K8s, this metric is derived from the +// [FsStats.CapacityBytes] field +// of the [ContainerStats.Rootfs] +// of the Kubelet's stats API. +// +// [FsStats.CapacityBytes]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats +// [ContainerStats.Rootfs]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#ContainerStats +func (m FilesystemCapacity) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+// +// In K8s, this metric is derived from the +// [FsStats.CapacityBytes] field +// of the [ContainerStats.Rootfs] +// of the Kubelet's stats API. +// +// [FsStats.CapacityBytes]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats +// [ContainerStats.Rootfs]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#ContainerStats +func (m FilesystemCapacity) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// FilesystemUsage is an instrument used to record metric values conforming to +// the "container.filesystem.usage" semantic conventions. It represents the +// container filesystem usage. +type FilesystemUsage struct { + metric.Int64UpDownCounter +} + +// NewFilesystemUsage returns a new FilesystemUsage instrument. +func NewFilesystemUsage( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (FilesystemUsage, error) { + // Check if the meter is nil. + if m == nil { + return FilesystemUsage{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "container.filesystem.usage", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Container filesystem usage."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return FilesystemUsage{noop.Int64UpDownCounter{}}, err + } + return FilesystemUsage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m FilesystemUsage) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (FilesystemUsage) Name() string { + return "container.filesystem.usage" +} + +// Unit returns the semantic convention unit of the instrument +func (FilesystemUsage) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (FilesystemUsage) Description() string { + return "Container filesystem usage." +} + +// Add adds incr to the existing count for attrs. +// +// This may not equal capacity - available. +// +// In K8s, this metric is derived from the +// [FsStats.UsedBytes] field +// of the [ContainerStats.Rootfs] +// of the Kubelet's stats API. +// +// [FsStats.UsedBytes]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats +// [ContainerStats.Rootfs]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#ContainerStats +func (m FilesystemUsage) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This may not equal capacity - available. +// +// In K8s, this metric is derived from the +// [FsStats.UsedBytes] field +// of the [ContainerStats.Rootfs] +// of the Kubelet's stats API. 
+// +// [FsStats.UsedBytes]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats +// [ContainerStats.Rootfs]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#ContainerStats +func (m FilesystemUsage) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// MemoryUsage is an instrument used to record metric values conforming to the +// "container.memory.usage" semantic conventions. It represents the memory usage +// of the container. +type MemoryUsage struct { + metric.Int64Counter +} + +// NewMemoryUsage returns a new MemoryUsage instrument. +func NewMemoryUsage( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (MemoryUsage, error) { + // Check if the meter is nil. + if m == nil { + return MemoryUsage{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "container.memory.usage", + append([]metric.Int64CounterOption{ + metric.WithDescription("Memory usage of the container."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return MemoryUsage{noop.Int64Counter{}}, err + } + return MemoryUsage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m MemoryUsage) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (MemoryUsage) Name() string { + return "container.memory.usage" +} + +// Unit returns the semantic convention unit of the instrument +func (MemoryUsage) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (MemoryUsage) Description() string { + return "Memory usage of the container." +} + +// Add adds incr to the existing count for attrs. 
+// +// Memory usage of the container. +func (m MemoryUsage) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// Memory usage of the container. +func (m MemoryUsage) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// NetworkIO is an instrument used to record metric values conforming to the +// "container.network.io" semantic conventions. It represents the network bytes +// for the container. +type NetworkIO struct { + metric.Int64Counter +} + +// NewNetworkIO returns a new NetworkIO instrument. +func NewNetworkIO( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (NetworkIO, error) { + // Check if the meter is nil. + if m == nil { + return NetworkIO{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "container.network.io", + append([]metric.Int64CounterOption{ + metric.WithDescription("Network bytes for the container."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return NetworkIO{noop.Int64Counter{}}, err + } + return NetworkIO{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NetworkIO) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. 
+func (NetworkIO) Name() string { + return "container.network.io" +} + +// Unit returns the semantic convention unit of the instrument +func (NetworkIO) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (NetworkIO) Description() string { + return "Network bytes for the container." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// The number of bytes sent/received on all network interfaces by the container. +func (m NetworkIO) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// The number of bytes sent/received on all network interfaces by the container. +func (m NetworkIO) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrNetworkInterfaceName returns an optional attribute for the +// "network.interface.name" semantic convention. It represents the network +// interface name. +func (NetworkIO) AttrNetworkInterfaceName(val string) attribute.KeyValue { + return attribute.String("network.interface.name", val) +} + +// AttrNetworkIODirection returns an optional attribute for the +// "network.io.direction" semantic convention. It represents the network IO +// operation direction. 
+func (NetworkIO) AttrNetworkIODirection(val NetworkIODirectionAttr) attribute.KeyValue { + return attribute.String("network.io.direction", string(val)) +} + +// Uptime is an instrument used to record metric values conforming to the +// "container.uptime" semantic conventions. It represents the time the container +// has been running. +type Uptime struct { + metric.Float64Gauge +} + +// NewUptime returns a new Uptime instrument. +func NewUptime( + m metric.Meter, + opt ...metric.Float64GaugeOption, +) (Uptime, error) { + // Check if the meter is nil. + if m == nil { + return Uptime{noop.Float64Gauge{}}, nil + } + + i, err := m.Float64Gauge( + "container.uptime", + append([]metric.Float64GaugeOption{ + metric.WithDescription("The time the container has been running."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return Uptime{noop.Float64Gauge{}}, err + } + return Uptime{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m Uptime) Inst() metric.Float64Gauge { + return m.Float64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (Uptime) Name() string { + return "container.uptime" +} + +// Unit returns the semantic convention unit of the instrument +func (Uptime) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (Uptime) Description() string { + return "The time the container has been running." +} + +// Record records val to the current distribution for attrs. +// +// Instrumentations SHOULD use a gauge with type `double` and measure uptime in +// seconds as a floating point number with the highest precision available. +// The actual accuracy would depend on the instrumentation and operating system. 
+func (m Uptime) Record(ctx context.Context, val float64, attrs ...attribute.KeyValue) {
+	if len(attrs) == 0 {
+		m.Float64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributes(attrs...))
+	m.Float64Gauge.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// Instrumentations SHOULD use a gauge with type `double` and measure uptime in
+// seconds as a floating point number with the highest precision available.
+// The actual accuracy would depend on the instrumentation and operating system.
+func (m Uptime) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Gauge.Record(ctx, val, *o...)
+}
\ No newline at end of file
diff --git a/semconv/v1.37.0/dbconv/metric.go b/semconv/v1.37.0/dbconv/metric.go
new file mode 100644
index 00000000000..e08447e53be
--- /dev/null
+++ b/semconv/v1.37.0/dbconv/metric.go
@@ -0,0 +1,1542 @@
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package dbconv provides types and functionality for OpenTelemetry semantic
+// conventions in the "db" namespace.
+package dbconv
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/noop"
+)
+
+var (
+	addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }}
+	recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }}
+)
+
+// ClientConnectionStateAttr is an attribute conforming to the
+// db.client.connection.state semantic conventions. 
It represents the state of a +// connection in the pool. +type ClientConnectionStateAttr string + +var ( + // ClientConnectionStateIdle is the standardized value "idle" of + // ClientConnectionStateAttr. + ClientConnectionStateIdle ClientConnectionStateAttr = "idle" + // ClientConnectionStateUsed is the standardized value "used" of + // ClientConnectionStateAttr. + ClientConnectionStateUsed ClientConnectionStateAttr = "used" +) + +// SystemNameAttr is an attribute conforming to the db.system.name semantic +// conventions. It represents the database management system (DBMS) product as +// identified by the client instrumentation. +type SystemNameAttr string + +var ( + // SystemNameOtherSQL is the some other SQL database. Fallback only. + SystemNameOtherSQL SystemNameAttr = "other_sql" + // SystemNameSoftwareagAdabas is the [Adabas (Adaptable Database System)]. + // + // [Adabas (Adaptable Database System)]: https://documentation.softwareag.com/?pf=adabas + SystemNameSoftwareagAdabas SystemNameAttr = "softwareag.adabas" + // SystemNameActianIngres is the [Actian Ingres]. + // + // [Actian Ingres]: https://www.actian.com/databases/ingres/ + SystemNameActianIngres SystemNameAttr = "actian.ingres" + // SystemNameAWSDynamoDB is the [Amazon DynamoDB]. + // + // [Amazon DynamoDB]: https://aws.amazon.com/pm/dynamodb/ + SystemNameAWSDynamoDB SystemNameAttr = "aws.dynamodb" + // SystemNameAWSRedshift is the [Amazon Redshift]. + // + // [Amazon Redshift]: https://aws.amazon.com/redshift/ + SystemNameAWSRedshift SystemNameAttr = "aws.redshift" + // SystemNameAzureCosmosDB is the [Azure Cosmos DB]. + // + // [Azure Cosmos DB]: https://learn.microsoft.com/azure/cosmos-db + SystemNameAzureCosmosDB SystemNameAttr = "azure.cosmosdb" + // SystemNameIntersystemsCache is the [InterSystems Caché]. 
+ // + // [InterSystems Caché]: https://www.intersystems.com/products/cache/ + SystemNameIntersystemsCache SystemNameAttr = "intersystems.cache" + // SystemNameCassandra is the [Apache Cassandra]. + // + // [Apache Cassandra]: https://cassandra.apache.org/ + SystemNameCassandra SystemNameAttr = "cassandra" + // SystemNameClickHouse is the [ClickHouse]. + // + // [ClickHouse]: https://clickhouse.com/ + SystemNameClickHouse SystemNameAttr = "clickhouse" + // SystemNameCockroachDB is the [CockroachDB]. + // + // [CockroachDB]: https://www.cockroachlabs.com/ + SystemNameCockroachDB SystemNameAttr = "cockroachdb" + // SystemNameCouchbase is the [Couchbase]. + // + // [Couchbase]: https://www.couchbase.com/ + SystemNameCouchbase SystemNameAttr = "couchbase" + // SystemNameCouchDB is the [Apache CouchDB]. + // + // [Apache CouchDB]: https://couchdb.apache.org/ + SystemNameCouchDB SystemNameAttr = "couchdb" + // SystemNameDerby is the [Apache Derby]. + // + // [Apache Derby]: https://db.apache.org/derby/ + SystemNameDerby SystemNameAttr = "derby" + // SystemNameElasticsearch is the [Elasticsearch]. + // + // [Elasticsearch]: https://www.elastic.co/elasticsearch + SystemNameElasticsearch SystemNameAttr = "elasticsearch" + // SystemNameFirebirdSQL is the [Firebird]. + // + // [Firebird]: https://www.firebirdsql.org/ + SystemNameFirebirdSQL SystemNameAttr = "firebirdsql" + // SystemNameGCPSpanner is the [Google Cloud Spanner]. + // + // [Google Cloud Spanner]: https://cloud.google.com/spanner + SystemNameGCPSpanner SystemNameAttr = "gcp.spanner" + // SystemNameGeode is the [Apache Geode]. + // + // [Apache Geode]: https://geode.apache.org/ + SystemNameGeode SystemNameAttr = "geode" + // SystemNameH2database is the [H2 Database]. + // + // [H2 Database]: https://h2database.com/ + SystemNameH2database SystemNameAttr = "h2database" + // SystemNameHBase is the [Apache HBase]. 
+ // + // [Apache HBase]: https://hbase.apache.org/ + SystemNameHBase SystemNameAttr = "hbase" + // SystemNameHive is the [Apache Hive]. + // + // [Apache Hive]: https://hive.apache.org/ + SystemNameHive SystemNameAttr = "hive" + // SystemNameHSQLDB is the [HyperSQL Database]. + // + // [HyperSQL Database]: https://hsqldb.org/ + SystemNameHSQLDB SystemNameAttr = "hsqldb" + // SystemNameIBMDB2 is the [IBM Db2]. + // + // [IBM Db2]: https://www.ibm.com/db2 + SystemNameIBMDB2 SystemNameAttr = "ibm.db2" + // SystemNameIBMInformix is the [IBM Informix]. + // + // [IBM Informix]: https://www.ibm.com/products/informix + SystemNameIBMInformix SystemNameAttr = "ibm.informix" + // SystemNameIBMNetezza is the [IBM Netezza]. + // + // [IBM Netezza]: https://www.ibm.com/products/netezza + SystemNameIBMNetezza SystemNameAttr = "ibm.netezza" + // SystemNameInfluxDB is the [InfluxDB]. + // + // [InfluxDB]: https://www.influxdata.com/ + SystemNameInfluxDB SystemNameAttr = "influxdb" + // SystemNameInstantDB is the [Instant]. + // + // [Instant]: https://www.instantdb.com/ + SystemNameInstantDB SystemNameAttr = "instantdb" + // SystemNameMariaDB is the [MariaDB]. + // + // [MariaDB]: https://mariadb.org/ + SystemNameMariaDB SystemNameAttr = "mariadb" + // SystemNameMemcached is the [Memcached]. + // + // [Memcached]: https://memcached.org/ + SystemNameMemcached SystemNameAttr = "memcached" + // SystemNameMongoDB is the [MongoDB]. + // + // [MongoDB]: https://www.mongodb.com/ + SystemNameMongoDB SystemNameAttr = "mongodb" + // SystemNameMicrosoftSQLServer is the [Microsoft SQL Server]. + // + // [Microsoft SQL Server]: https://www.microsoft.com/sql-server + SystemNameMicrosoftSQLServer SystemNameAttr = "microsoft.sql_server" + // SystemNameMySQL is the [MySQL]. + // + // [MySQL]: https://www.mysql.com/ + SystemNameMySQL SystemNameAttr = "mysql" + // SystemNameNeo4j is the [Neo4j]. 
+ // + // [Neo4j]: https://neo4j.com/ + SystemNameNeo4j SystemNameAttr = "neo4j" + // SystemNameOpenSearch is the [OpenSearch]. + // + // [OpenSearch]: https://opensearch.org/ + SystemNameOpenSearch SystemNameAttr = "opensearch" + // SystemNameOracleDB is the [Oracle Database]. + // + // [Oracle Database]: https://www.oracle.com/database/ + SystemNameOracleDB SystemNameAttr = "oracle.db" + // SystemNamePostgreSQL is the [PostgreSQL]. + // + // [PostgreSQL]: https://www.postgresql.org/ + SystemNamePostgreSQL SystemNameAttr = "postgresql" + // SystemNameRedis is the [Redis]. + // + // [Redis]: https://redis.io/ + SystemNameRedis SystemNameAttr = "redis" + // SystemNameSAPHANA is the [SAP HANA]. + // + // [SAP HANA]: https://www.sap.com/products/technology-platform/hana/what-is-sap-hana.html + SystemNameSAPHANA SystemNameAttr = "sap.hana" + // SystemNameSAPMaxDB is the [SAP MaxDB]. + // + // [SAP MaxDB]: https://maxdb.sap.com/ + SystemNameSAPMaxDB SystemNameAttr = "sap.maxdb" + // SystemNameSQLite is the [SQLite]. + // + // [SQLite]: https://www.sqlite.org/ + SystemNameSQLite SystemNameAttr = "sqlite" + // SystemNameTeradata is the [Teradata]. + // + // [Teradata]: https://www.teradata.com/ + SystemNameTeradata SystemNameAttr = "teradata" + // SystemNameTrino is the [Trino]. + // + // [Trino]: https://trino.io/ + SystemNameTrino SystemNameAttr = "trino" +) + +// ErrorTypeAttr is an attribute conforming to the error.type semantic +// conventions. It represents the describes a class of error the operation ended +// with. +type ErrorTypeAttr string + +var ( + // ErrorTypeOther is a fallback error value to be used when the instrumentation + // doesn't define a custom value. + ErrorTypeOther ErrorTypeAttr = "_OTHER" +) + +// ClientConnectionCount is an instrument used to record metric values conforming +// to the "db.client.connection.count" semantic conventions. It represents the +// number of connections that are currently in state described by the `state` +// attribute. 
+type ClientConnectionCount struct { + metric.Int64UpDownCounter +} + +// NewClientConnectionCount returns a new ClientConnectionCount instrument. +func NewClientConnectionCount( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ClientConnectionCount, error) { + // Check if the meter is nil. + if m == nil { + return ClientConnectionCount{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "db.client.connection.count", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of connections that are currently in state described by the `state` attribute."), + metric.WithUnit("{connection}"), + }, opt...)..., + ) + if err != nil { + return ClientConnectionCount{noop.Int64UpDownCounter{}}, err + } + return ClientConnectionCount{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientConnectionCount) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ClientConnectionCount) Name() string { + return "db.client.connection.count" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientConnectionCount) Unit() string { + return "{connection}" +} + +// Description returns the semantic convention description of the instrument +func (ClientConnectionCount) Description() string { + return "The number of connections that are currently in state described by the `state` attribute." +} + +// Add adds incr to the existing count for attrs. +// +// The clientConnectionPoolName is the the name of the connection pool; unique +// within the instrumented application. In case the connection pool +// implementation doesn't provide a name, instrumentation SHOULD use a +// combination of parameters that would make the name unique, for example, +// combining attributes `server.address`, `server.port`, and `db.namespace`, +// formatted as `server.address:server.port/db.namespace`. 
Instrumentations that
+// generate connection pool name following different patterns SHOULD document it.
+//
+// The clientConnectionState is the the state of a connection in the pool
+func (m ClientConnectionCount) Add(
+	ctx context.Context,
+	incr int64,
+	clientConnectionPoolName string,
+	clientConnectionState ClientConnectionStateAttr,
+	attrs ...attribute.KeyValue,
+) {
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("db.client.connection.pool.name", clientConnectionPoolName),
+				attribute.String("db.client.connection.state", string(clientConnectionState)),
+			)...,
+		),
+	)
+
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+func (m ClientConnectionCount) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64UpDownCounter.Add(ctx, incr)
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// ClientConnectionCreateTime is an instrument used to record metric values
+// conforming to the "db.client.connection.create_time" semantic conventions. It
+// represents the time it took to create a new connection.
+type ClientConnectionCreateTime struct {
+	metric.Float64Histogram
+}
+
+// NewClientConnectionCreateTime returns a new ClientConnectionCreateTime
+// instrument.
+func NewClientConnectionCreateTime(
+	m metric.Meter,
+	opt ...metric.Float64HistogramOption,
+) (ClientConnectionCreateTime, error) {
+	// Check if the meter is nil.
+ if m == nil { + return ClientConnectionCreateTime{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "db.client.connection.create_time", + append([]metric.Float64HistogramOption{ + metric.WithDescription("The time it took to create a new connection."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ClientConnectionCreateTime{noop.Float64Histogram{}}, err + } + return ClientConnectionCreateTime{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientConnectionCreateTime) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientConnectionCreateTime) Name() string { + return "db.client.connection.create_time" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientConnectionCreateTime) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (ClientConnectionCreateTime) Description() string { + return "The time it took to create a new connection." +} + +// Record records val to the current distribution for attrs. +// +// The clientConnectionPoolName is the the name of the connection pool; unique +// within the instrumented application. In case the connection pool +// implementation doesn't provide a name, instrumentation SHOULD use a +// combination of parameters that would make the name unique, for example, +// combining attributes `server.address`, `server.port`, and `db.namespace`, +// formatted as `server.address:server.port/db.namespace`. Instrumentations that +// generate connection pool name following different patterns SHOULD document it. 
+func (m ClientConnectionCreateTime) Record(
+	ctx context.Context,
+	val float64,
+	clientConnectionPoolName string,
+	attrs ...attribute.KeyValue,
+) {
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("db.client.connection.pool.name", clientConnectionPoolName),
+			)...,
+		),
+	)
+
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m ClientConnectionCreateTime) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// ClientConnectionIdleMax is an instrument used to record metric values
+// conforming to the "db.client.connection.idle.max" semantic conventions. It
+// represents the maximum number of idle open connections allowed.
+type ClientConnectionIdleMax struct {
+	metric.Int64UpDownCounter
+}
+
+// NewClientConnectionIdleMax returns a new ClientConnectionIdleMax instrument.
+func NewClientConnectionIdleMax(
+	m metric.Meter,
+	opt ...metric.Int64UpDownCounterOption,
+) (ClientConnectionIdleMax, error) {
+	// Check if the meter is nil.
+ if m == nil { + return ClientConnectionIdleMax{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "db.client.connection.idle.max", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The maximum number of idle open connections allowed."), + metric.WithUnit("{connection}"), + }, opt...)..., + ) + if err != nil { + return ClientConnectionIdleMax{noop.Int64UpDownCounter{}}, err + } + return ClientConnectionIdleMax{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientConnectionIdleMax) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ClientConnectionIdleMax) Name() string { + return "db.client.connection.idle.max" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientConnectionIdleMax) Unit() string { + return "{connection}" +} + +// Description returns the semantic convention description of the instrument +func (ClientConnectionIdleMax) Description() string { + return "The maximum number of idle open connections allowed." +} + +// Add adds incr to the existing count for attrs. +// +// The clientConnectionPoolName is the the name of the connection pool; unique +// within the instrumented application. In case the connection pool +// implementation doesn't provide a name, instrumentation SHOULD use a +// combination of parameters that would make the name unique, for example, +// combining attributes `server.address`, `server.port`, and `db.namespace`, +// formatted as `server.address:server.port/db.namespace`. Instrumentations that +// generate connection pool name following different patterns SHOULD document it. 
+func (m ClientConnectionIdleMax) Add(
+	ctx context.Context,
+	incr int64,
+	clientConnectionPoolName string,
+	attrs ...attribute.KeyValue,
+) {
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("db.client.connection.pool.name", clientConnectionPoolName),
+			)...,
+		),
+	)
+
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+func (m ClientConnectionIdleMax) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64UpDownCounter.Add(ctx, incr)
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// ClientConnectionIdleMin is an instrument used to record metric values
+// conforming to the "db.client.connection.idle.min" semantic conventions. It
+// represents the minimum number of idle open connections allowed.
+type ClientConnectionIdleMin struct {
+	metric.Int64UpDownCounter
+}
+
+// NewClientConnectionIdleMin returns a new ClientConnectionIdleMin instrument.
+func NewClientConnectionIdleMin(
+	m metric.Meter,
+	opt ...metric.Int64UpDownCounterOption,
+) (ClientConnectionIdleMin, error) {
+	// Check if the meter is nil.
+ if m == nil { + return ClientConnectionIdleMin{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "db.client.connection.idle.min", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The minimum number of idle open connections allowed."), + metric.WithUnit("{connection}"), + }, opt...)..., + ) + if err != nil { + return ClientConnectionIdleMin{noop.Int64UpDownCounter{}}, err + } + return ClientConnectionIdleMin{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientConnectionIdleMin) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ClientConnectionIdleMin) Name() string { + return "db.client.connection.idle.min" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientConnectionIdleMin) Unit() string { + return "{connection}" +} + +// Description returns the semantic convention description of the instrument +func (ClientConnectionIdleMin) Description() string { + return "The minimum number of idle open connections allowed." +} + +// Add adds incr to the existing count for attrs. +// +// The clientConnectionPoolName is the the name of the connection pool; unique +// within the instrumented application. In case the connection pool +// implementation doesn't provide a name, instrumentation SHOULD use a +// combination of parameters that would make the name unique, for example, +// combining attributes `server.address`, `server.port`, and `db.namespace`, +// formatted as `server.address:server.port/db.namespace`. Instrumentations that +// generate connection pool name following different patterns SHOULD document it. 
+func (m ClientConnectionIdleMin) Add(
+	ctx context.Context,
+	incr int64,
+	clientConnectionPoolName string,
+	attrs ...attribute.KeyValue,
+) {
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("db.client.connection.pool.name", clientConnectionPoolName),
+			)...,
+		),
+	)
+
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+func (m ClientConnectionIdleMin) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64UpDownCounter.Add(ctx, incr)
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// ClientConnectionMax is an instrument used to record metric values conforming
+// to the "db.client.connection.max" semantic conventions. It represents the
+// maximum number of open connections allowed.
+type ClientConnectionMax struct {
+	metric.Int64UpDownCounter
+}
+
+// NewClientConnectionMax returns a new ClientConnectionMax instrument.
+func NewClientConnectionMax(
+	m metric.Meter,
+	opt ...metric.Int64UpDownCounterOption,
+) (ClientConnectionMax, error) {
+	// Check if the meter is nil.
+	if m == nil {
+		return ClientConnectionMax{noop.Int64UpDownCounter{}}, nil
+	}
+
+	i, err := m.Int64UpDownCounter(
+		"db.client.connection.max",
+		append([]metric.Int64UpDownCounterOption{
+			metric.WithDescription("The maximum number of open connections allowed."),
+			metric.WithUnit("{connection}"),
+		}, opt...)...,
+	)
+	if err != nil {
+		return ClientConnectionMax{noop.Int64UpDownCounter{}}, err
+	}
+	return ClientConnectionMax{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m ClientConnectionMax) Inst() metric.Int64UpDownCounter {
+	return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ClientConnectionMax) Name() string {
+	return "db.client.connection.max"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ClientConnectionMax) Unit() string {
+	return "{connection}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ClientConnectionMax) Description() string {
+	return "The maximum number of open connections allowed."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// The clientConnectionPoolName is the the name of the connection pool; unique
+// within the instrumented application. In case the connection pool
+// implementation doesn't provide a name, instrumentation SHOULD use a
+// combination of parameters that would make the name unique, for example,
+// combining attributes `server.address`, `server.port`, and `db.namespace`,
+// formatted as `server.address:server.port/db.namespace`. Instrumentations that
+// generate connection pool name following different patterns SHOULD document it.
+func (m ClientConnectionMax) Add(
+	ctx context.Context,
+	incr int64,
+	clientConnectionPoolName string,
+	attrs ...attribute.KeyValue,
+) {
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("db.client.connection.pool.name", clientConnectionPoolName),
+			)...,
+		),
+	)
+
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+func (m ClientConnectionMax) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ClientConnectionPendingRequests is an instrument used to record metric values +// conforming to the "db.client.connection.pending_requests" semantic +// conventions. It represents the number of current pending requests for an open +// connection. +type ClientConnectionPendingRequests struct { + metric.Int64UpDownCounter +} + +// NewClientConnectionPendingRequests returns a new +// ClientConnectionPendingRequests instrument. +func NewClientConnectionPendingRequests( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ClientConnectionPendingRequests, error) { + // Check if the meter is nil. + if m == nil { + return ClientConnectionPendingRequests{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "db.client.connection.pending_requests", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of current pending requests for an open connection."), + metric.WithUnit("{request}"), + }, opt...)..., + ) + if err != nil { + return ClientConnectionPendingRequests{noop.Int64UpDownCounter{}}, err + } + return ClientConnectionPendingRequests{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientConnectionPendingRequests) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (ClientConnectionPendingRequests) Name() string {
+	return "db.client.connection.pending_requests"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ClientConnectionPendingRequests) Unit() string {
+	return "{request}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ClientConnectionPendingRequests) Description() string {
+	return "The number of current pending requests for an open connection."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// The clientConnectionPoolName is the the name of the connection pool; unique
+// within the instrumented application. In case the connection pool
+// implementation doesn't provide a name, instrumentation SHOULD use a
+// combination of parameters that would make the name unique, for example,
+// combining attributes `server.address`, `server.port`, and `db.namespace`,
+// formatted as `server.address:server.port/db.namespace`. Instrumentations that
+// generate connection pool name following different patterns SHOULD document it.
+func (m ClientConnectionPendingRequests) Add(
+	ctx context.Context,
+	incr int64,
+	clientConnectionPoolName string,
+	attrs ...attribute.KeyValue,
+) {
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("db.client.connection.pool.name", clientConnectionPoolName),
+			)...,
+		),
+	)
+
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+func (m ClientConnectionPendingRequests) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ClientConnectionTimeouts is an instrument used to record metric values +// conforming to the "db.client.connection.timeouts" semantic conventions. It +// represents the number of connection timeouts that have occurred trying to +// obtain a connection from the pool. +type ClientConnectionTimeouts struct { + metric.Int64Counter +} + +// NewClientConnectionTimeouts returns a new ClientConnectionTimeouts instrument. +func NewClientConnectionTimeouts( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (ClientConnectionTimeouts, error) { + // Check if the meter is nil. + if m == nil { + return ClientConnectionTimeouts{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "db.client.connection.timeouts", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of connection timeouts that have occurred trying to obtain a connection from the pool."), + metric.WithUnit("{timeout}"), + }, opt...)..., + ) + if err != nil { + return ClientConnectionTimeouts{noop.Int64Counter{}}, err + } + return ClientConnectionTimeouts{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientConnectionTimeouts) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. 
+func (ClientConnectionTimeouts) Name() string { + return "db.client.connection.timeouts" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientConnectionTimeouts) Unit() string { + return "{timeout}" +} + +// Description returns the semantic convention description of the instrument +func (ClientConnectionTimeouts) Description() string { + return "The number of connection timeouts that have occurred trying to obtain a connection from the pool." +} + +// Add adds incr to the existing count for attrs. +// +// The clientConnectionPoolName is the the name of the connection pool; unique +// within the instrumented application. In case the connection pool +// implementation doesn't provide a name, instrumentation SHOULD use a +// combination of parameters that would make the name unique, for example, +// combining attributes `server.address`, `server.port`, and `db.namespace`, +// formatted as `server.address:server.port/db.namespace`. Instrumentations that +// generate connection pool name following different patterns SHOULD document it. +func (m ClientConnectionTimeouts) Add( + ctx context.Context, + incr int64, + clientConnectionPoolName string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("db.client.connection.pool.name", clientConnectionPoolName), + )..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+func (m ClientConnectionTimeouts) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// ClientConnectionUseTime is an instrument used to record metric values +// conforming to the "db.client.connection.use_time" semantic conventions. It +// represents the time between borrowing a connection and returning it to the +// pool. +type ClientConnectionUseTime struct { + metric.Float64Histogram +} + +// NewClientConnectionUseTime returns a new ClientConnectionUseTime instrument. +func NewClientConnectionUseTime( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ClientConnectionUseTime, error) { + // Check if the meter is nil. + if m == nil { + return ClientConnectionUseTime{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "db.client.connection.use_time", + append([]metric.Float64HistogramOption{ + metric.WithDescription("The time between borrowing a connection and returning it to the pool."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ClientConnectionUseTime{noop.Float64Histogram{}}, err + } + return ClientConnectionUseTime{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientConnectionUseTime) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. 
+func (ClientConnectionUseTime) Name() string { + return "db.client.connection.use_time" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientConnectionUseTime) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (ClientConnectionUseTime) Description() string { + return "The time between borrowing a connection and returning it to the pool." +} + +// Record records val to the current distribution for attrs. +// +// The clientConnectionPoolName is the the name of the connection pool; unique +// within the instrumented application. In case the connection pool +// implementation doesn't provide a name, instrumentation SHOULD use a +// combination of parameters that would make the name unique, for example, +// combining attributes `server.address`, `server.port`, and `db.namespace`, +// formatted as `server.address:server.port/db.namespace`. Instrumentations that +// generate connection pool name following different patterns SHOULD document it. +func (m ClientConnectionUseTime) Record( + ctx context.Context, + val float64, + clientConnectionPoolName string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("db.client.connection.pool.name", clientConnectionPoolName), + )..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+func (m ClientConnectionUseTime) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// ClientConnectionWaitTime is an instrument used to record metric values +// conforming to the "db.client.connection.wait_time" semantic conventions. It +// represents the time it took to obtain an open connection from the pool. +type ClientConnectionWaitTime struct { + metric.Float64Histogram +} + +// NewClientConnectionWaitTime returns a new ClientConnectionWaitTime instrument. +func NewClientConnectionWaitTime( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ClientConnectionWaitTime, error) { + // Check if the meter is nil. + if m == nil { + return ClientConnectionWaitTime{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "db.client.connection.wait_time", + append([]metric.Float64HistogramOption{ + metric.WithDescription("The time it took to obtain an open connection from the pool."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ClientConnectionWaitTime{noop.Float64Histogram{}}, err + } + return ClientConnectionWaitTime{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientConnectionWaitTime) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. 
+func (ClientConnectionWaitTime) Name() string { + return "db.client.connection.wait_time" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientConnectionWaitTime) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (ClientConnectionWaitTime) Description() string { + return "The time it took to obtain an open connection from the pool." +} + +// Record records val to the current distribution for attrs. +// +// The clientConnectionPoolName is the the name of the connection pool; unique +// within the instrumented application. In case the connection pool +// implementation doesn't provide a name, instrumentation SHOULD use a +// combination of parameters that would make the name unique, for example, +// combining attributes `server.address`, `server.port`, and `db.namespace`, +// formatted as `server.address:server.port/db.namespace`. Instrumentations that +// generate connection pool name following different patterns SHOULD document it. +func (m ClientConnectionWaitTime) Record( + ctx context.Context, + val float64, + clientConnectionPoolName string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("db.client.connection.pool.name", clientConnectionPoolName), + )..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+func (m ClientConnectionWaitTime) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// ClientOperationDuration is an instrument used to record metric values +// conforming to the "db.client.operation.duration" semantic conventions. It +// represents the duration of database client operations. +type ClientOperationDuration struct { + metric.Float64Histogram +} + +// NewClientOperationDuration returns a new ClientOperationDuration instrument. +func NewClientOperationDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ClientOperationDuration, error) { + // Check if the meter is nil. + if m == nil { + return ClientOperationDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "db.client.operation.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("Duration of database client operations."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ClientOperationDuration{noop.Float64Histogram{}}, err + } + return ClientOperationDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientOperationDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientOperationDuration) Name() string { + return "db.client.operation.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientOperationDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (ClientOperationDuration) Description() string { + return "Duration of database client operations." 
+}
+
+// Record records val to the current distribution for attrs.
+//
+// The systemName is the database management system (DBMS) product as
+// identified by the client instrumentation.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// Batch operations SHOULD be recorded as a single operation.
+func (m ClientOperationDuration) Record(
+	ctx context.Context,
+	val float64,
+	systemName SystemNameAttr,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("db.system.name", string(systemName)),
+			)...,
+		),
+	)
+
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// Batch operations SHOULD be recorded as a single operation.
+func (m ClientOperationDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		// Return so the value is not recorded a second time below.
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrCollectionName returns an optional attribute for the "db.collection.name"
+// semantic convention. It represents the name of a collection (table, container)
+// within the database.
+func (ClientOperationDuration) AttrCollectionName(val string) attribute.KeyValue {
+	return attribute.String("db.collection.name", val)
+}
+
+// AttrNamespace returns an optional attribute for the "db.namespace" semantic
+// convention. It represents the name of the database, fully qualified within the
+// server address and port.
+func (ClientOperationDuration) AttrNamespace(val string) attribute.KeyValue { + return attribute.String("db.namespace", val) +} + +// AttrOperationName returns an optional attribute for the "db.operation.name" +// semantic convention. It represents the name of the operation or command being +// executed. +func (ClientOperationDuration) AttrOperationName(val string) attribute.KeyValue { + return attribute.String("db.operation.name", val) +} + +// AttrResponseStatusCode returns an optional attribute for the +// "db.response.status_code" semantic convention. It represents the database +// response status code. +func (ClientOperationDuration) AttrResponseStatusCode(val string) attribute.KeyValue { + return attribute.String("db.response.status_code", val) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (ClientOperationDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (ClientOperationDuration) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// AttrQuerySummary returns an optional attribute for the "db.query.summary" +// semantic convention. It represents the low cardinality summary of a database +// query. +func (ClientOperationDuration) AttrQuerySummary(val string) attribute.KeyValue { + return attribute.String("db.query.summary", val) +} + +// AttrStoredProcedureName returns an optional attribute for the +// "db.stored_procedure.name" semantic convention. It represents the name of a +// stored procedure within the database. 
+func (ClientOperationDuration) AttrStoredProcedureName(val string) attribute.KeyValue { + return attribute.String("db.stored_procedure.name", val) +} + +// AttrNetworkPeerAddress returns an optional attribute for the +// "network.peer.address" semantic convention. It represents the peer address of +// the database node where the operation was performed. +func (ClientOperationDuration) AttrNetworkPeerAddress(val string) attribute.KeyValue { + return attribute.String("network.peer.address", val) +} + +// AttrNetworkPeerPort returns an optional attribute for the "network.peer.port" +// semantic convention. It represents the peer port number of the network +// connection. +func (ClientOperationDuration) AttrNetworkPeerPort(val int) attribute.KeyValue { + return attribute.Int("network.peer.port", val) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the name of the database host. +func (ClientOperationDuration) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrQueryText returns an optional attribute for the "db.query.text" semantic +// convention. It represents the database query being executed. +func (ClientOperationDuration) AttrQueryText(val string) attribute.KeyValue { + return attribute.String("db.query.text", val) +} + +// ClientResponseReturnedRows is an instrument used to record metric values +// conforming to the "db.client.response.returned_rows" semantic conventions. It +// represents the actual number of records returned by the database operation. +type ClientResponseReturnedRows struct { + metric.Int64Histogram +} + +// NewClientResponseReturnedRows returns a new ClientResponseReturnedRows +// instrument. +func NewClientResponseReturnedRows( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ClientResponseReturnedRows, error) { + // Check if the meter is nil. 
+ if m == nil { + return ClientResponseReturnedRows{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "db.client.response.returned_rows", + append([]metric.Int64HistogramOption{ + metric.WithDescription("The actual number of records returned by the database operation."), + metric.WithUnit("{row}"), + }, opt...)..., + ) + if err != nil { + return ClientResponseReturnedRows{noop.Int64Histogram{}}, err + } + return ClientResponseReturnedRows{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientResponseReturnedRows) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientResponseReturnedRows) Name() string { + return "db.client.response.returned_rows" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientResponseReturnedRows) Unit() string { + return "{row}" +} + +// Description returns the semantic convention description of the instrument +func (ClientResponseReturnedRows) Description() string { + return "The actual number of records returned by the database operation." +} + +// Record records val to the current distribution for attrs. +// +// The systemName is the the database management system (DBMS) product as +// identified by the client instrumentation. +// +// All additional attrs passed are included in the recorded value. +func (m ClientResponseReturnedRows) Record( + ctx context.Context, + val int64, + systemName SystemNameAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("db.system.name", string(systemName)), + )..., + ), + ) + + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+// RecordSet records val to the current distribution for set.
+func (m ClientResponseReturnedRows) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Histogram.Record(ctx, val)
+		// Return so the value is not recorded a second time below.
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrCollectionName returns an optional attribute for the "db.collection.name"
+// semantic convention. It represents the name of a collection (table, container)
+// within the database.
+func (ClientResponseReturnedRows) AttrCollectionName(val string) attribute.KeyValue {
+	return attribute.String("db.collection.name", val)
+}
+
+// AttrNamespace returns an optional attribute for the "db.namespace" semantic
+// convention. It represents the name of the database, fully qualified within the
+// server address and port.
+func (ClientResponseReturnedRows) AttrNamespace(val string) attribute.KeyValue {
+	return attribute.String("db.namespace", val)
+}
+
+// AttrOperationName returns an optional attribute for the "db.operation.name"
+// semantic convention. It represents the name of the operation or command being
+// executed.
+func (ClientResponseReturnedRows) AttrOperationName(val string) attribute.KeyValue {
+	return attribute.String("db.operation.name", val)
+}
+
+// AttrResponseStatusCode returns an optional attribute for the
+// "db.response.status_code" semantic convention. It represents the database
+// response status code.
+func (ClientResponseReturnedRows) AttrResponseStatusCode(val string) attribute.KeyValue {
+	return attribute.String("db.response.status_code", val)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents the describes a class of error the operation ended
+// with.
+func (ClientResponseReturnedRows) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (ClientResponseReturnedRows) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// AttrQuerySummary returns an optional attribute for the "db.query.summary" +// semantic convention. It represents the low cardinality summary of a database +// query. +func (ClientResponseReturnedRows) AttrQuerySummary(val string) attribute.KeyValue { + return attribute.String("db.query.summary", val) +} + +// AttrNetworkPeerAddress returns an optional attribute for the +// "network.peer.address" semantic convention. It represents the peer address of +// the database node where the operation was performed. +func (ClientResponseReturnedRows) AttrNetworkPeerAddress(val string) attribute.KeyValue { + return attribute.String("network.peer.address", val) +} + +// AttrNetworkPeerPort returns an optional attribute for the "network.peer.port" +// semantic convention. It represents the peer port number of the network +// connection. +func (ClientResponseReturnedRows) AttrNetworkPeerPort(val int) attribute.KeyValue { + return attribute.Int("network.peer.port", val) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the name of the database host. +func (ClientResponseReturnedRows) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrQueryText returns an optional attribute for the "db.query.text" semantic +// convention. It represents the database query being executed. 
+func (ClientResponseReturnedRows) AttrQueryText(val string) attribute.KeyValue {
+	return attribute.String("db.query.text", val)
+}
\ No newline at end of file
diff --git a/semconv/v1.37.0/dnsconv/metric.go b/semconv/v1.37.0/dnsconv/metric.go
new file mode 100644
index 00000000000..b5348a23ffd
--- /dev/null
+++ b/semconv/v1.37.0/dnsconv/metric.go
@@ -0,0 +1,139 @@
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package dnsconv provides types and functionality for OpenTelemetry semantic
+// conventions in the "dns" namespace.
+package dnsconv
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/noop"
+)
+
+var (
+	addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }}
+	recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }}
+)
+
+// ErrorTypeAttr is an attribute conforming to the error.type semantic
+// conventions. It represents the describes the error the DNS lookup failed with.
+type ErrorTypeAttr string
+
+var (
+	// ErrorTypeOther is a fallback error value to be used when the instrumentation
+	// doesn't define a custom value.
+	ErrorTypeOther ErrorTypeAttr = "_OTHER"
+)
+
+// LookupDuration is an instrument used to record metric values conforming to the
+// "dns.lookup.duration" semantic conventions. It represents the measures the
+// time taken to perform a DNS lookup.
+type LookupDuration struct {
+	metric.Float64Histogram
+}
+
+// NewLookupDuration returns a new LookupDuration instrument.
+func NewLookupDuration(
+	m metric.Meter,
+	opt ...metric.Float64HistogramOption,
+) (LookupDuration, error) {
+	// Check if the meter is nil.
+ if m == nil { + return LookupDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "dns.lookup.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("Measures the time taken to perform a DNS lookup."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return LookupDuration{noop.Float64Histogram{}}, err + } + return LookupDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m LookupDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (LookupDuration) Name() string { + return "dns.lookup.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (LookupDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (LookupDuration) Description() string { + return "Measures the time taken to perform a DNS lookup." +} + +// Record records val to the current distribution for attrs. +// +// The questionName is the the name being queried. +// +// All additional attrs passed are included in the recorded value. +func (m LookupDuration) Record( + ctx context.Context, + val float64, + questionName string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("dns.question.name", questionName), + )..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+func (m LookupDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes the error the DNS lookup failed with. +func (LookupDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} \ No newline at end of file diff --git a/semconv/v1.37.0/doc.go b/semconv/v1.37.0/doc.go new file mode 100644 index 00000000000..11101032107 --- /dev/null +++ b/semconv/v1.37.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the v1.37.0 +// version of the OpenTelemetry semantic conventions. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" diff --git a/semconv/v1.37.0/error_type.go b/semconv/v1.37.0/error_type.go new file mode 100644 index 00000000000..666bded4baf --- /dev/null +++ b/semconv/v1.37.0/error_type.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" + +import ( + "fmt" + "reflect" + + "go.opentelemetry.io/otel/attribute" +) + +// ErrorType returns an [attribute.KeyValue] identifying the error type of err. +func ErrorType(err error) attribute.KeyValue { + if err == nil { + return ErrorTypeOther + } + t := reflect.TypeOf(err) + var value string + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. 
+ value = t.String() + } else { + value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) + } + + if value == "" { + return ErrorTypeOther + } + return ErrorTypeKey.String(value) +} diff --git a/semconv/v1.37.0/error_type_test.go b/semconv/v1.37.0/error_type_test.go new file mode 100644 index 00000000000..457c313e8f6 --- /dev/null +++ b/semconv/v1.37.0/error_type_test.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" + +import ( + "errors" + "fmt" + "reflect" + "testing" + + "go.opentelemetry.io/otel/attribute" +) + +type CustomError struct{} + +func (CustomError) Error() string { + return "custom error" +} + +func TestErrorType(t *testing.T) { + customErr := CustomError{} + builtinErr := errors.New("something went wrong") + var nilErr error + + wantCustomType := reflect.TypeOf(customErr) + wantCustomStr := fmt.Sprintf("%s.%s", wantCustomType.PkgPath(), wantCustomType.Name()) + + tests := []struct { + name string + err error + want attribute.KeyValue + }{ + { + name: "BuiltinError", + err: builtinErr, + want: attribute.String("error.type", "*errors.errorString"), + }, + { + name: "CustomError", + err: customErr, + want: attribute.String("error.type", wantCustomStr), + }, + { + name: "NilError", + err: nilErr, + want: ErrorTypeOther, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := ErrorType(tt.err) + if got != tt.want { + t.Errorf("ErrorType(%v) = %v, want %v", tt.err, got, tt.want) + } + }) + } +} diff --git a/semconv/v1.37.0/exception.go b/semconv/v1.37.0/exception.go new file mode 100644 index 00000000000..e67469a4f69 --- /dev/null +++ b/semconv/v1.37.0/exception.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0" + +const ( + // ExceptionEventName is the name of the Span event representing 
an exception.
+	ExceptionEventName = "exception"
+)
diff --git a/semconv/v1.37.0/faasconv/metric.go b/semconv/v1.37.0/faasconv/metric.go
new file mode 100644
index 00000000000..f2f0510bbaa
--- /dev/null
+++ b/semconv/v1.37.0/faasconv/metric.go
@@ -0,0 +1,964 @@
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package faasconv provides types and functionality for OpenTelemetry semantic
+// conventions in the "faas" namespace.
+package faasconv
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/noop"
+)
+
+var (
+	addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }}
+	recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }}
+)
+
+// TriggerAttr is an attribute conforming to the faas.trigger semantic
+// conventions. It represents the type of the trigger which caused this function
+// invocation.
+type TriggerAttr string
+
+var (
+	// TriggerDatasource is a response to some data source operation such as a
+	// database or filesystem read/write.
+	TriggerDatasource TriggerAttr = "datasource"
+	// TriggerHTTP is the to provide an answer to an inbound HTTP request.
+	TriggerHTTP TriggerAttr = "http"
+	// TriggerPubSub is a function is set to be executed when messages are sent to a
+	// messaging system.
+	TriggerPubSub TriggerAttr = "pubsub"
+	// TriggerTimer is a function is scheduled to be executed regularly.
+	TriggerTimer TriggerAttr = "timer"
+	// TriggerOther is the if none of the others apply.
+	TriggerOther TriggerAttr = "other"
+)
+
+// Coldstarts is an instrument used to record metric values conforming to the
+// "faas.coldstarts" semantic conventions. It represents the number of invocation
+// cold starts.
+type Coldstarts struct {
+	metric.Int64Counter
+}
+
+// NewColdstarts returns a new Coldstarts instrument.
+func NewColdstarts( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (Coldstarts, error) { + // Check if the meter is nil. + if m == nil { + return Coldstarts{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "faas.coldstarts", + append([]metric.Int64CounterOption{ + metric.WithDescription("Number of invocation cold starts."), + metric.WithUnit("{coldstart}"), + }, opt...)..., + ) + if err != nil { + return Coldstarts{noop.Int64Counter{}}, err + } + return Coldstarts{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m Coldstarts) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (Coldstarts) Name() string { + return "faas.coldstarts" +} + +// Unit returns the semantic convention unit of the instrument +func (Coldstarts) Unit() string { + return "{coldstart}" +} + +// Description returns the semantic convention description of the instrument +func (Coldstarts) Description() string { + return "Number of invocation cold starts." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m Coldstarts) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m Coldstarts) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) 
+} + +// AttrTrigger returns an optional attribute for the "faas.trigger" semantic +// convention. It represents the type of the trigger which caused this function +// invocation. +func (Coldstarts) AttrTrigger(val TriggerAttr) attribute.KeyValue { + return attribute.String("faas.trigger", string(val)) +} + +// CPUUsage is an instrument used to record metric values conforming to the +// "faas.cpu_usage" semantic conventions. It represents the distribution of CPU +// usage per invocation. +type CPUUsage struct { + metric.Float64Histogram +} + +// NewCPUUsage returns a new CPUUsage instrument. +func NewCPUUsage( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (CPUUsage, error) { + // Check if the meter is nil. + if m == nil { + return CPUUsage{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "faas.cpu_usage", + append([]metric.Float64HistogramOption{ + metric.WithDescription("Distribution of CPU usage per invocation."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return CPUUsage{noop.Float64Histogram{}}, err + } + return CPUUsage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m CPUUsage) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (CPUUsage) Name() string { + return "faas.cpu_usage" +} + +// Unit returns the semantic convention unit of the instrument +func (CPUUsage) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (CPUUsage) Description() string { + return "Distribution of CPU usage per invocation." +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. 
+func (m CPUUsage) Record( + ctx context.Context, + val float64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +func (m CPUUsage) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// AttrTrigger returns an optional attribute for the "faas.trigger" semantic +// convention. It represents the type of the trigger which caused this function +// invocation. +func (CPUUsage) AttrTrigger(val TriggerAttr) attribute.KeyValue { + return attribute.String("faas.trigger", string(val)) +} + +// Errors is an instrument used to record metric values conforming to the +// "faas.errors" semantic conventions. It represents the number of invocation +// errors. +type Errors struct { + metric.Int64Counter +} + +// NewErrors returns a new Errors instrument. +func NewErrors( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (Errors, error) { + // Check if the meter is nil. + if m == nil { + return Errors{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "faas.errors", + append([]metric.Int64CounterOption{ + metric.WithDescription("Number of invocation errors."), + metric.WithUnit("{error}"), + }, opt...)..., + ) + if err != nil { + return Errors{noop.Int64Counter{}}, err + } + return Errors{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m Errors) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (Errors) Name() string { + return "faas.errors" +} + +// Unit returns the semantic convention unit of the instrument +func (Errors) Unit() string { + return "{error}" +} + +// Description returns the semantic convention description of the instrument +func (Errors) Description() string { + return "Number of invocation errors." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m Errors) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m Errors) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrTrigger returns an optional attribute for the "faas.trigger" semantic +// convention. It represents the type of the trigger which caused this function +// invocation. +func (Errors) AttrTrigger(val TriggerAttr) attribute.KeyValue { + return attribute.String("faas.trigger", string(val)) +} + +// InitDuration is an instrument used to record metric values conforming to the +// "faas.init_duration" semantic conventions. It represents the measures the +// duration of the function's initialization, such as a cold start. 
+type InitDuration struct { + metric.Float64Histogram +} + +// NewInitDuration returns a new InitDuration instrument. +func NewInitDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (InitDuration, error) { + // Check if the meter is nil. + if m == nil { + return InitDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "faas.init_duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("Measures the duration of the function's initialization, such as a cold start."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return InitDuration{noop.Float64Histogram{}}, err + } + return InitDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m InitDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (InitDuration) Name() string { + return "faas.init_duration" +} + +// Unit returns the semantic convention unit of the instrument +func (InitDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (InitDuration) Description() string { + return "Measures the duration of the function's initialization, such as a cold start." +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m InitDuration) Record( + ctx context.Context, + val float64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+func (m InitDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// AttrTrigger returns an optional attribute for the "faas.trigger" semantic +// convention. It represents the type of the trigger which caused this function +// invocation. +func (InitDuration) AttrTrigger(val TriggerAttr) attribute.KeyValue { + return attribute.String("faas.trigger", string(val)) +} + +// Invocations is an instrument used to record metric values conforming to the +// "faas.invocations" semantic conventions. It represents the number of +// successful invocations. +type Invocations struct { + metric.Int64Counter +} + +// NewInvocations returns a new Invocations instrument. +func NewInvocations( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (Invocations, error) { + // Check if the meter is nil. + if m == nil { + return Invocations{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "faas.invocations", + append([]metric.Int64CounterOption{ + metric.WithDescription("Number of successful invocations."), + metric.WithUnit("{invocation}"), + }, opt...)..., + ) + if err != nil { + return Invocations{noop.Int64Counter{}}, err + } + return Invocations{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m Invocations) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. 
+func (Invocations) Name() string { + return "faas.invocations" +} + +// Unit returns the semantic convention unit of the instrument +func (Invocations) Unit() string { + return "{invocation}" +} + +// Description returns the semantic convention description of the instrument +func (Invocations) Description() string { + return "Number of successful invocations." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m Invocations) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m Invocations) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrTrigger returns an optional attribute for the "faas.trigger" semantic +// convention. It represents the type of the trigger which caused this function +// invocation. +func (Invocations) AttrTrigger(val TriggerAttr) attribute.KeyValue { + return attribute.String("faas.trigger", string(val)) +} + +// InvokeDuration is an instrument used to record metric values conforming to the +// "faas.invoke_duration" semantic conventions. It represents the measures the +// duration of the function's logic execution. +type InvokeDuration struct { + metric.Float64Histogram +} + +// NewInvokeDuration returns a new InvokeDuration instrument. 
+func NewInvokeDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (InvokeDuration, error) { + // Check if the meter is nil. + if m == nil { + return InvokeDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "faas.invoke_duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("Measures the duration of the function's logic execution."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return InvokeDuration{noop.Float64Histogram{}}, err + } + return InvokeDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m InvokeDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (InvokeDuration) Name() string { + return "faas.invoke_duration" +} + +// Unit returns the semantic convention unit of the instrument +func (InvokeDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (InvokeDuration) Description() string { + return "Measures the duration of the function's logic execution." +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m InvokeDuration) Record( + ctx context.Context, + val float64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+func (m InvokeDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// AttrTrigger returns an optional attribute for the "faas.trigger" semantic +// convention. It represents the type of the trigger which caused this function +// invocation. +func (InvokeDuration) AttrTrigger(val TriggerAttr) attribute.KeyValue { + return attribute.String("faas.trigger", string(val)) +} + +// MemUsage is an instrument used to record metric values conforming to the +// "faas.mem_usage" semantic conventions. It represents the distribution of max +// memory usage per invocation. +type MemUsage struct { + metric.Int64Histogram +} + +// NewMemUsage returns a new MemUsage instrument. +func NewMemUsage( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (MemUsage, error) { + // Check if the meter is nil. + if m == nil { + return MemUsage{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "faas.mem_usage", + append([]metric.Int64HistogramOption{ + metric.WithDescription("Distribution of max memory usage per invocation."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return MemUsage{noop.Int64Histogram{}}, err + } + return MemUsage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m MemUsage) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. 
+func (MemUsage) Name() string { + return "faas.mem_usage" +} + +// Unit returns the semantic convention unit of the instrument +func (MemUsage) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (MemUsage) Description() string { + return "Distribution of max memory usage per invocation." +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m MemUsage) Record( + ctx context.Context, + val int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +func (m MemUsage) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// AttrTrigger returns an optional attribute for the "faas.trigger" semantic +// convention. It represents the type of the trigger which caused this function +// invocation. +func (MemUsage) AttrTrigger(val TriggerAttr) attribute.KeyValue { + return attribute.String("faas.trigger", string(val)) +} + +// NetIO is an instrument used to record metric values conforming to the +// "faas.net_io" semantic conventions. It represents the distribution of net I/O +// usage per invocation. +type NetIO struct { + metric.Int64Histogram +} + +// NewNetIO returns a new NetIO instrument. 
+func NewNetIO( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (NetIO, error) { + // Check if the meter is nil. + if m == nil { + return NetIO{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "faas.net_io", + append([]metric.Int64HistogramOption{ + metric.WithDescription("Distribution of net I/O usage per invocation."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return NetIO{noop.Int64Histogram{}}, err + } + return NetIO{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NetIO) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (NetIO) Name() string { + return "faas.net_io" +} + +// Unit returns the semantic convention unit of the instrument +func (NetIO) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (NetIO) Description() string { + return "Distribution of net I/O usage per invocation." +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m NetIO) Record( + ctx context.Context, + val int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +func (m NetIO) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) 
+} + +// AttrTrigger returns an optional attribute for the "faas.trigger" semantic +// convention. It represents the type of the trigger which caused this function +// invocation. +func (NetIO) AttrTrigger(val TriggerAttr) attribute.KeyValue { + return attribute.String("faas.trigger", string(val)) +} + +// Timeouts is an instrument used to record metric values conforming to the +// "faas.timeouts" semantic conventions. It represents the number of invocation +// timeouts. +type Timeouts struct { + metric.Int64Counter +} + +// NewTimeouts returns a new Timeouts instrument. +func NewTimeouts( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (Timeouts, error) { + // Check if the meter is nil. + if m == nil { + return Timeouts{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "faas.timeouts", + append([]metric.Int64CounterOption{ + metric.WithDescription("Number of invocation timeouts."), + metric.WithUnit("{timeout}"), + }, opt...)..., + ) + if err != nil { + return Timeouts{noop.Int64Counter{}}, err + } + return Timeouts{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m Timeouts) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (Timeouts) Name() string { + return "faas.timeouts" +} + +// Unit returns the semantic convention unit of the instrument +func (Timeouts) Unit() string { + return "{timeout}" +} + +// Description returns the semantic convention description of the instrument +func (Timeouts) Description() string { + return "Number of invocation timeouts." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. 
+func (m Timeouts) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m Timeouts) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrTrigger returns an optional attribute for the "faas.trigger" semantic +// convention. It represents the type of the trigger which caused this function +// invocation. +func (Timeouts) AttrTrigger(val TriggerAttr) attribute.KeyValue { + return attribute.String("faas.trigger", string(val)) +} \ No newline at end of file diff --git a/semconv/v1.37.0/genaiconv/metric.go b/semconv/v1.37.0/genaiconv/metric.go new file mode 100644 index 00000000000..a697015c917 --- /dev/null +++ b/semconv/v1.37.0/genaiconv/metric.go @@ -0,0 +1,821 @@ +// Code generated from semantic convention specification. DO NOT EDIT. + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package httpconv provides types and functionality for OpenTelemetry semantic +// conventions in the "gen_ai" namespace. 
+package genaiconv + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" +) + +var ( + addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }} + recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }} +) + +// ErrorTypeAttr is an attribute conforming to the error.type semantic +// conventions. It represents the describes a class of error the operation ended +// with. +type ErrorTypeAttr string + +var ( + // ErrorTypeOther is a fallback error value to be used when the instrumentation + // doesn't define a custom value. + ErrorTypeOther ErrorTypeAttr = "_OTHER" +) + +// OperationNameAttr is an attribute conforming to the gen_ai.operation.name +// semantic conventions. It represents the name of the operation being performed. +type OperationNameAttr string + +var ( + // OperationNameChat is the chat completion operation such as [OpenAI Chat API] + // . + // + // [OpenAI Chat API]: https://platform.openai.com/docs/api-reference/chat + OperationNameChat OperationNameAttr = "chat" + // OperationNameGenerateContent is the multimodal content generation operation + // such as [Gemini Generate Content]. + // + // [Gemini Generate Content]: https://ai.google.dev/api/generate-content + OperationNameGenerateContent OperationNameAttr = "generate_content" + // OperationNameTextCompletion is the text completions operation such as + // [OpenAI Completions API (Legacy)]. + // + // [OpenAI Completions API (Legacy)]: https://platform.openai.com/docs/api-reference/completions + OperationNameTextCompletion OperationNameAttr = "text_completion" + // OperationNameEmbeddings is the embeddings operation such as + // [OpenAI Create embeddings API]. 
+ // + // [OpenAI Create embeddings API]: https://platform.openai.com/docs/api-reference/embeddings/create + OperationNameEmbeddings OperationNameAttr = "embeddings" + // OperationNameCreateAgent is the create GenAI agent. + OperationNameCreateAgent OperationNameAttr = "create_agent" + // OperationNameInvokeAgent is the invoke GenAI agent. + OperationNameInvokeAgent OperationNameAttr = "invoke_agent" + // OperationNameExecuteTool is the execute a tool. + OperationNameExecuteTool OperationNameAttr = "execute_tool" +) + +// ProviderNameAttr is an attribute conforming to the gen_ai.provider.name +// semantic conventions. It represents the Generative AI provider as identified +// by the client or server instrumentation. +type ProviderNameAttr string + +var ( + // ProviderNameOpenAI is the [OpenAI]. + // + // [OpenAI]: https://openai.com/ + ProviderNameOpenAI ProviderNameAttr = "openai" + // ProviderNameGCPGenAI is the any Google generative AI endpoint. + ProviderNameGCPGenAI ProviderNameAttr = "gcp.gen_ai" + // ProviderNameGCPVertexAI is the [Vertex AI]. + // + // [Vertex AI]: https://cloud.google.com/vertex-ai + ProviderNameGCPVertexAI ProviderNameAttr = "gcp.vertex_ai" + // ProviderNameGCPGemini is the [Gemini]. + // + // [Gemini]: https://cloud.google.com/products/gemini + ProviderNameGCPGemini ProviderNameAttr = "gcp.gemini" + // ProviderNameAnthropic is the [Anthropic]. + // + // [Anthropic]: https://www.anthropic.com/ + ProviderNameAnthropic ProviderNameAttr = "anthropic" + // ProviderNameCohere is the [Cohere]. + // + // [Cohere]: https://cohere.com/ + ProviderNameCohere ProviderNameAttr = "cohere" + // ProviderNameAzureAIInference is the azure AI Inference. + ProviderNameAzureAIInference ProviderNameAttr = "azure.ai.inference" + // ProviderNameAzureAIOpenAI is the [Azure OpenAI]. 
+ // + // [Azure OpenAI]: https://azure.microsoft.com/products/ai-services/openai-service/ + ProviderNameAzureAIOpenAI ProviderNameAttr = "azure.ai.openai" + // ProviderNameIBMWatsonxAI is the [IBM Watsonx AI]. + // + // [IBM Watsonx AI]: https://www.ibm.com/products/watsonx-ai + ProviderNameIBMWatsonxAI ProviderNameAttr = "ibm.watsonx.ai" + // ProviderNameAWSBedrock is the [AWS Bedrock]. + // + // [AWS Bedrock]: https://aws.amazon.com/bedrock + ProviderNameAWSBedrock ProviderNameAttr = "aws.bedrock" + // ProviderNamePerplexity is the [Perplexity]. + // + // [Perplexity]: https://www.perplexity.ai/ + ProviderNamePerplexity ProviderNameAttr = "perplexity" + // ProviderNameXAI is the [xAI]. + // + // [xAI]: https://x.ai/ + ProviderNameXAI ProviderNameAttr = "x_ai" + // ProviderNameDeepseek is the [DeepSeek]. + // + // [DeepSeek]: https://www.deepseek.com/ + ProviderNameDeepseek ProviderNameAttr = "deepseek" + // ProviderNameGroq is the [Groq]. + // + // [Groq]: https://groq.com/ + ProviderNameGroq ProviderNameAttr = "groq" + // ProviderNameMistralAI is the [Mistral AI]. + // + // [Mistral AI]: https://mistral.ai/ + ProviderNameMistralAI ProviderNameAttr = "mistral_ai" +) + +// TokenTypeAttr is an attribute conforming to the gen_ai.token.type semantic +// conventions. It represents the type of token being counted. +type TokenTypeAttr string + +var ( + // TokenTypeInput is the input tokens (prompt, input, etc.). + TokenTypeInput TokenTypeAttr = "input" + // TokenTypeOutput is the output tokens (completion, response, etc.). + TokenTypeOutput TokenTypeAttr = "output" +) + +// ClientOperationDuration is an instrument used to record metric values +// conforming to the "gen_ai.client.operation.duration" semantic conventions. It +// represents the genAI operation duration. +type ClientOperationDuration struct { + metric.Float64Histogram +} + +// NewClientOperationDuration returns a new ClientOperationDuration instrument. 
+func NewClientOperationDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ClientOperationDuration, error) { + // Check if the meter is nil. + if m == nil { + return ClientOperationDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "gen_ai.client.operation.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("GenAI operation duration."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ClientOperationDuration{noop.Float64Histogram{}}, err + } + return ClientOperationDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientOperationDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientOperationDuration) Name() string { + return "gen_ai.client.operation.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientOperationDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (ClientOperationDuration) Description() string { + return "GenAI operation duration." +} + +// Record records val to the current distribution for attrs. +// +// The operationName is the the name of the operation being performed. +// +// The providerName is the the Generative AI provider as identified by the client +// or server instrumentation. +// +// All additional attrs passed are included in the recorded value. 
+func (m ClientOperationDuration) Record( + ctx context.Context, + val float64, + operationName OperationNameAttr, + providerName ProviderNameAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("gen_ai.operation.name", string(operationName)), + attribute.String("gen_ai.provider.name", string(providerName)), + )..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +func (m ClientOperationDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (ClientOperationDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrRequestModel returns an optional attribute for the "gen_ai.request.model" +// semantic convention. It represents the name of the GenAI model a request is +// being made to. +func (ClientOperationDuration) AttrRequestModel(val string) attribute.KeyValue { + return attribute.String("gen_ai.request.model", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the genAI server port. 
func (ClientOperationDuration) AttrServerPort(val int) attribute.KeyValue {
	return attribute.Int("server.port", val)
}

// AttrResponseModel returns an optional attribute for the
// "gen_ai.response.model" semantic convention. It represents the name of the
// model that generated the response.
func (ClientOperationDuration) AttrResponseModel(val string) attribute.KeyValue {
	return attribute.String("gen_ai.response.model", val)
}

// AttrServerAddress returns an optional attribute for the "server.address"
// semantic convention. It represents the genAI server address.
func (ClientOperationDuration) AttrServerAddress(val string) attribute.KeyValue {
	return attribute.String("server.address", val)
}

// ClientTokenUsage is an instrument used to record metric values conforming to
// the "gen_ai.client.token.usage" semantic conventions. It represents the number
// of input and output tokens used.
type ClientTokenUsage struct {
	metric.Int64Histogram
}

// NewClientTokenUsage returns a new ClientTokenUsage instrument.
func NewClientTokenUsage(
	m metric.Meter,
	opt ...metric.Int64HistogramOption,
) (ClientTokenUsage, error) {
	// Check if the meter is nil; fall back to a no-op instrument so the
	// returned value is always safe to use.
	if m == nil {
		return ClientTokenUsage{noop.Int64Histogram{}}, nil
	}

	i, err := m.Int64Histogram(
		"gen_ai.client.token.usage",
		append([]metric.Int64HistogramOption{
			metric.WithDescription("Number of input and output tokens used."),
			metric.WithUnit("{token}"),
		}, opt...)...,
	)
	if err != nil {
		return ClientTokenUsage{noop.Int64Histogram{}}, err
	}
	return ClientTokenUsage{i}, nil
}

// Inst returns the underlying metric instrument.
func (m ClientTokenUsage) Inst() metric.Int64Histogram {
	return m.Int64Histogram
}

// Name returns the semantic convention name of the instrument.
+func (ClientTokenUsage) Name() string { + return "gen_ai.client.token.usage" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientTokenUsage) Unit() string { + return "{token}" +} + +// Description returns the semantic convention description of the instrument +func (ClientTokenUsage) Description() string { + return "Number of input and output tokens used." +} + +// Record records val to the current distribution for attrs. +// +// The operationName is the the name of the operation being performed. +// +// The providerName is the the Generative AI provider as identified by the client +// or server instrumentation. +// +// The tokenType is the the type of token being counted. +// +// All additional attrs passed are included in the recorded value. +func (m ClientTokenUsage) Record( + ctx context.Context, + val int64, + operationName OperationNameAttr, + providerName ProviderNameAttr, + tokenType TokenTypeAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("gen_ai.operation.name", string(operationName)), + attribute.String("gen_ai.provider.name", string(providerName)), + attribute.String("gen_ai.token.type", string(tokenType)), + )..., + ), + ) + + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +func (m ClientTokenUsage) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) 
+} + +// AttrRequestModel returns an optional attribute for the "gen_ai.request.model" +// semantic convention. It represents the name of the GenAI model a request is +// being made to. +func (ClientTokenUsage) AttrRequestModel(val string) attribute.KeyValue { + return attribute.String("gen_ai.request.model", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the genAI server port. +func (ClientTokenUsage) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// AttrResponseModel returns an optional attribute for the +// "gen_ai.response.model" semantic convention. It represents the name of the +// model that generated the response. +func (ClientTokenUsage) AttrResponseModel(val string) attribute.KeyValue { + return attribute.String("gen_ai.response.model", val) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the genAI server address. +func (ClientTokenUsage) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// ServerRequestDuration is an instrument used to record metric values conforming +// to the "gen_ai.server.request.duration" semantic conventions. It represents +// the generative AI server request duration such as time-to-last byte or last +// output token. +type ServerRequestDuration struct { + metric.Float64Histogram +} + +// NewServerRequestDuration returns a new ServerRequestDuration instrument. +func NewServerRequestDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ServerRequestDuration, error) { + // Check if the meter is nil. 
+ if m == nil { + return ServerRequestDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "gen_ai.server.request.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("Generative AI server request duration such as time-to-last byte or last output token."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ServerRequestDuration{noop.Float64Histogram{}}, err + } + return ServerRequestDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerRequestDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ServerRequestDuration) Name() string { + return "gen_ai.server.request.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerRequestDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (ServerRequestDuration) Description() string { + return "Generative AI server request duration such as time-to-last byte or last output token." +} + +// Record records val to the current distribution for attrs. +// +// The operationName is the the name of the operation being performed. +// +// The providerName is the the Generative AI provider as identified by the client +// or server instrumentation. +// +// All additional attrs passed are included in the recorded value. 
+func (m ServerRequestDuration) Record( + ctx context.Context, + val float64, + operationName OperationNameAttr, + providerName ProviderNameAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("gen_ai.operation.name", string(operationName)), + attribute.String("gen_ai.provider.name", string(providerName)), + )..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +func (m ServerRequestDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (ServerRequestDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrRequestModel returns an optional attribute for the "gen_ai.request.model" +// semantic convention. It represents the name of the GenAI model a request is +// being made to. +func (ServerRequestDuration) AttrRequestModel(val string) attribute.KeyValue { + return attribute.String("gen_ai.request.model", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the genAI server port. 
func (ServerRequestDuration) AttrServerPort(val int) attribute.KeyValue {
	return attribute.Int("server.port", val)
}

// AttrResponseModel returns an optional attribute for the
// "gen_ai.response.model" semantic convention. It represents the name of the
// model that generated the response.
func (ServerRequestDuration) AttrResponseModel(val string) attribute.KeyValue {
	return attribute.String("gen_ai.response.model", val)
}

// AttrServerAddress returns an optional attribute for the "server.address"
// semantic convention. It represents the genAI server address.
func (ServerRequestDuration) AttrServerAddress(val string) attribute.KeyValue {
	return attribute.String("server.address", val)
}

// ServerTimePerOutputToken is an instrument used to record metric values
// conforming to the "gen_ai.server.time_per_output_token" semantic conventions.
// It represents the time per output token generated after the first token for
// successful responses.
type ServerTimePerOutputToken struct {
	metric.Float64Histogram
}

// NewServerTimePerOutputToken returns a new ServerTimePerOutputToken instrument.
func NewServerTimePerOutputToken(
	m metric.Meter,
	opt ...metric.Float64HistogramOption,
) (ServerTimePerOutputToken, error) {
	// Check if the meter is nil; fall back to a no-op instrument so the
	// returned value is always safe to use.
	if m == nil {
		return ServerTimePerOutputToken{noop.Float64Histogram{}}, nil
	}

	i, err := m.Float64Histogram(
		"gen_ai.server.time_per_output_token",
		append([]metric.Float64HistogramOption{
			metric.WithDescription("Time per output token generated after the first token for successful responses."),
			metric.WithUnit("s"),
		}, opt...)...,
	)
	if err != nil {
		return ServerTimePerOutputToken{noop.Float64Histogram{}}, err
	}
	return ServerTimePerOutputToken{i}, nil
}

// Inst returns the underlying metric instrument.
+func (m ServerTimePerOutputToken) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ServerTimePerOutputToken) Name() string { + return "gen_ai.server.time_per_output_token" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerTimePerOutputToken) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (ServerTimePerOutputToken) Description() string { + return "Time per output token generated after the first token for successful responses." +} + +// Record records val to the current distribution for attrs. +// +// The operationName is the the name of the operation being performed. +// +// The providerName is the the Generative AI provider as identified by the client +// or server instrumentation. +// +// All additional attrs passed are included in the recorded value. +func (m ServerTimePerOutputToken) Record( + ctx context.Context, + val float64, + operationName OperationNameAttr, + providerName ProviderNameAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("gen_ai.operation.name", string(operationName)), + attribute.String("gen_ai.provider.name", string(providerName)), + )..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+func (m ServerTimePerOutputToken) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// AttrRequestModel returns an optional attribute for the "gen_ai.request.model" +// semantic convention. It represents the name of the GenAI model a request is +// being made to. +func (ServerTimePerOutputToken) AttrRequestModel(val string) attribute.KeyValue { + return attribute.String("gen_ai.request.model", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the genAI server port. +func (ServerTimePerOutputToken) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// AttrResponseModel returns an optional attribute for the +// "gen_ai.response.model" semantic convention. It represents the name of the +// model that generated the response. +func (ServerTimePerOutputToken) AttrResponseModel(val string) attribute.KeyValue { + return attribute.String("gen_ai.response.model", val) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the genAI server address. +func (ServerTimePerOutputToken) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// ServerTimeToFirstToken is an instrument used to record metric values +// conforming to the "gen_ai.server.time_to_first_token" semantic conventions. It +// represents the time to generate first token for successful responses. +type ServerTimeToFirstToken struct { + metric.Float64Histogram +} + +// NewServerTimeToFirstToken returns a new ServerTimeToFirstToken instrument. 
+func NewServerTimeToFirstToken( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ServerTimeToFirstToken, error) { + // Check if the meter is nil. + if m == nil { + return ServerTimeToFirstToken{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "gen_ai.server.time_to_first_token", + append([]metric.Float64HistogramOption{ + metric.WithDescription("Time to generate first token for successful responses."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ServerTimeToFirstToken{noop.Float64Histogram{}}, err + } + return ServerTimeToFirstToken{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerTimeToFirstToken) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ServerTimeToFirstToken) Name() string { + return "gen_ai.server.time_to_first_token" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerTimeToFirstToken) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (ServerTimeToFirstToken) Description() string { + return "Time to generate first token for successful responses." +} + +// Record records val to the current distribution for attrs. +// +// The operationName is the the name of the operation being performed. +// +// The providerName is the the Generative AI provider as identified by the client +// or server instrumentation. +// +// All additional attrs passed are included in the recorded value. 
+func (m ServerTimeToFirstToken) Record( + ctx context.Context, + val float64, + operationName OperationNameAttr, + providerName ProviderNameAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("gen_ai.operation.name", string(operationName)), + attribute.String("gen_ai.provider.name", string(providerName)), + )..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +func (m ServerTimeToFirstToken) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// AttrRequestModel returns an optional attribute for the "gen_ai.request.model" +// semantic convention. It represents the name of the GenAI model a request is +// being made to. +func (ServerTimeToFirstToken) AttrRequestModel(val string) attribute.KeyValue { + return attribute.String("gen_ai.request.model", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the genAI server port. +func (ServerTimeToFirstToken) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// AttrResponseModel returns an optional attribute for the +// "gen_ai.response.model" semantic convention. It represents the name of the +// model that generated the response. 
func (ServerTimeToFirstToken) AttrResponseModel(val string) attribute.KeyValue {
	return attribute.String("gen_ai.response.model", val)
}

// AttrServerAddress returns an optional attribute for the "server.address"
// semantic convention. It represents the genAI server address.
func (ServerTimeToFirstToken) AttrServerAddress(val string) attribute.KeyValue {
	return attribute.String("server.address", val)
}
\ No newline at end of file
diff --git a/semconv/v1.37.0/goconv/metric.go b/semconv/v1.37.0/goconv/metric.go
new file mode 100644
index 00000000000..fe9e2933e17
--- /dev/null
+++ b/semconv/v1.37.0/goconv/metric.go
@@ -0,0 +1,531 @@
// Code generated from semantic convention specification. DO NOT EDIT.

// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

// Package goconv provides types and functionality for OpenTelemetry semantic
// conventions in the "go" namespace.
package goconv

import (
	"context"
	"sync"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/noop"
)

var (
	addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }}
	recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }}
)

// MemoryTypeAttr is an attribute conforming to the go.memory.type semantic
// conventions. It represents the type of memory.
type MemoryTypeAttr string

var (
	// MemoryTypeStack is the memory allocated from the heap that is reserved for
	// stack space, whether or not it is currently in-use.
	MemoryTypeStack MemoryTypeAttr = "stack"
	// MemoryTypeOther is the memory used by the Go runtime, excluding other
	// categories of memory usage described in this enumeration.
	MemoryTypeOther MemoryTypeAttr = "other"
)

// ConfigGogc is an instrument used to record metric values conforming to the
// "go.config.gogc" semantic conventions. It represents the heap size target
// percentage configured by the user, otherwise 100.
type ConfigGogc struct {
	metric.Int64ObservableUpDownCounter
}

// NewConfigGogc returns a new ConfigGogc instrument.
func NewConfigGogc(
	m metric.Meter,
	opt ...metric.Int64ObservableUpDownCounterOption,
) (ConfigGogc, error) {
	// Check if the meter is nil.
	if m == nil {
		return ConfigGogc{noop.Int64ObservableUpDownCounter{}}, nil
	}

	i, err := m.Int64ObservableUpDownCounter(
		"go.config.gogc",
		append([]metric.Int64ObservableUpDownCounterOption{
			metric.WithDescription("Heap size target percentage configured by the user, otherwise 100."),
			metric.WithUnit("%"),
		}, opt...)...,
	)
	if err != nil {
		return ConfigGogc{noop.Int64ObservableUpDownCounter{}}, err
	}
	return ConfigGogc{i}, nil
}

// Inst returns the underlying metric instrument.
func (m ConfigGogc) Inst() metric.Int64ObservableUpDownCounter {
	return m.Int64ObservableUpDownCounter
}

// Name returns the semantic convention name of the instrument.
func (ConfigGogc) Name() string {
	return "go.config.gogc"
}

// Unit returns the semantic convention unit of the instrument.
func (ConfigGogc) Unit() string {
	return "%"
}

// Description returns the semantic convention description of the instrument.
func (ConfigGogc) Description() string {
	return "Heap size target percentage configured by the user, otherwise 100."
}

// GoroutineCount is an instrument used to record metric values conforming to the
// "go.goroutine.count" semantic conventions. It represents the count of live
// goroutines.
type GoroutineCount struct {
	metric.Int64ObservableUpDownCounter
}

// NewGoroutineCount returns a new GoroutineCount instrument.
func NewGoroutineCount(
	m metric.Meter,
	opt ...metric.Int64ObservableUpDownCounterOption,
) (GoroutineCount, error) {
	// Check if the meter is nil.
	if m == nil {
		return GoroutineCount{noop.Int64ObservableUpDownCounter{}}, nil
	}

	i, err := m.Int64ObservableUpDownCounter(
		"go.goroutine.count",
		append([]metric.Int64ObservableUpDownCounterOption{
			metric.WithDescription("Count of live goroutines."),
			metric.WithUnit("{goroutine}"),
		}, opt...)...,
	)
	if err != nil {
		return GoroutineCount{noop.Int64ObservableUpDownCounter{}}, err
	}
	return GoroutineCount{i}, nil
}

// Inst returns the underlying metric instrument.
func (m GoroutineCount) Inst() metric.Int64ObservableUpDownCounter {
	return m.Int64ObservableUpDownCounter
}

// Name returns the semantic convention name of the instrument.
func (GoroutineCount) Name() string {
	return "go.goroutine.count"
}

// Unit returns the semantic convention unit of the instrument.
func (GoroutineCount) Unit() string {
	return "{goroutine}"
}

// Description returns the semantic convention description of the instrument.
func (GoroutineCount) Description() string {
	return "Count of live goroutines."
}

// MemoryAllocated is an instrument used to record metric values conforming to
// the "go.memory.allocated" semantic conventions. It represents the memory
// allocated to the heap by the application.
type MemoryAllocated struct {
	metric.Int64ObservableCounter
}

// NewMemoryAllocated returns a new MemoryAllocated instrument.
func NewMemoryAllocated(
	m metric.Meter,
	opt ...metric.Int64ObservableCounterOption,
) (MemoryAllocated, error) {
	// Check if the meter is nil.
	if m == nil {
		return MemoryAllocated{noop.Int64ObservableCounter{}}, nil
	}

	i, err := m.Int64ObservableCounter(
		"go.memory.allocated",
		append([]metric.Int64ObservableCounterOption{
			metric.WithDescription("Memory allocated to the heap by the application."),
			metric.WithUnit("By"),
		}, opt...)...,
	)
	if err != nil {
		return MemoryAllocated{noop.Int64ObservableCounter{}}, err
	}
	return MemoryAllocated{i}, nil
}

// Inst returns the underlying metric instrument.
func (m MemoryAllocated) Inst() metric.Int64ObservableCounter {
	return m.Int64ObservableCounter
}

// Name returns the semantic convention name of the instrument.
func (MemoryAllocated) Name() string {
	return "go.memory.allocated"
}

// Unit returns the semantic convention unit of the instrument.
func (MemoryAllocated) Unit() string {
	return "By"
}

// Description returns the semantic convention description of the instrument.
func (MemoryAllocated) Description() string {
	return "Memory allocated to the heap by the application."
}

// MemoryAllocations is an instrument used to record metric values conforming to
// the "go.memory.allocations" semantic conventions. It represents the count of
// allocations to the heap by the application.
type MemoryAllocations struct {
	metric.Int64ObservableCounter
}

// NewMemoryAllocations returns a new MemoryAllocations instrument.
func NewMemoryAllocations(
	m metric.Meter,
	opt ...metric.Int64ObservableCounterOption,
) (MemoryAllocations, error) {
	// Check if the meter is nil.
	if m == nil {
		return MemoryAllocations{noop.Int64ObservableCounter{}}, nil
	}

	i, err := m.Int64ObservableCounter(
		"go.memory.allocations",
		append([]metric.Int64ObservableCounterOption{
			metric.WithDescription("Count of allocations to the heap by the application."),
			metric.WithUnit("{allocation}"),
		}, opt...)...,
	)
	if err != nil {
		return MemoryAllocations{noop.Int64ObservableCounter{}}, err
	}
	return MemoryAllocations{i}, nil
}

// Inst returns the underlying metric instrument.
func (m MemoryAllocations) Inst() metric.Int64ObservableCounter {
	return m.Int64ObservableCounter
}

// Name returns the semantic convention name of the instrument.
func (MemoryAllocations) Name() string {
	return "go.memory.allocations"
}

// Unit returns the semantic convention unit of the instrument.
func (MemoryAllocations) Unit() string {
	return "{allocation}"
}

// Description returns the semantic convention description of the instrument.
func (MemoryAllocations) Description() string {
	return "Count of allocations to the heap by the application."
}

// MemoryGCGoal is an instrument used to record metric values conforming to the
// "go.memory.gc.goal" semantic conventions. It represents the heap size target
// for the end of the GC cycle.
type MemoryGCGoal struct {
	metric.Int64ObservableUpDownCounter
}

// NewMemoryGCGoal returns a new MemoryGCGoal instrument.
func NewMemoryGCGoal(
	m metric.Meter,
	opt ...metric.Int64ObservableUpDownCounterOption,
) (MemoryGCGoal, error) {
	// Check if the meter is nil.
	if m == nil {
		return MemoryGCGoal{noop.Int64ObservableUpDownCounter{}}, nil
	}

	i, err := m.Int64ObservableUpDownCounter(
		"go.memory.gc.goal",
		append([]metric.Int64ObservableUpDownCounterOption{
			metric.WithDescription("Heap size target for the end of the GC cycle."),
			metric.WithUnit("By"),
		}, opt...)...,
	)
	if err != nil {
		return MemoryGCGoal{noop.Int64ObservableUpDownCounter{}}, err
	}
	return MemoryGCGoal{i}, nil
}

// Inst returns the underlying metric instrument.
func (m MemoryGCGoal) Inst() metric.Int64ObservableUpDownCounter {
	return m.Int64ObservableUpDownCounter
}

// Name returns the semantic convention name of the instrument.
func (MemoryGCGoal) Name() string {
	return "go.memory.gc.goal"
}

// Unit returns the semantic convention unit of the instrument.
func (MemoryGCGoal) Unit() string {
	return "By"
}

// Description returns the semantic convention description of the instrument.
func (MemoryGCGoal) Description() string {
	return "Heap size target for the end of the GC cycle."
}

// MemoryLimit is an instrument used to record metric values conforming to the
// "go.memory.limit" semantic conventions. It represents the go runtime memory
// limit configured by the user, if a limit exists.
type MemoryLimit struct {
	metric.Int64ObservableUpDownCounter
}

// NewMemoryLimit returns a new MemoryLimit instrument.
func NewMemoryLimit(
	m metric.Meter,
	opt ...metric.Int64ObservableUpDownCounterOption,
) (MemoryLimit, error) {
	// Check if the meter is nil.
	if m == nil {
		return MemoryLimit{noop.Int64ObservableUpDownCounter{}}, nil
	}

	i, err := m.Int64ObservableUpDownCounter(
		"go.memory.limit",
		append([]metric.Int64ObservableUpDownCounterOption{
			metric.WithDescription("Go runtime memory limit configured by the user, if a limit exists."),
			metric.WithUnit("By"),
		}, opt...)...,
	)
	if err != nil {
		return MemoryLimit{noop.Int64ObservableUpDownCounter{}}, err
	}
	return MemoryLimit{i}, nil
}

// Inst returns the underlying metric instrument.
func (m MemoryLimit) Inst() metric.Int64ObservableUpDownCounter {
	return m.Int64ObservableUpDownCounter
}

// Name returns the semantic convention name of the instrument.
func (MemoryLimit) Name() string {
	return "go.memory.limit"
}

// Unit returns the semantic convention unit of the instrument.
func (MemoryLimit) Unit() string {
	return "By"
}

// Description returns the semantic convention description of the instrument.
func (MemoryLimit) Description() string {
	return "Go runtime memory limit configured by the user, if a limit exists."
}

// MemoryUsed is an instrument used to record metric values conforming to the
// "go.memory.used" semantic conventions. It represents the memory used by the Go
// runtime.
type MemoryUsed struct {
	metric.Int64ObservableUpDownCounter
}

// NewMemoryUsed returns a new MemoryUsed instrument.
func NewMemoryUsed(
	m metric.Meter,
	opt ...metric.Int64ObservableUpDownCounterOption,
) (MemoryUsed, error) {
	// Check if the meter is nil.
	if m == nil {
		return MemoryUsed{noop.Int64ObservableUpDownCounter{}}, nil
	}

	i, err := m.Int64ObservableUpDownCounter(
		"go.memory.used",
		append([]metric.Int64ObservableUpDownCounterOption{
			metric.WithDescription("Memory used by the Go runtime."),
			metric.WithUnit("By"),
		}, opt...)...,
	)
	if err != nil {
		return MemoryUsed{noop.Int64ObservableUpDownCounter{}}, err
	}
	return MemoryUsed{i}, nil
}

// Inst returns the underlying metric instrument.
func (m MemoryUsed) Inst() metric.Int64ObservableUpDownCounter {
	return m.Int64ObservableUpDownCounter
}

// Name returns the semantic convention name of the instrument.
func (MemoryUsed) Name() string {
	return "go.memory.used"
}

// Unit returns the semantic convention unit of the instrument.
func (MemoryUsed) Unit() string {
	return "By"
}

// Description returns the semantic convention description of the instrument.
func (MemoryUsed) Description() string {
	return "Memory used by the Go runtime."
}

// AttrMemoryType returns an optional attribute for the "go.memory.type" semantic
// convention. It represents the type of memory.
func (MemoryUsed) AttrMemoryType(val MemoryTypeAttr) attribute.KeyValue {
	return attribute.String("go.memory.type", string(val))
}

// ProcessorLimit is an instrument used to record metric values conforming to the
// "go.processor.limit" semantic conventions. It represents the number of OS
// threads that can execute user-level Go code simultaneously.
type ProcessorLimit struct {
	metric.Int64ObservableUpDownCounter
}

// NewProcessorLimit returns a new ProcessorLimit instrument.
func NewProcessorLimit(
	m metric.Meter,
	opt ...metric.Int64ObservableUpDownCounterOption,
) (ProcessorLimit, error) {
	// Check if the meter is nil.
	if m == nil {
		return ProcessorLimit{noop.Int64ObservableUpDownCounter{}}, nil
	}

	i, err := m.Int64ObservableUpDownCounter(
		"go.processor.limit",
		append([]metric.Int64ObservableUpDownCounterOption{
			metric.WithDescription("The number of OS threads that can execute user-level Go code simultaneously."),
			metric.WithUnit("{thread}"),
		}, opt...)...,
	)
	if err != nil {
		return ProcessorLimit{noop.Int64ObservableUpDownCounter{}}, err
	}
	return ProcessorLimit{i}, nil
}

// Inst returns the underlying metric instrument.
func (m ProcessorLimit) Inst() metric.Int64ObservableUpDownCounter {
	return m.Int64ObservableUpDownCounter
}

// Name returns the semantic convention name of the instrument.
func (ProcessorLimit) Name() string {
	return "go.processor.limit"
}

// Unit returns the semantic convention unit of the instrument.
func (ProcessorLimit) Unit() string {
	return "{thread}"
}

// Description returns the semantic convention description of the instrument.
func (ProcessorLimit) Description() string {
	return "The number of OS threads that can execute user-level Go code simultaneously."
}

// ScheduleDuration is an instrument used to record metric values conforming to
// the "go.schedule.duration" semantic conventions. It represents the time
// goroutines have spent in the scheduler in a runnable state before actually
// running.
type ScheduleDuration struct {
	metric.Float64Histogram
}

// NewScheduleDuration returns a new ScheduleDuration instrument.
func NewScheduleDuration(
	m metric.Meter,
	opt ...metric.Float64HistogramOption,
) (ScheduleDuration, error) {
	// Check if the meter is nil.
	if m == nil {
		return ScheduleDuration{noop.Float64Histogram{}}, nil
	}

	i, err := m.Float64Histogram(
		"go.schedule.duration",
		append([]metric.Float64HistogramOption{
			metric.WithDescription("The time goroutines have spent in the scheduler in a runnable state before actually running."),
			metric.WithUnit("s"),
		}, opt...)...,
	)
	if err != nil {
		return ScheduleDuration{noop.Float64Histogram{}}, err
	}
	return ScheduleDuration{i}, nil
}

// Inst returns the underlying metric instrument.
func (m ScheduleDuration) Inst() metric.Float64Histogram {
	return m.Float64Histogram
}

// Name returns the semantic convention name of the instrument.
func (ScheduleDuration) Name() string {
	return "go.schedule.duration"
}

// Unit returns the semantic convention unit of the instrument.
func (ScheduleDuration) Unit() string {
	return "s"
}

// Description returns the semantic convention description of the instrument.
func (ScheduleDuration) Description() string {
	return "The time goroutines have spent in the scheduler in a runnable state before actually running."
}

// Record records val to the current distribution for attrs.
//
// Computed from `/sched/latencies:seconds`. Bucket boundaries are provided by
// the runtime, and are subject to change.
func (m ScheduleDuration) Record(ctx context.Context, val float64, attrs ...attribute.KeyValue) {
	if len(attrs) == 0 {
		m.Float64Histogram.Record(ctx, val)
		return
	}

	o := recOptPool.Get().(*[]metric.RecordOption)
	defer func() {
		*o = (*o)[:0]
		recOptPool.Put(o)
	}()

	*o = append(*o, metric.WithAttributes(attrs...))
	m.Float64Histogram.Record(ctx, val, *o...)
}

// RecordSet records val to the current distribution for set.
//
// Computed from `/sched/latencies:seconds`. Bucket boundaries are provided by
// the runtime, and are subject to change.
+func (m ScheduleDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		// Fast path: nothing to record attributes for. Return here so the
+		// value is not recorded a second time below (AddSet-style guard).
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
\ No newline at end of file
diff --git a/semconv/v1.37.0/httpconv/metric.go b/semconv/v1.37.0/httpconv/metric.go
new file mode 100644
index 00000000000..55bde895ddd
--- /dev/null
+++ b/semconv/v1.37.0/httpconv/metric.go
@@ -0,0 +1,1641 @@
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package httpconv provides types and functionality for OpenTelemetry semantic
+// conventions in the "http" namespace.
+package httpconv
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/noop"
+)
+
+var (
+	addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }}
+	recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }}
+)
+
+// ErrorTypeAttr is an attribute conforming to the error.type semantic
+// conventions. It represents the describes a class of error the operation ended
+// with.
+type ErrorTypeAttr string
+
+var (
+	// ErrorTypeOther is a fallback error value to be used when the instrumentation
+	// doesn't define a custom value.
+	ErrorTypeOther ErrorTypeAttr = "_OTHER"
+)
+
+// ConnectionStateAttr is an attribute conforming to the http.connection.state
+// semantic conventions. It represents the state of the HTTP connection in the
+// HTTP connection pool.
+type ConnectionStateAttr string
+
+var (
+	// ConnectionStateActive is the active state.
+	ConnectionStateActive ConnectionStateAttr = "active"
+	// ConnectionStateIdle is the idle state.
+ ConnectionStateIdle ConnectionStateAttr = "idle" +) + +// RequestMethodAttr is an attribute conforming to the http.request.method +// semantic conventions. It represents the HTTP request method. +type RequestMethodAttr string + +var ( + // RequestMethodConnect is the CONNECT method. + RequestMethodConnect RequestMethodAttr = "CONNECT" + // RequestMethodDelete is the DELETE method. + RequestMethodDelete RequestMethodAttr = "DELETE" + // RequestMethodGet is the GET method. + RequestMethodGet RequestMethodAttr = "GET" + // RequestMethodHead is the HEAD method. + RequestMethodHead RequestMethodAttr = "HEAD" + // RequestMethodOptions is the OPTIONS method. + RequestMethodOptions RequestMethodAttr = "OPTIONS" + // RequestMethodPatch is the PATCH method. + RequestMethodPatch RequestMethodAttr = "PATCH" + // RequestMethodPost is the POST method. + RequestMethodPost RequestMethodAttr = "POST" + // RequestMethodPut is the PUT method. + RequestMethodPut RequestMethodAttr = "PUT" + // RequestMethodTrace is the TRACE method. + RequestMethodTrace RequestMethodAttr = "TRACE" + // RequestMethodOther is the any HTTP method that the instrumentation has no + // prior knowledge of. + RequestMethodOther RequestMethodAttr = "_OTHER" +) + +// UserAgentSyntheticTypeAttr is an attribute conforming to the +// user_agent.synthetic.type semantic conventions. It represents the specifies +// the category of synthetic traffic, such as tests or bots. +type UserAgentSyntheticTypeAttr string + +var ( + // UserAgentSyntheticTypeBot is the bot source. + UserAgentSyntheticTypeBot UserAgentSyntheticTypeAttr = "bot" + // UserAgentSyntheticTypeTest is the synthetic test source. + UserAgentSyntheticTypeTest UserAgentSyntheticTypeAttr = "test" +) + +// ClientActiveRequests is an instrument used to record metric values conforming +// to the "http.client.active_requests" semantic conventions. It represents the +// number of active HTTP requests. 
+type ClientActiveRequests struct { + metric.Int64UpDownCounter +} + +// NewClientActiveRequests returns a new ClientActiveRequests instrument. +func NewClientActiveRequests( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ClientActiveRequests, error) { + // Check if the meter is nil. + if m == nil { + return ClientActiveRequests{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "http.client.active_requests", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of active HTTP requests."), + metric.WithUnit("{request}"), + }, opt...)..., + ) + if err != nil { + return ClientActiveRequests{noop.Int64UpDownCounter{}}, err + } + return ClientActiveRequests{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientActiveRequests) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ClientActiveRequests) Name() string { + return "http.client.active_requests" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientActiveRequests) Unit() string { + return "{request}" +} + +// Description returns the semantic convention description of the instrument +func (ClientActiveRequests) Description() string { + return "Number of active HTTP requests." +} + +// Add adds incr to the existing count for attrs. +// +// The serverAddress is the server domain name if available without reverse DNS +// lookup; otherwise, IP address or Unix domain socket name. +// +// The serverPort is the server port number. +// +// All additional attrs passed are included in the recorded value. 
+func (m ClientActiveRequests) Add( + ctx context.Context, + incr int64, + serverAddress string, + serverPort int, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("server.address", serverAddress), + attribute.Int("server.port", serverPort), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m ClientActiveRequests) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrURLTemplate returns an optional attribute for the "url.template" semantic +// convention. It represents the low-cardinality template of an +// [absolute path reference]. +// +// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 +func (ClientActiveRequests) AttrURLTemplate(val string) attribute.KeyValue { + return attribute.String("url.template", val) +} + +// AttrRequestMethod returns an optional attribute for the "http.request.method" +// semantic convention. It represents the HTTP request method. +func (ClientActiveRequests) AttrRequestMethod(val RequestMethodAttr) attribute.KeyValue { + return attribute.String("http.request.method", string(val)) +} + +// AttrURLScheme returns an optional attribute for the "url.scheme" semantic +// convention. It represents the [URI scheme] component identifying the used +// protocol. 
+// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +func (ClientActiveRequests) AttrURLScheme(val string) attribute.KeyValue { + return attribute.String("url.scheme", val) +} + +// ClientConnectionDuration is an instrument used to record metric values +// conforming to the "http.client.connection.duration" semantic conventions. It +// represents the duration of the successfully established outbound HTTP +// connections. +type ClientConnectionDuration struct { + metric.Float64Histogram +} + +// NewClientConnectionDuration returns a new ClientConnectionDuration instrument. +func NewClientConnectionDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ClientConnectionDuration, error) { + // Check if the meter is nil. + if m == nil { + return ClientConnectionDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "http.client.connection.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("The duration of the successfully established outbound HTTP connections."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ClientConnectionDuration{noop.Float64Histogram{}}, err + } + return ClientConnectionDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientConnectionDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientConnectionDuration) Name() string { + return "http.client.connection.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientConnectionDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (ClientConnectionDuration) Description() string { + return "The duration of the successfully established outbound HTTP connections." +} + +// Record records val to the current distribution for attrs. 
+//
+// The serverAddress is the server domain name if available without reverse DNS
+// lookup; otherwise, IP address or Unix domain socket name.
+//
+// The serverPort is the server port number.
+//
+// All additional attrs passed are included in the recorded value.
+func (m ClientConnectionDuration) Record(
+	ctx context.Context,
+	val float64,
+	serverAddress string,
+	serverPort int,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("server.address", serverAddress),
+				attribute.Int("server.port", serverPort),
+			)...,
+		),
+	)
+
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m ClientConnectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		// Fast path: return so an empty set does not cause a double record
+		// (matches the guard used by the AddSet methods in this package).
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrNetworkPeerAddress returns an optional attribute for the
+// "network.peer.address" semantic convention. It represents the peer address of
+// the network connection - IP address or Unix domain socket name.
+func (ClientConnectionDuration) AttrNetworkPeerAddress(val string) attribute.KeyValue {
+	return attribute.String("network.peer.address", val)
+}
+
+// AttrNetworkProtocolVersion returns an optional attribute for the
+// "network.protocol.version" semantic convention. It represents the actual
+// version of the protocol used for network communication.
+func (ClientConnectionDuration) AttrNetworkProtocolVersion(val string) attribute.KeyValue { + return attribute.String("network.protocol.version", val) +} + +// AttrURLScheme returns an optional attribute for the "url.scheme" semantic +// convention. It represents the [URI scheme] component identifying the used +// protocol. +// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +func (ClientConnectionDuration) AttrURLScheme(val string) attribute.KeyValue { + return attribute.String("url.scheme", val) +} + +// ClientOpenConnections is an instrument used to record metric values conforming +// to the "http.client.open_connections" semantic conventions. It represents the +// number of outbound HTTP connections that are currently active or idle on the +// client. +type ClientOpenConnections struct { + metric.Int64UpDownCounter +} + +// NewClientOpenConnections returns a new ClientOpenConnections instrument. +func NewClientOpenConnections( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ClientOpenConnections, error) { + // Check if the meter is nil. + if m == nil { + return ClientOpenConnections{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "http.client.open_connections", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of outbound HTTP connections that are currently active or idle on the client."), + metric.WithUnit("{connection}"), + }, opt...)..., + ) + if err != nil { + return ClientOpenConnections{noop.Int64UpDownCounter{}}, err + } + return ClientOpenConnections{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientOpenConnections) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (ClientOpenConnections) Name() string { + return "http.client.open_connections" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientOpenConnections) Unit() string { + return "{connection}" +} + +// Description returns the semantic convention description of the instrument +func (ClientOpenConnections) Description() string { + return "Number of outbound HTTP connections that are currently active or idle on the client." +} + +// Add adds incr to the existing count for attrs. +// +// The connectionState is the state of the HTTP connection in the HTTP connection +// pool. +// +// The serverAddress is the server domain name if available without reverse DNS +// lookup; otherwise, IP address or Unix domain socket name. +// +// The serverPort is the server port number. +// +// All additional attrs passed are included in the recorded value. +func (m ClientOpenConnections) Add( + ctx context.Context, + incr int64, + connectionState ConnectionStateAttr, + serverAddress string, + serverPort int, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("http.connection.state", string(connectionState)), + attribute.String("server.address", serverAddress), + attribute.Int("server.port", serverPort), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m ClientOpenConnections) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) 
+} + +// AttrNetworkPeerAddress returns an optional attribute for the +// "network.peer.address" semantic convention. It represents the peer address of +// the network connection - IP address or Unix domain socket name. +func (ClientOpenConnections) AttrNetworkPeerAddress(val string) attribute.KeyValue { + return attribute.String("network.peer.address", val) +} + +// AttrNetworkProtocolVersion returns an optional attribute for the +// "network.protocol.version" semantic convention. It represents the actual +// version of the protocol used for network communication. +func (ClientOpenConnections) AttrNetworkProtocolVersion(val string) attribute.KeyValue { + return attribute.String("network.protocol.version", val) +} + +// AttrURLScheme returns an optional attribute for the "url.scheme" semantic +// convention. It represents the [URI scheme] component identifying the used +// protocol. +// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +func (ClientOpenConnections) AttrURLScheme(val string) attribute.KeyValue { + return attribute.String("url.scheme", val) +} + +// ClientRequestBodySize is an instrument used to record metric values conforming +// to the "http.client.request.body.size" semantic conventions. It represents the +// size of HTTP client request bodies. +type ClientRequestBodySize struct { + metric.Int64Histogram +} + +// NewClientRequestBodySize returns a new ClientRequestBodySize instrument. +func NewClientRequestBodySize( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ClientRequestBodySize, error) { + // Check if the meter is nil. 
+ if m == nil { + return ClientRequestBodySize{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "http.client.request.body.size", + append([]metric.Int64HistogramOption{ + metric.WithDescription("Size of HTTP client request bodies."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ClientRequestBodySize{noop.Int64Histogram{}}, err + } + return ClientRequestBodySize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientRequestBodySize) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientRequestBodySize) Name() string { + return "http.client.request.body.size" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientRequestBodySize) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ClientRequestBodySize) Description() string { + return "Size of HTTP client request bodies." +} + +// Record records val to the current distribution for attrs. +// +// The requestMethod is the HTTP request method. +// +// The serverAddress is the server domain name if available without reverse DNS +// lookup; otherwise, IP address or Unix domain socket name. +// +// The serverPort is the server port number. +// +// All additional attrs passed are included in the recorded value. +// +// The size of the request payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. 
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ClientRequestBodySize) Record(
+	ctx context.Context,
+	val int64,
+	requestMethod RequestMethodAttr,
+	serverAddress string,
+	serverPort int,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("http.request.method", string(requestMethod)),
+				attribute.String("server.address", serverAddress),
+				attribute.Int("server.port", serverPort),
+			)...,
+		),
+	)
+
+	m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// The size of the request payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ClientRequestBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Histogram.Record(ctx, val)
+		// Fast path: return so an empty set does not cause a double record
+		// (matches the guard used by the AddSet methods in this package).
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents the describes a class of error the operation ended
+// with.
+func (ClientRequestBodySize) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+	return attribute.String("error.type", string(val))
+}
+
+// AttrResponseStatusCode returns an optional attribute for the
+// "http.response.status_code" semantic convention. 
It represents the +// [HTTP response status code]. +// +// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 +func (ClientRequestBodySize) AttrResponseStatusCode(val int) attribute.KeyValue { + return attribute.Int("http.response.status_code", val) +} + +// AttrNetworkProtocolName returns an optional attribute for the +// "network.protocol.name" semantic convention. It represents the +// [OSI application layer] or non-OSI equivalent. +// +// [OSI application layer]: https://wikipedia.org/wiki/Application_layer +func (ClientRequestBodySize) AttrNetworkProtocolName(val string) attribute.KeyValue { + return attribute.String("network.protocol.name", val) +} + +// AttrURLTemplate returns an optional attribute for the "url.template" semantic +// convention. It represents the low-cardinality template of an +// [absolute path reference]. +// +// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 +func (ClientRequestBodySize) AttrURLTemplate(val string) attribute.KeyValue { + return attribute.String("url.template", val) +} + +// AttrNetworkProtocolVersion returns an optional attribute for the +// "network.protocol.version" semantic convention. It represents the actual +// version of the protocol used for network communication. +func (ClientRequestBodySize) AttrNetworkProtocolVersion(val string) attribute.KeyValue { + return attribute.String("network.protocol.version", val) +} + +// AttrURLScheme returns an optional attribute for the "url.scheme" semantic +// convention. It represents the [URI scheme] component identifying the used +// protocol. +// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +func (ClientRequestBodySize) AttrURLScheme(val string) attribute.KeyValue { + return attribute.String("url.scheme", val) +} + +// ClientRequestDuration is an instrument used to record metric values conforming +// to the "http.client.request.duration" semantic conventions. 
It represents the +// duration of HTTP client requests. +type ClientRequestDuration struct { + metric.Float64Histogram +} + +// NewClientRequestDuration returns a new ClientRequestDuration instrument. +func NewClientRequestDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ClientRequestDuration, error) { + // Check if the meter is nil. + if m == nil { + return ClientRequestDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "http.client.request.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("Duration of HTTP client requests."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ClientRequestDuration{noop.Float64Histogram{}}, err + } + return ClientRequestDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientRequestDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientRequestDuration) Name() string { + return "http.client.request.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientRequestDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (ClientRequestDuration) Description() string { + return "Duration of HTTP client requests." +} + +// Record records val to the current distribution for attrs. +// +// The requestMethod is the HTTP request method. +// +// The serverAddress is the server domain name if available without reverse DNS +// lookup; otherwise, IP address or Unix domain socket name. +// +// The serverPort is the server port number. +// +// All additional attrs passed are included in the recorded value. 
+func (m ClientRequestDuration) Record(
+	ctx context.Context,
+	val float64,
+	requestMethod RequestMethodAttr,
+	serverAddress string,
+	serverPort int,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("http.request.method", string(requestMethod)),
+				attribute.String("server.address", serverAddress),
+				attribute.Int("server.port", serverPort),
+			)...,
+		),
+	)
+
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m ClientRequestDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		// Fast path: return so an empty set does not cause a double record
+		// (matches the guard used by the AddSet methods in this package).
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents the describes a class of error the operation ended
+// with.
+func (ClientRequestDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+	return attribute.String("error.type", string(val))
+}
+
+// AttrResponseStatusCode returns an optional attribute for the
+// "http.response.status_code" semantic convention. It represents the
+// [HTTP response status code].
+//
+// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+func (ClientRequestDuration) AttrResponseStatusCode(val int) attribute.KeyValue {
+	return attribute.Int("http.response.status_code", val)
+}
+
+// AttrNetworkProtocolName returns an optional attribute for the
+// "network.protocol.name" semantic convention. It represents the
+// [OSI application layer] or non-OSI equivalent.
+// +// [OSI application layer]: https://wikipedia.org/wiki/Application_layer +func (ClientRequestDuration) AttrNetworkProtocolName(val string) attribute.KeyValue { + return attribute.String("network.protocol.name", val) +} + +// AttrNetworkProtocolVersion returns an optional attribute for the +// "network.protocol.version" semantic convention. It represents the actual +// version of the protocol used for network communication. +func (ClientRequestDuration) AttrNetworkProtocolVersion(val string) attribute.KeyValue { + return attribute.String("network.protocol.version", val) +} + +// AttrURLScheme returns an optional attribute for the "url.scheme" semantic +// convention. It represents the [URI scheme] component identifying the used +// protocol. +// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +func (ClientRequestDuration) AttrURLScheme(val string) attribute.KeyValue { + return attribute.String("url.scheme", val) +} + +// AttrURLTemplate returns an optional attribute for the "url.template" semantic +// convention. It represents the low-cardinality template of an +// [absolute path reference]. +// +// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 +func (ClientRequestDuration) AttrURLTemplate(val string) attribute.KeyValue { + return attribute.String("url.template", val) +} + +// ClientResponseBodySize is an instrument used to record metric values +// conforming to the "http.client.response.body.size" semantic conventions. It +// represents the size of HTTP client response bodies. +type ClientResponseBodySize struct { + metric.Int64Histogram +} + +// NewClientResponseBodySize returns a new ClientResponseBodySize instrument. +func NewClientResponseBodySize( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ClientResponseBodySize, error) { + // Check if the meter is nil. 
+ if m == nil { + return ClientResponseBodySize{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "http.client.response.body.size", + append([]metric.Int64HistogramOption{ + metric.WithDescription("Size of HTTP client response bodies."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ClientResponseBodySize{noop.Int64Histogram{}}, err + } + return ClientResponseBodySize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientResponseBodySize) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientResponseBodySize) Name() string { + return "http.client.response.body.size" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientResponseBodySize) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ClientResponseBodySize) Description() string { + return "Size of HTTP client response bodies." +} + +// Record records val to the current distribution for attrs. +// +// The requestMethod is the HTTP request method. +// +// The serverAddress is the server domain name if available without reverse DNS +// lookup; otherwise, IP address or Unix domain socket name. +// +// The serverPort is the server port number. +// +// All additional attrs passed are included in the recorded value. +// +// The size of the response payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. 
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ClientResponseBodySize) Record(
+	ctx context.Context,
+	val int64,
+	requestMethod RequestMethodAttr,
+	serverAddress string,
+	serverPort int,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("http.request.method", string(requestMethod)),
+				attribute.String("server.address", serverAddress),
+				attribute.Int("server.port", serverPort),
+			)...,
+		),
+	)
+
+	m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// The size of the response payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ClientResponseBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Histogram.Record(ctx, val)
+		// Fast path: return so an empty set does not cause a double record
+		// (matches the guard used by the AddSet methods in this package).
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents the describes a class of error the operation ended
+// with.
+func (ClientResponseBodySize) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+	return attribute.String("error.type", string(val))
+}
+
+// AttrResponseStatusCode returns an optional attribute for the
+// "http.response.status_code" semantic convention. 
It represents the +// [HTTP response status code]. +// +// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 +func (ClientResponseBodySize) AttrResponseStatusCode(val int) attribute.KeyValue { + return attribute.Int("http.response.status_code", val) +} + +// AttrNetworkProtocolName returns an optional attribute for the +// "network.protocol.name" semantic convention. It represents the +// [OSI application layer] or non-OSI equivalent. +// +// [OSI application layer]: https://wikipedia.org/wiki/Application_layer +func (ClientResponseBodySize) AttrNetworkProtocolName(val string) attribute.KeyValue { + return attribute.String("network.protocol.name", val) +} + +// AttrURLTemplate returns an optional attribute for the "url.template" semantic +// convention. It represents the low-cardinality template of an +// [absolute path reference]. +// +// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2 +func (ClientResponseBodySize) AttrURLTemplate(val string) attribute.KeyValue { + return attribute.String("url.template", val) +} + +// AttrNetworkProtocolVersion returns an optional attribute for the +// "network.protocol.version" semantic convention. It represents the actual +// version of the protocol used for network communication. +func (ClientResponseBodySize) AttrNetworkProtocolVersion(val string) attribute.KeyValue { + return attribute.String("network.protocol.version", val) +} + +// AttrURLScheme returns an optional attribute for the "url.scheme" semantic +// convention. It represents the [URI scheme] component identifying the used +// protocol. +// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +func (ClientResponseBodySize) AttrURLScheme(val string) attribute.KeyValue { + return attribute.String("url.scheme", val) +} + +// ServerActiveRequests is an instrument used to record metric values conforming +// to the "http.server.active_requests" semantic conventions. 
It represents the +// number of active HTTP server requests. +type ServerActiveRequests struct { + metric.Int64UpDownCounter +} + +// NewServerActiveRequests returns a new ServerActiveRequests instrument. +func NewServerActiveRequests( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ServerActiveRequests, error) { + // Check if the meter is nil. + if m == nil { + return ServerActiveRequests{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "http.server.active_requests", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of active HTTP server requests."), + metric.WithUnit("{request}"), + }, opt...)..., + ) + if err != nil { + return ServerActiveRequests{noop.Int64UpDownCounter{}}, err + } + return ServerActiveRequests{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerActiveRequests) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ServerActiveRequests) Name() string { + return "http.server.active_requests" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerActiveRequests) Unit() string { + return "{request}" +} + +// Description returns the semantic convention description of the instrument +func (ServerActiveRequests) Description() string { + return "Number of active HTTP server requests." +} + +// Add adds incr to the existing count for attrs. +// +// The requestMethod is the HTTP request method. +// +// The urlScheme is the the [URI scheme] component identifying the used protocol. +// +// All additional attrs passed are included in the recorded value. 
+// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +func (m ServerActiveRequests) Add( + ctx context.Context, + incr int64, + requestMethod RequestMethodAttr, + urlScheme string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("http.request.method", string(requestMethod)), + attribute.String("url.scheme", urlScheme), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m ServerActiveRequests) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the name of the local HTTP server that +// received the request. +func (ServerActiveRequests) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the port of the local HTTP server that received the +// request. +func (ServerActiveRequests) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// ServerRequestBodySize is an instrument used to record metric values conforming +// to the "http.server.request.body.size" semantic conventions. It represents the +// size of HTTP server request bodies. 
+type ServerRequestBodySize struct { + metric.Int64Histogram +} + +// NewServerRequestBodySize returns a new ServerRequestBodySize instrument. +func NewServerRequestBodySize( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ServerRequestBodySize, error) { + // Check if the meter is nil. + if m == nil { + return ServerRequestBodySize{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "http.server.request.body.size", + append([]metric.Int64HistogramOption{ + metric.WithDescription("Size of HTTP server request bodies."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ServerRequestBodySize{noop.Int64Histogram{}}, err + } + return ServerRequestBodySize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerRequestBodySize) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ServerRequestBodySize) Name() string { + return "http.server.request.body.size" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerRequestBodySize) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ServerRequestBodySize) Description() string { + return "Size of HTTP server request bodies." +} + +// Record records val to the current distribution for attrs. +// +// The requestMethod is the HTTP request method. +// +// The urlScheme is the the [URI scheme] component identifying the used protocol. +// +// All additional attrs passed are included in the recorded value. +// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +// +// The size of the request payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. 
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ServerRequestBodySize) Record(
+	ctx context.Context,
+	val int64,
+	requestMethod RequestMethodAttr,
+	urlScheme string,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("http.request.method", string(requestMethod)),
+				attribute.String("url.scheme", urlScheme),
+			)...,
+		),
+	)
+
+	m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// The size of the request payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ServerRequestBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Histogram.Record(ctx, val)
+		// Return early so the value is not recorded a second time below.
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents a class of error the operation ended with.
+func (ServerRequestBodySize) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+	return attribute.String("error.type", string(val))
+}
+
+// AttrResponseStatusCode returns an optional attribute for the
+// "http.response.status_code" semantic convention. It represents the
+// [HTTP response status code].
+// +// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 +func (ServerRequestBodySize) AttrResponseStatusCode(val int) attribute.KeyValue { + return attribute.Int("http.response.status_code", val) +} + +// AttrRoute returns an optional attribute for the "http.route" semantic +// convention. It represents the matched route, that is, the path template in the +// format used by the respective server framework. +func (ServerRequestBodySize) AttrRoute(val string) attribute.KeyValue { + return attribute.String("http.route", val) +} + +// AttrNetworkProtocolName returns an optional attribute for the +// "network.protocol.name" semantic convention. It represents the +// [OSI application layer] or non-OSI equivalent. +// +// [OSI application layer]: https://wikipedia.org/wiki/Application_layer +func (ServerRequestBodySize) AttrNetworkProtocolName(val string) attribute.KeyValue { + return attribute.String("network.protocol.name", val) +} + +// AttrNetworkProtocolVersion returns an optional attribute for the +// "network.protocol.version" semantic convention. It represents the actual +// version of the protocol used for network communication. +func (ServerRequestBodySize) AttrNetworkProtocolVersion(val string) attribute.KeyValue { + return attribute.String("network.protocol.version", val) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the name of the local HTTP server that +// received the request. +func (ServerRequestBodySize) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the port of the local HTTP server that received the +// request. 
+func (ServerRequestBodySize) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// AttrUserAgentSyntheticType returns an optional attribute for the +// "user_agent.synthetic.type" semantic convention. It represents the specifies +// the category of synthetic traffic, such as tests or bots. +func (ServerRequestBodySize) AttrUserAgentSyntheticType(val UserAgentSyntheticTypeAttr) attribute.KeyValue { + return attribute.String("user_agent.synthetic.type", string(val)) +} + +// ServerRequestDuration is an instrument used to record metric values conforming +// to the "http.server.request.duration" semantic conventions. It represents the +// duration of HTTP server requests. +type ServerRequestDuration struct { + metric.Float64Histogram +} + +// NewServerRequestDuration returns a new ServerRequestDuration instrument. +func NewServerRequestDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ServerRequestDuration, error) { + // Check if the meter is nil. + if m == nil { + return ServerRequestDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "http.server.request.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("Duration of HTTP server requests."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ServerRequestDuration{noop.Float64Histogram{}}, err + } + return ServerRequestDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerRequestDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. 
+func (ServerRequestDuration) Name() string {
+	return "http.server.request.duration"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ServerRequestDuration) Unit() string {
+	return "s"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ServerRequestDuration) Description() string {
+	return "Duration of HTTP server requests."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// The requestMethod is the HTTP request method.
+//
+// The urlScheme is the [URI scheme] component identifying the used protocol.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func (m ServerRequestDuration) Record(
+	ctx context.Context,
+	val float64,
+	requestMethod RequestMethodAttr,
+	urlScheme string,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("http.request.method", string(requestMethod)),
+				attribute.String("url.scheme", urlScheme),
+			)...,
+		),
+	)
+
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m ServerRequestDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		// Return early so the value is not recorded a second time below.
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents a class of error the operation ended with.
+func (ServerRequestDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrResponseStatusCode returns an optional attribute for the +// "http.response.status_code" semantic convention. It represents the +// [HTTP response status code]. +// +// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6 +func (ServerRequestDuration) AttrResponseStatusCode(val int) attribute.KeyValue { + return attribute.Int("http.response.status_code", val) +} + +// AttrRoute returns an optional attribute for the "http.route" semantic +// convention. It represents the matched route, that is, the path template in the +// format used by the respective server framework. +func (ServerRequestDuration) AttrRoute(val string) attribute.KeyValue { + return attribute.String("http.route", val) +} + +// AttrNetworkProtocolName returns an optional attribute for the +// "network.protocol.name" semantic convention. It represents the +// [OSI application layer] or non-OSI equivalent. +// +// [OSI application layer]: https://wikipedia.org/wiki/Application_layer +func (ServerRequestDuration) AttrNetworkProtocolName(val string) attribute.KeyValue { + return attribute.String("network.protocol.name", val) +} + +// AttrNetworkProtocolVersion returns an optional attribute for the +// "network.protocol.version" semantic convention. It represents the actual +// version of the protocol used for network communication. +func (ServerRequestDuration) AttrNetworkProtocolVersion(val string) attribute.KeyValue { + return attribute.String("network.protocol.version", val) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the name of the local HTTP server that +// received the request. 
+func (ServerRequestDuration) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the port of the local HTTP server that received the +// request. +func (ServerRequestDuration) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// AttrUserAgentSyntheticType returns an optional attribute for the +// "user_agent.synthetic.type" semantic convention. It represents the specifies +// the category of synthetic traffic, such as tests or bots. +func (ServerRequestDuration) AttrUserAgentSyntheticType(val UserAgentSyntheticTypeAttr) attribute.KeyValue { + return attribute.String("user_agent.synthetic.type", string(val)) +} + +// ServerResponseBodySize is an instrument used to record metric values +// conforming to the "http.server.response.body.size" semantic conventions. It +// represents the size of HTTP server response bodies. +type ServerResponseBodySize struct { + metric.Int64Histogram +} + +// NewServerResponseBodySize returns a new ServerResponseBodySize instrument. +func NewServerResponseBodySize( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ServerResponseBodySize, error) { + // Check if the meter is nil. + if m == nil { + return ServerResponseBodySize{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "http.server.response.body.size", + append([]metric.Int64HistogramOption{ + metric.WithDescription("Size of HTTP server response bodies."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ServerResponseBodySize{noop.Int64Histogram{}}, err + } + return ServerResponseBodySize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerResponseBodySize) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. 
+func (ServerResponseBodySize) Name() string { + return "http.server.response.body.size" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerResponseBodySize) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ServerResponseBodySize) Description() string { + return "Size of HTTP server response bodies." +} + +// Record records val to the current distribution for attrs. +// +// The requestMethod is the HTTP request method. +// +// The urlScheme is the the [URI scheme] component identifying the used protocol. +// +// All additional attrs passed are included in the recorded value. +// +// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1 +// +// The size of the response payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length] header. For requests using transport encoding, this should be +// the compressed size. +// +// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length +func (m ServerResponseBodySize) Record( + ctx context.Context, + val int64, + requestMethod RequestMethodAttr, + urlScheme string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("http.request.method", string(requestMethod)), + attribute.String("url.scheme", urlScheme), + )..., + ), + ) + + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// The size of the response payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length] header. 
For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func (m ServerResponseBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Histogram.Record(ctx, val)
+		// Return early so the value is not recorded a second time below.
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents a class of error the operation ended with.
+func (ServerResponseBodySize) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+	return attribute.String("error.type", string(val))
+}
+
+// AttrResponseStatusCode returns an optional attribute for the
+// "http.response.status_code" semantic convention. It represents the
+// [HTTP response status code].
+//
+// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+func (ServerResponseBodySize) AttrResponseStatusCode(val int) attribute.KeyValue {
+	return attribute.Int("http.response.status_code", val)
+}
+
+// AttrRoute returns an optional attribute for the "http.route" semantic
+// convention. It represents the matched route, that is, the path template in the
+// format used by the respective server framework.
+func (ServerResponseBodySize) AttrRoute(val string) attribute.KeyValue {
+	return attribute.String("http.route", val)
+}
+
+// AttrNetworkProtocolName returns an optional attribute for the
+// "network.protocol.name" semantic convention. It represents the
+// [OSI application layer] or non-OSI equivalent.
+// +// [OSI application layer]: https://wikipedia.org/wiki/Application_layer +func (ServerResponseBodySize) AttrNetworkProtocolName(val string) attribute.KeyValue { + return attribute.String("network.protocol.name", val) +} + +// AttrNetworkProtocolVersion returns an optional attribute for the +// "network.protocol.version" semantic convention. It represents the actual +// version of the protocol used for network communication. +func (ServerResponseBodySize) AttrNetworkProtocolVersion(val string) attribute.KeyValue { + return attribute.String("network.protocol.version", val) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the name of the local HTTP server that +// received the request. +func (ServerResponseBodySize) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the port of the local HTTP server that received the +// request. +func (ServerResponseBodySize) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// AttrUserAgentSyntheticType returns an optional attribute for the +// "user_agent.synthetic.type" semantic convention. It represents the specifies +// the category of synthetic traffic, such as tests or bots. +func (ServerResponseBodySize) AttrUserAgentSyntheticType(val UserAgentSyntheticTypeAttr) attribute.KeyValue { + return attribute.String("user_agent.synthetic.type", string(val)) +} \ No newline at end of file diff --git a/semconv/v1.37.0/hwconv/metric.go b/semconv/v1.37.0/hwconv/metric.go new file mode 100644 index 00000000000..b687e4bb37c --- /dev/null +++ b/semconv/v1.37.0/hwconv/metric.go @@ -0,0 +1,5939 @@ +// Code generated from semantic convention specification. DO NOT EDIT. 
+ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package httpconv provides types and functionality for OpenTelemetry semantic +// conventions in the "hw" namespace. +package hwconv + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" +) + +var ( + addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }} + recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }} +) + +// ErrorTypeAttr is an attribute conforming to the error.type semantic +// conventions. It represents the type of error encountered by the component. +type ErrorTypeAttr string + +var ( + // ErrorTypeOther is a fallback error value to be used when the instrumentation + // doesn't define a custom value. + ErrorTypeOther ErrorTypeAttr = "_OTHER" +) + +// BatteryStateAttr is an attribute conforming to the hw.battery.state semantic +// conventions. It represents the current state of the battery. +type BatteryStateAttr string + +var ( + // BatteryStateCharging is the charging. + BatteryStateCharging BatteryStateAttr = "charging" + // BatteryStateDischarging is the discharging. + BatteryStateDischarging BatteryStateAttr = "discharging" +) + +// GpuTaskAttr is an attribute conforming to the hw.gpu.task semantic +// conventions. It represents the type of task the GPU is performing. +type GpuTaskAttr string + +var ( + // GpuTaskDecoder is the decoder. + GpuTaskDecoder GpuTaskAttr = "decoder" + // GpuTaskEncoder is the encoder. + GpuTaskEncoder GpuTaskAttr = "encoder" + // GpuTaskGeneral is the general. + GpuTaskGeneral GpuTaskAttr = "general" +) + +// LimitTypeAttr is an attribute conforming to the hw.limit_type semantic +// conventions. It represents the represents battery charge level thresholds +// relevant to device operation and health. 
Each `limit_type` denotes a specific +// charge limit such as the minimum or maximum optimal charge, the shutdown +// threshold, or energy-saving thresholds. These values are typically provided by +// the hardware or firmware to guide safe and efficient battery usage. +type LimitTypeAttr string + +var ( + // LimitTypeCritical is the critical. + LimitTypeCritical LimitTypeAttr = "critical" + // LimitTypeDegraded is the degraded. + LimitTypeDegraded LimitTypeAttr = "degraded" + // LimitTypeHighCritical is the high Critical. + LimitTypeHighCritical LimitTypeAttr = "high.critical" + // LimitTypeHighDegraded is the high Degraded. + LimitTypeHighDegraded LimitTypeAttr = "high.degraded" + // LimitTypeLowCritical is the low Critical. + LimitTypeLowCritical LimitTypeAttr = "low.critical" + // LimitTypeLowDegraded is the low Degraded. + LimitTypeLowDegraded LimitTypeAttr = "low.degraded" + // LimitTypeMax is the maximum. + LimitTypeMax LimitTypeAttr = "max" + // LimitTypeThrottled is the throttled. + LimitTypeThrottled LimitTypeAttr = "throttled" + // LimitTypeTurbo is the turbo. + LimitTypeTurbo LimitTypeAttr = "turbo" +) + +// LogicalDiskStateAttr is an attribute conforming to the hw.logical_disk.state +// semantic conventions. It represents the state of the logical disk space usage. +type LogicalDiskStateAttr string + +var ( + // LogicalDiskStateUsed is the used. + LogicalDiskStateUsed LogicalDiskStateAttr = "used" + // LogicalDiskStateFree is the free. + LogicalDiskStateFree LogicalDiskStateAttr = "free" +) + +// PhysicalDiskStateAttr is an attribute conforming to the hw.physical_disk.state +// semantic conventions. It represents the state of the physical disk endurance +// utilization. +type PhysicalDiskStateAttr string + +var ( + // PhysicalDiskStateRemaining is the remaining. + PhysicalDiskStateRemaining PhysicalDiskStateAttr = "remaining" +) + +// StateAttr is an attribute conforming to the hw.state semantic conventions. 
It +// represents the current state of the component. +type StateAttr string + +var ( + // StateDegraded is the degraded. + StateDegraded StateAttr = "degraded" + // StateFailed is the failed. + StateFailed StateAttr = "failed" + // StateNeedsCleaning is the needs Cleaning. + StateNeedsCleaning StateAttr = "needs_cleaning" + // StateOk is the OK. + StateOk StateAttr = "ok" + // StatePredictedFailure is the predicted Failure. + StatePredictedFailure StateAttr = "predicted_failure" +) + +// TapeDriveOperationTypeAttr is an attribute conforming to the +// hw.tape_drive.operation_type semantic conventions. It represents the type of +// tape drive operation. +type TapeDriveOperationTypeAttr string + +var ( + // TapeDriveOperationTypeMount is the mount. + TapeDriveOperationTypeMount TapeDriveOperationTypeAttr = "mount" + // TapeDriveOperationTypeUnmount is the unmount. + TapeDriveOperationTypeUnmount TapeDriveOperationTypeAttr = "unmount" + // TapeDriveOperationTypeClean is the clean. + TapeDriveOperationTypeClean TapeDriveOperationTypeAttr = "clean" +) + +// TypeAttr is an attribute conforming to the hw.type semantic conventions. It +// represents the type of the component. +type TypeAttr string + +var ( + // TypeBattery is the battery. + TypeBattery TypeAttr = "battery" + // TypeCPU is the CPU. + TypeCPU TypeAttr = "cpu" + // TypeDiskController is the disk controller. + TypeDiskController TypeAttr = "disk_controller" + // TypeEnclosure is the enclosure. + TypeEnclosure TypeAttr = "enclosure" + // TypeFan is the fan. + TypeFan TypeAttr = "fan" + // TypeGpu is the GPU. + TypeGpu TypeAttr = "gpu" + // TypeLogicalDisk is the logical disk. + TypeLogicalDisk TypeAttr = "logical_disk" + // TypeMemory is the memory. + TypeMemory TypeAttr = "memory" + // TypeNetwork is the network. + TypeNetwork TypeAttr = "network" + // TypePhysicalDisk is the physical disk. + TypePhysicalDisk TypeAttr = "physical_disk" + // TypePowerSupply is the power supply. 
+ TypePowerSupply TypeAttr = "power_supply" + // TypeTapeDrive is the tape drive. + TypeTapeDrive TypeAttr = "tape_drive" + // TypeTemperature is the temperature. + TypeTemperature TypeAttr = "temperature" + // TypeVoltage is the voltage. + TypeVoltage TypeAttr = "voltage" +) + +// NetworkIODirectionAttr is an attribute conforming to the network.io.direction +// semantic conventions. It represents the direction of network traffic for +// network errors. +type NetworkIODirectionAttr string + +var ( + // NetworkIODirectionTransmit is the standardized value "transmit" of + // NetworkIODirectionAttr. + NetworkIODirectionTransmit NetworkIODirectionAttr = "transmit" + // NetworkIODirectionReceive is the standardized value "receive" of + // NetworkIODirectionAttr. + NetworkIODirectionReceive NetworkIODirectionAttr = "receive" +) + +// BatteryCharge is an instrument used to record metric values conforming to the +// "hw.battery.charge" semantic conventions. It represents the remaining fraction +// of battery charge. +type BatteryCharge struct { + metric.Int64Gauge +} + +// NewBatteryCharge returns a new BatteryCharge instrument. +func NewBatteryCharge( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (BatteryCharge, error) { + // Check if the meter is nil. + if m == nil { + return BatteryCharge{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "hw.battery.charge", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Remaining fraction of battery charge."), + metric.WithUnit("1"), + }, opt...)..., + ) + if err != nil { + return BatteryCharge{noop.Int64Gauge{}}, err + } + return BatteryCharge{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m BatteryCharge) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. 
+func (BatteryCharge) Name() string {
+	return "hw.battery.charge"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (BatteryCharge) Unit() string {
+	return "1"
+}
+
+// Description returns the semantic convention description of the instrument
+func (BatteryCharge) Description() string {
+	return "Remaining fraction of battery charge."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// The id is the an identifier for the hardware component, unique within the
+// monitored host
+//
+// All additional attrs passed are included in the recorded value.
+func (m BatteryCharge) Record(
+	ctx context.Context,
+	val int64,
+	id string,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("hw.id", id),
+			)...,
+		),
+	)
+
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m BatteryCharge) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		// Return on the fast path; falling through would record the value a
+		// second time (matches the AddSet pattern used by counter instruments).
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// AttrBatteryCapacity returns an optional attribute for the
+// "hw.battery.capacity" semantic convention. It represents the design capacity
+// in Watts-hours or Amper-hours.
+func (BatteryCharge) AttrBatteryCapacity(val string) attribute.KeyValue {
+	return attribute.String("hw.battery.capacity", val)
+}
+
+// AttrBatteryChemistry returns an optional attribute for the
+// "hw.battery.chemistry" semantic convention. It represents the battery
+// [chemistry], e.g. Lithium-Ion, Nickel-Cadmium, etc.
+// +// [chemistry]: https://schemas.dmtf.org/wbem/cim-html/2.31.0/CIM_Battery.html +func (BatteryCharge) AttrBatteryChemistry(val string) attribute.KeyValue { + return attribute.String("hw.battery.chemistry", val) +} + +// AttrModel returns an optional attribute for the "hw.model" semantic +// convention. It represents the descriptive model name of the hardware +// component. +func (BatteryCharge) AttrModel(val string) attribute.KeyValue { + return attribute.String("hw.model", val) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (BatteryCharge) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (BatteryCharge) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// AttrVendor returns an optional attribute for the "hw.vendor" semantic +// convention. It represents the vendor name of the hardware component. +func (BatteryCharge) AttrVendor(val string) attribute.KeyValue { + return attribute.String("hw.vendor", val) +} + +// BatteryChargeLimit is an instrument used to record metric values conforming to +// the "hw.battery.charge.limit" semantic conventions. It represents the lower +// limit of battery charge fraction to ensure proper operation. +type BatteryChargeLimit struct { + metric.Int64Gauge +} + +// NewBatteryChargeLimit returns a new BatteryChargeLimit instrument. +func NewBatteryChargeLimit( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (BatteryChargeLimit, error) { + // Check if the meter is nil. 
+ if m == nil { + return BatteryChargeLimit{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "hw.battery.charge.limit", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Lower limit of battery charge fraction to ensure proper operation."), + metric.WithUnit("1"), + }, opt...)..., + ) + if err != nil { + return BatteryChargeLimit{noop.Int64Gauge{}}, err + } + return BatteryChargeLimit{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m BatteryChargeLimit) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (BatteryChargeLimit) Name() string { + return "hw.battery.charge.limit" +} + +// Unit returns the semantic convention unit of the instrument +func (BatteryChargeLimit) Unit() string { + return "1" +} + +// Description returns the semantic convention description of the instrument +func (BatteryChargeLimit) Description() string { + return "Lower limit of battery charge fraction to ensure proper operation." +} + +// Record records val to the current distribution for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// All additional attrs passed are included in the recorded value. +func (m BatteryChargeLimit) Record( + ctx context.Context, + val int64, + id string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + )..., + ), + ) + + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+func (m BatteryChargeLimit) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		// Return on the fast path; falling through would record the value a
+		// second time (matches the AddSet pattern used by counter instruments).
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// AttrBatteryCapacity returns an optional attribute for the
+// "hw.battery.capacity" semantic convention. It represents the design capacity
+// in Watts-hours or Amper-hours.
+func (BatteryChargeLimit) AttrBatteryCapacity(val string) attribute.KeyValue {
+	return attribute.String("hw.battery.capacity", val)
+}
+
+// AttrBatteryChemistry returns an optional attribute for the
+// "hw.battery.chemistry" semantic convention. It represents the battery
+// [chemistry], e.g. Lithium-Ion, Nickel-Cadmium, etc.
+//
+// [chemistry]: https://schemas.dmtf.org/wbem/cim-html/2.31.0/CIM_Battery.html
+func (BatteryChargeLimit) AttrBatteryChemistry(val string) attribute.KeyValue {
+	return attribute.String("hw.battery.chemistry", val)
+}
+
+// AttrLimitType returns an optional attribute for the "hw.limit_type" semantic
+// convention. It represents the represents battery charge level thresholds
+// relevant to device operation and health. Each `limit_type` denotes a specific
+// charge limit such as the minimum or maximum optimal charge, the shutdown
+// threshold, or energy-saving thresholds. These values are typically provided by
+// the hardware or firmware to guide safe and efficient battery usage.
+func (BatteryChargeLimit) AttrLimitType(val LimitTypeAttr) attribute.KeyValue {
+	return attribute.String("hw.limit_type", string(val))
+}
+
+// AttrModel returns an optional attribute for the "hw.model" semantic
+// convention. It represents the descriptive model name of the hardware
+// component.
+func (BatteryChargeLimit) AttrModel(val string) attribute.KeyValue { + return attribute.String("hw.model", val) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (BatteryChargeLimit) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (BatteryChargeLimit) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// AttrVendor returns an optional attribute for the "hw.vendor" semantic +// convention. It represents the vendor name of the hardware component. +func (BatteryChargeLimit) AttrVendor(val string) attribute.KeyValue { + return attribute.String("hw.vendor", val) +} + +// BatteryTimeLeft is an instrument used to record metric values conforming to +// the "hw.battery.time_left" semantic conventions. It represents the time left +// before battery is completely charged or discharged. +type BatteryTimeLeft struct { + metric.Float64Gauge +} + +// NewBatteryTimeLeft returns a new BatteryTimeLeft instrument. +func NewBatteryTimeLeft( + m metric.Meter, + opt ...metric.Float64GaugeOption, +) (BatteryTimeLeft, error) { + // Check if the meter is nil. + if m == nil { + return BatteryTimeLeft{noop.Float64Gauge{}}, nil + } + + i, err := m.Float64Gauge( + "hw.battery.time_left", + append([]metric.Float64GaugeOption{ + metric.WithDescription("Time left before battery is completely charged or discharged."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return BatteryTimeLeft{noop.Float64Gauge{}}, err + } + return BatteryTimeLeft{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m BatteryTimeLeft) Inst() metric.Float64Gauge {
+	return m.Float64Gauge
+}
+
+// Name returns the semantic convention name of the instrument.
+func (BatteryTimeLeft) Name() string {
+	return "hw.battery.time_left"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (BatteryTimeLeft) Unit() string {
+	return "s"
+}
+
+// Description returns the semantic convention description of the instrument
+func (BatteryTimeLeft) Description() string {
+	return "Time left before battery is completely charged or discharged."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// The id is the an identifier for the hardware component, unique within the
+// monitored host
+//
+// The state is the the current state of the component
+//
+// All additional attrs passed are included in the recorded value.
+func (m BatteryTimeLeft) Record(
+	ctx context.Context,
+	val float64,
+	id string,
+	state StateAttr,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Float64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("hw.id", id),
+				attribute.String("hw.state", string(state)),
+			)...,
+		),
+	)
+
+	m.Float64Gauge.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m BatteryTimeLeft) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Gauge.Record(ctx, val)
+		// Return on the fast path; falling through would record the value a
+		// second time (matches the AddSet pattern used by counter instruments).
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Gauge.Record(ctx, val, *o...)
+}
+
+// AttrBatteryState returns an optional attribute for the "hw.battery.state"
+// semantic convention. It represents the current state of the battery.
+func (BatteryTimeLeft) AttrBatteryState(val BatteryStateAttr) attribute.KeyValue { + return attribute.String("hw.battery.state", string(val)) +} + +// AttrBatteryCapacity returns an optional attribute for the +// "hw.battery.capacity" semantic convention. It represents the design capacity +// in Watts-hours or Amper-hours. +func (BatteryTimeLeft) AttrBatteryCapacity(val string) attribute.KeyValue { + return attribute.String("hw.battery.capacity", val) +} + +// AttrBatteryChemistry returns an optional attribute for the +// "hw.battery.chemistry" semantic convention. It represents the battery +// [chemistry], e.g. Lithium-Ion, Nickel-Cadmium, etc. +// +// [chemistry]: https://schemas.dmtf.org/wbem/cim-html/2.31.0/CIM_Battery.html +func (BatteryTimeLeft) AttrBatteryChemistry(val string) attribute.KeyValue { + return attribute.String("hw.battery.chemistry", val) +} + +// AttrModel returns an optional attribute for the "hw.model" semantic +// convention. It represents the descriptive model name of the hardware +// component. +func (BatteryTimeLeft) AttrModel(val string) attribute.KeyValue { + return attribute.String("hw.model", val) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (BatteryTimeLeft) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (BatteryTimeLeft) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// AttrVendor returns an optional attribute for the "hw.vendor" semantic +// convention. It represents the vendor name of the hardware component. 
+func (BatteryTimeLeft) AttrVendor(val string) attribute.KeyValue { + return attribute.String("hw.vendor", val) +} + +// CPUSpeed is an instrument used to record metric values conforming to the +// "hw.cpu.speed" semantic conventions. It represents the CPU current frequency. +type CPUSpeed struct { + metric.Int64Gauge +} + +// NewCPUSpeed returns a new CPUSpeed instrument. +func NewCPUSpeed( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (CPUSpeed, error) { + // Check if the meter is nil. + if m == nil { + return CPUSpeed{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "hw.cpu.speed", + append([]metric.Int64GaugeOption{ + metric.WithDescription("CPU current frequency."), + metric.WithUnit("Hz"), + }, opt...)..., + ) + if err != nil { + return CPUSpeed{noop.Int64Gauge{}}, err + } + return CPUSpeed{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m CPUSpeed) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (CPUSpeed) Name() string { + return "hw.cpu.speed" +} + +// Unit returns the semantic convention unit of the instrument +func (CPUSpeed) Unit() string { + return "Hz" +} + +// Description returns the semantic convention description of the instrument +func (CPUSpeed) Description() string { + return "CPU current frequency." +} + +// Record records val to the current distribution for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// All additional attrs passed are included in the recorded value. 
+func (m CPUSpeed) Record(
+	ctx context.Context,
+	val int64,
+	id string,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("hw.id", id),
+			)...,
+		),
+	)
+
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m CPUSpeed) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		// Return on the fast path; falling through would record the value a
+		// second time (matches the AddSet pattern used by counter instruments).
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// AttrModel returns an optional attribute for the "hw.model" semantic
+// convention. It represents the descriptive model name of the hardware
+// component.
+func (CPUSpeed) AttrModel(val string) attribute.KeyValue {
+	return attribute.String("hw.model", val)
+}
+
+// AttrName returns an optional attribute for the "hw.name" semantic convention.
+// It represents an easily-recognizable name for the hardware component.
+func (CPUSpeed) AttrName(val string) attribute.KeyValue {
+	return attribute.String("hw.name", val)
+}
+
+// AttrParent returns an optional attribute for the "hw.parent" semantic
+// convention. It represents the unique identifier of the parent component
+// (typically the `hw.id` attribute of the enclosure, or disk controller).
+func (CPUSpeed) AttrParent(val string) attribute.KeyValue {
+	return attribute.String("hw.parent", val)
+}
+
+// AttrVendor returns an optional attribute for the "hw.vendor" semantic
+// convention. It represents the vendor name of the hardware component.
+func (CPUSpeed) AttrVendor(val string) attribute.KeyValue { + return attribute.String("hw.vendor", val) +} + +// CPUSpeedLimit is an instrument used to record metric values conforming to the +// "hw.cpu.speed.limit" semantic conventions. It represents the CPU maximum +// frequency. +type CPUSpeedLimit struct { + metric.Int64Gauge +} + +// NewCPUSpeedLimit returns a new CPUSpeedLimit instrument. +func NewCPUSpeedLimit( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (CPUSpeedLimit, error) { + // Check if the meter is nil. + if m == nil { + return CPUSpeedLimit{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "hw.cpu.speed.limit", + append([]metric.Int64GaugeOption{ + metric.WithDescription("CPU maximum frequency."), + metric.WithUnit("Hz"), + }, opt...)..., + ) + if err != nil { + return CPUSpeedLimit{noop.Int64Gauge{}}, err + } + return CPUSpeedLimit{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m CPUSpeedLimit) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (CPUSpeedLimit) Name() string { + return "hw.cpu.speed.limit" +} + +// Unit returns the semantic convention unit of the instrument +func (CPUSpeedLimit) Unit() string { + return "Hz" +} + +// Description returns the semantic convention description of the instrument +func (CPUSpeedLimit) Description() string { + return "CPU maximum frequency." +} + +// Record records val to the current distribution for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// All additional attrs passed are included in the recorded value. 
+func (m CPUSpeedLimit) Record(
+	ctx context.Context,
+	val int64,
+	id string,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("hw.id", id),
+			)...,
+		),
+	)
+
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m CPUSpeedLimit) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		// Return on the fast path; falling through would record the value a
+		// second time (matches the AddSet pattern used by counter instruments).
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// AttrLimitType returns an optional attribute for the "hw.limit_type" semantic
+// convention. It represents the type of limit for hardware components.
+func (CPUSpeedLimit) AttrLimitType(val LimitTypeAttr) attribute.KeyValue {
+	return attribute.String("hw.limit_type", string(val))
+}
+
+// AttrModel returns an optional attribute for the "hw.model" semantic
+// convention. It represents the descriptive model name of the hardware
+// component.
+func (CPUSpeedLimit) AttrModel(val string) attribute.KeyValue {
+	return attribute.String("hw.model", val)
+}
+
+// AttrName returns an optional attribute for the "hw.name" semantic convention.
+// It represents an easily-recognizable name for the hardware component.
+func (CPUSpeedLimit) AttrName(val string) attribute.KeyValue {
+	return attribute.String("hw.name", val)
+}
+
+// AttrParent returns an optional attribute for the "hw.parent" semantic
+// convention. It represents the unique identifier of the parent component
+// (typically the `hw.id` attribute of the enclosure, or disk controller).
+func (CPUSpeedLimit) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// AttrVendor returns an optional attribute for the "hw.vendor" semantic +// convention. It represents the vendor name of the hardware component. +func (CPUSpeedLimit) AttrVendor(val string) attribute.KeyValue { + return attribute.String("hw.vendor", val) +} + +// Energy is an instrument used to record metric values conforming to the +// "hw.energy" semantic conventions. It represents the energy consumed by the +// component. +type Energy struct { + metric.Int64Counter +} + +// NewEnergy returns a new Energy instrument. +func NewEnergy( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (Energy, error) { + // Check if the meter is nil. + if m == nil { + return Energy{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "hw.energy", + append([]metric.Int64CounterOption{ + metric.WithDescription("Energy consumed by the component."), + metric.WithUnit("J"), + }, opt...)..., + ) + if err != nil { + return Energy{noop.Int64Counter{}}, err + } + return Energy{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m Energy) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (Energy) Name() string { + return "hw.energy" +} + +// Unit returns the semantic convention unit of the instrument +func (Energy) Unit() string { + return "J" +} + +// Description returns the semantic convention description of the instrument +func (Energy) Description() string { + return "Energy consumed by the component." +} + +// Add adds incr to the existing count for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// The hwType is the type of the component +// +// All additional attrs passed are included in the recorded value. 
+func (m Energy) Add( + ctx context.Context, + incr int64, + id string, + hwType TypeAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + attribute.String("hw.type", string(hwType)), + )..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m Energy) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (Energy) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (Energy) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// Errors is an instrument used to record metric values conforming to the +// "hw.errors" semantic conventions. It represents the number of errors +// encountered by the component. +type Errors struct { + metric.Int64Counter +} + +// NewErrors returns a new Errors instrument. +func NewErrors( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (Errors, error) { + // Check if the meter is nil. 
+ if m == nil { + return Errors{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "hw.errors", + append([]metric.Int64CounterOption{ + metric.WithDescription("Number of errors encountered by the component."), + metric.WithUnit("{error}"), + }, opt...)..., + ) + if err != nil { + return Errors{noop.Int64Counter{}}, err + } + return Errors{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m Errors) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (Errors) Name() string { + return "hw.errors" +} + +// Unit returns the semantic convention unit of the instrument +func (Errors) Unit() string { + return "{error}" +} + +// Description returns the semantic convention description of the instrument +func (Errors) Description() string { + return "Number of errors encountered by the component." +} + +// Add adds incr to the existing count for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// The hwType is the type of the component +// +// All additional attrs passed are included in the recorded value. +func (m Errors) Add( + ctx context.Context, + incr int64, + id string, + hwType TypeAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + attribute.String("hw.type", string(hwType)), + )..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+func (m Errors) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the type of error encountered by the component. +func (Errors) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (Errors) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (Errors) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// AttrNetworkIODirection returns an optional attribute for the +// "network.io.direction" semantic convention. It represents the direction of +// network traffic for network errors. +func (Errors) AttrNetworkIODirection(val NetworkIODirectionAttr) attribute.KeyValue { + return attribute.String("network.io.direction", string(val)) +} + +// FanSpeed is an instrument used to record metric values conforming to the +// "hw.fan.speed" semantic conventions. It represents the fan speed in +// revolutions per minute. +type FanSpeed struct { + metric.Int64Gauge +} + +// NewFanSpeed returns a new FanSpeed instrument. +func NewFanSpeed( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (FanSpeed, error) { + // Check if the meter is nil. 
+ if m == nil { + return FanSpeed{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "hw.fan.speed", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Fan speed in revolutions per minute."), + metric.WithUnit("rpm"), + }, opt...)..., + ) + if err != nil { + return FanSpeed{noop.Int64Gauge{}}, err + } + return FanSpeed{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m FanSpeed) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (FanSpeed) Name() string { + return "hw.fan.speed" +} + +// Unit returns the semantic convention unit of the instrument +func (FanSpeed) Unit() string { + return "rpm" +} + +// Description returns the semantic convention description of the instrument +func (FanSpeed) Description() string { + return "Fan speed in revolutions per minute." +} + +// Record records val to the current distribution for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// All additional attrs passed are included in the recorded value. +func (m FanSpeed) Record( + ctx context.Context, + val int64, + id string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + )..., + ), + ) + + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +func (m FanSpeed) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) 
+} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (FanSpeed) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (FanSpeed) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// AttrSensorLocation returns an optional attribute for the "hw.sensor_location" +// semantic convention. It represents the location of the sensor. +func (FanSpeed) AttrSensorLocation(val string) attribute.KeyValue { + return attribute.String("hw.sensor_location", val) +} + +// FanSpeedLimit is an instrument used to record metric values conforming to the +// "hw.fan.speed.limit" semantic conventions. It represents the speed limit in +// rpm. +type FanSpeedLimit struct { + metric.Int64Gauge +} + +// NewFanSpeedLimit returns a new FanSpeedLimit instrument. +func NewFanSpeedLimit( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (FanSpeedLimit, error) { + // Check if the meter is nil. + if m == nil { + return FanSpeedLimit{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "hw.fan.speed.limit", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Speed limit in rpm."), + metric.WithUnit("rpm"), + }, opt...)..., + ) + if err != nil { + return FanSpeedLimit{noop.Int64Gauge{}}, err + } + return FanSpeedLimit{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m FanSpeedLimit) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. 
+func (FanSpeedLimit) Name() string {
+	return "hw.fan.speed.limit"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (FanSpeedLimit) Unit() string {
+	return "rpm"
+}
+
+// Description returns the semantic convention description of the instrument
+func (FanSpeedLimit) Description() string {
+	return "Speed limit in rpm."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// The id is the an identifier for the hardware component, unique within the
+// monitored host
+//
+// All additional attrs passed are included in the recorded value.
+func (m FanSpeedLimit) Record(
+	ctx context.Context,
+	val int64,
+	id string,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("hw.id", id),
+			)...,
+		),
+	)
+
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m FanSpeedLimit) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		// Return on the fast path; falling through would record the value a
+		// second time (matches the AddSet pattern used by counter instruments).
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// AttrLimitType returns an optional attribute for the "hw.limit_type" semantic
+// convention. It represents the type of limit for hardware components.
+func (FanSpeedLimit) AttrLimitType(val LimitTypeAttr) attribute.KeyValue {
+	return attribute.String("hw.limit_type", string(val))
+}
+
+// AttrName returns an optional attribute for the "hw.name" semantic convention.
+// It represents an easily-recognizable name for the hardware component.
+func (FanSpeedLimit) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (FanSpeedLimit) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// AttrSensorLocation returns an optional attribute for the "hw.sensor_location" +// semantic convention. It represents the location of the sensor. +func (FanSpeedLimit) AttrSensorLocation(val string) attribute.KeyValue { + return attribute.String("hw.sensor_location", val) +} + +// FanSpeedRatio is an instrument used to record metric values conforming to the +// "hw.fan.speed_ratio" semantic conventions. It represents the fan speed +// expressed as a fraction of its maximum speed. +type FanSpeedRatio struct { + metric.Int64Gauge +} + +// NewFanSpeedRatio returns a new FanSpeedRatio instrument. +func NewFanSpeedRatio( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (FanSpeedRatio, error) { + // Check if the meter is nil. + if m == nil { + return FanSpeedRatio{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "hw.fan.speed_ratio", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Fan speed expressed as a fraction of its maximum speed."), + metric.WithUnit("1"), + }, opt...)..., + ) + if err != nil { + return FanSpeedRatio{noop.Int64Gauge{}}, err + } + return FanSpeedRatio{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m FanSpeedRatio) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. 
+func (FanSpeedRatio) Name() string { + return "hw.fan.speed_ratio" +} + +// Unit returns the semantic convention unit of the instrument +func (FanSpeedRatio) Unit() string { + return "1" +} + +// Description returns the semantic convention description of the instrument +func (FanSpeedRatio) Description() string { + return "Fan speed expressed as a fraction of its maximum speed." +} + +// Record records val to the current distribution for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// All additional attrs passed are included in the recorded value. +func (m FanSpeedRatio) Record( + ctx context.Context, + val int64, + id string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + )..., + ), + ) + + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +func (m FanSpeedRatio) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (FanSpeedRatio) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). 
+func (FanSpeedRatio) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// AttrSensorLocation returns an optional attribute for the "hw.sensor_location" +// semantic convention. It represents the location of the sensor. +func (FanSpeedRatio) AttrSensorLocation(val string) attribute.KeyValue { + return attribute.String("hw.sensor_location", val) +} + +// GpuIO is an instrument used to record metric values conforming to the +// "hw.gpu.io" semantic conventions. It represents the received and transmitted +// bytes by the GPU. +type GpuIO struct { + metric.Int64Counter +} + +// NewGpuIO returns a new GpuIO instrument. +func NewGpuIO( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (GpuIO, error) { + // Check if the meter is nil. + if m == nil { + return GpuIO{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "hw.gpu.io", + append([]metric.Int64CounterOption{ + metric.WithDescription("Received and transmitted bytes by the GPU."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return GpuIO{noop.Int64Counter{}}, err + } + return GpuIO{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m GpuIO) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (GpuIO) Name() string { + return "hw.gpu.io" +} + +// Unit returns the semantic convention unit of the instrument +func (GpuIO) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (GpuIO) Description() string { + return "Received and transmitted bytes by the GPU." +} + +// Add adds incr to the existing count for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// The networkIoDirection is the the network IO operation direction. +// +// All additional attrs passed are included in the recorded value. 
+func (m GpuIO) Add( + ctx context.Context, + incr int64, + id string, + networkIoDirection NetworkIODirectionAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + attribute.String("network.io.direction", string(networkIoDirection)), + )..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m GpuIO) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrDriverVersion returns an optional attribute for the "hw.driver_version" +// semantic convention. It represents the driver version for the hardware +// component. +func (GpuIO) AttrDriverVersion(val string) attribute.KeyValue { + return attribute.String("hw.driver_version", val) +} + +// AttrFirmwareVersion returns an optional attribute for the +// "hw.firmware_version" semantic convention. It represents the firmware version +// of the hardware component. +func (GpuIO) AttrFirmwareVersion(val string) attribute.KeyValue { + return attribute.String("hw.firmware_version", val) +} + +// AttrModel returns an optional attribute for the "hw.model" semantic +// convention. It represents the descriptive model name of the hardware +// component. +func (GpuIO) AttrModel(val string) attribute.KeyValue { + return attribute.String("hw.model", val) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. 
+func (GpuIO) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (GpuIO) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// AttrSerialNumber returns an optional attribute for the "hw.serial_number" +// semantic convention. It represents the serial number of the hardware +// component. +func (GpuIO) AttrSerialNumber(val string) attribute.KeyValue { + return attribute.String("hw.serial_number", val) +} + +// AttrVendor returns an optional attribute for the "hw.vendor" semantic +// convention. It represents the vendor name of the hardware component. +func (GpuIO) AttrVendor(val string) attribute.KeyValue { + return attribute.String("hw.vendor", val) +} + +// GpuMemoryLimit is an instrument used to record metric values conforming to the +// "hw.gpu.memory.limit" semantic conventions. It represents the size of the GPU +// memory. +type GpuMemoryLimit struct { + metric.Int64UpDownCounter +} + +// NewGpuMemoryLimit returns a new GpuMemoryLimit instrument. +func NewGpuMemoryLimit( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (GpuMemoryLimit, error) { + // Check if the meter is nil. + if m == nil { + return GpuMemoryLimit{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "hw.gpu.memory.limit", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Size of the GPU memory."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return GpuMemoryLimit{noop.Int64UpDownCounter{}}, err + } + return GpuMemoryLimit{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m GpuMemoryLimit) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (GpuMemoryLimit) Name() string { + return "hw.gpu.memory.limit" +} + +// Unit returns the semantic convention unit of the instrument +func (GpuMemoryLimit) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (GpuMemoryLimit) Description() string { + return "Size of the GPU memory." +} + +// Add adds incr to the existing count for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// All additional attrs passed are included in the recorded value. +func (m GpuMemoryLimit) Add( + ctx context.Context, + incr int64, + id string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m GpuMemoryLimit) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrDriverVersion returns an optional attribute for the "hw.driver_version" +// semantic convention. It represents the driver version for the hardware +// component. 
+func (GpuMemoryLimit) AttrDriverVersion(val string) attribute.KeyValue { + return attribute.String("hw.driver_version", val) +} + +// AttrFirmwareVersion returns an optional attribute for the +// "hw.firmware_version" semantic convention. It represents the firmware version +// of the hardware component. +func (GpuMemoryLimit) AttrFirmwareVersion(val string) attribute.KeyValue { + return attribute.String("hw.firmware_version", val) +} + +// AttrModel returns an optional attribute for the "hw.model" semantic +// convention. It represents the descriptive model name of the hardware +// component. +func (GpuMemoryLimit) AttrModel(val string) attribute.KeyValue { + return attribute.String("hw.model", val) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (GpuMemoryLimit) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (GpuMemoryLimit) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// AttrSerialNumber returns an optional attribute for the "hw.serial_number" +// semantic convention. It represents the serial number of the hardware +// component. +func (GpuMemoryLimit) AttrSerialNumber(val string) attribute.KeyValue { + return attribute.String("hw.serial_number", val) +} + +// AttrVendor returns an optional attribute for the "hw.vendor" semantic +// convention. It represents the vendor name of the hardware component. 
+func (GpuMemoryLimit) AttrVendor(val string) attribute.KeyValue { + return attribute.String("hw.vendor", val) +} + +// GpuMemoryUsage is an instrument used to record metric values conforming to the +// "hw.gpu.memory.usage" semantic conventions. It represents the GPU memory used. +type GpuMemoryUsage struct { + metric.Int64UpDownCounter +} + +// NewGpuMemoryUsage returns a new GpuMemoryUsage instrument. +func NewGpuMemoryUsage( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (GpuMemoryUsage, error) { + // Check if the meter is nil. + if m == nil { + return GpuMemoryUsage{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "hw.gpu.memory.usage", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("GPU memory used."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return GpuMemoryUsage{noop.Int64UpDownCounter{}}, err + } + return GpuMemoryUsage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m GpuMemoryUsage) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (GpuMemoryUsage) Name() string { + return "hw.gpu.memory.usage" +} + +// Unit returns the semantic convention unit of the instrument +func (GpuMemoryUsage) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (GpuMemoryUsage) Description() string { + return "GPU memory used." +} + +// Add adds incr to the existing count for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// All additional attrs passed are included in the recorded value. 
+func (m GpuMemoryUsage) Add( + ctx context.Context, + incr int64, + id string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m GpuMemoryUsage) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrDriverVersion returns an optional attribute for the "hw.driver_version" +// semantic convention. It represents the driver version for the hardware +// component. +func (GpuMemoryUsage) AttrDriverVersion(val string) attribute.KeyValue { + return attribute.String("hw.driver_version", val) +} + +// AttrFirmwareVersion returns an optional attribute for the +// "hw.firmware_version" semantic convention. It represents the firmware version +// of the hardware component. +func (GpuMemoryUsage) AttrFirmwareVersion(val string) attribute.KeyValue { + return attribute.String("hw.firmware_version", val) +} + +// AttrModel returns an optional attribute for the "hw.model" semantic +// convention. It represents the descriptive model name of the hardware +// component. +func (GpuMemoryUsage) AttrModel(val string) attribute.KeyValue { + return attribute.String("hw.model", val) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. 
+func (GpuMemoryUsage) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (GpuMemoryUsage) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// AttrSerialNumber returns an optional attribute for the "hw.serial_number" +// semantic convention. It represents the serial number of the hardware +// component. +func (GpuMemoryUsage) AttrSerialNumber(val string) attribute.KeyValue { + return attribute.String("hw.serial_number", val) +} + +// AttrVendor returns an optional attribute for the "hw.vendor" semantic +// convention. It represents the vendor name of the hardware component. +func (GpuMemoryUsage) AttrVendor(val string) attribute.KeyValue { + return attribute.String("hw.vendor", val) +} + +// GpuMemoryUtilization is an instrument used to record metric values conforming +// to the "hw.gpu.memory.utilization" semantic conventions. It represents the +// fraction of GPU memory used. +type GpuMemoryUtilization struct { + metric.Int64Gauge +} + +// NewGpuMemoryUtilization returns a new GpuMemoryUtilization instrument. +func NewGpuMemoryUtilization( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (GpuMemoryUtilization, error) { + // Check if the meter is nil. + if m == nil { + return GpuMemoryUtilization{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "hw.gpu.memory.utilization", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Fraction of GPU memory used."), + metric.WithUnit("1"), + }, opt...)..., + ) + if err != nil { + return GpuMemoryUtilization{noop.Int64Gauge{}}, err + } + return GpuMemoryUtilization{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m GpuMemoryUtilization) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (GpuMemoryUtilization) Name() string { + return "hw.gpu.memory.utilization" +} + +// Unit returns the semantic convention unit of the instrument +func (GpuMemoryUtilization) Unit() string { + return "1" +} + +// Description returns the semantic convention description of the instrument +func (GpuMemoryUtilization) Description() string { + return "Fraction of GPU memory used." +} + +// Record records val to the current distribution for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// All additional attrs passed are included in the recorded value. +func (m GpuMemoryUtilization) Record( + ctx context.Context, + val int64, + id string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + )..., + ), + ) + + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +func (m GpuMemoryUtilization) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// AttrDriverVersion returns an optional attribute for the "hw.driver_version" +// semantic convention. It represents the driver version for the hardware +// component. 
+func (GpuMemoryUtilization) AttrDriverVersion(val string) attribute.KeyValue { + return attribute.String("hw.driver_version", val) +} + +// AttrFirmwareVersion returns an optional attribute for the +// "hw.firmware_version" semantic convention. It represents the firmware version +// of the hardware component. +func (GpuMemoryUtilization) AttrFirmwareVersion(val string) attribute.KeyValue { + return attribute.String("hw.firmware_version", val) +} + +// AttrModel returns an optional attribute for the "hw.model" semantic +// convention. It represents the descriptive model name of the hardware +// component. +func (GpuMemoryUtilization) AttrModel(val string) attribute.KeyValue { + return attribute.String("hw.model", val) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (GpuMemoryUtilization) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (GpuMemoryUtilization) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// AttrSerialNumber returns an optional attribute for the "hw.serial_number" +// semantic convention. It represents the serial number of the hardware +// component. +func (GpuMemoryUtilization) AttrSerialNumber(val string) attribute.KeyValue { + return attribute.String("hw.serial_number", val) +} + +// AttrVendor returns an optional attribute for the "hw.vendor" semantic +// convention. It represents the vendor name of the hardware component. 
+func (GpuMemoryUtilization) AttrVendor(val string) attribute.KeyValue { + return attribute.String("hw.vendor", val) +} + +// GpuUtilization is an instrument used to record metric values conforming to the +// "hw.gpu.utilization" semantic conventions. It represents the fraction of time +// spent in a specific task. +type GpuUtilization struct { + metric.Int64Gauge +} + +// NewGpuUtilization returns a new GpuUtilization instrument. +func NewGpuUtilization( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (GpuUtilization, error) { + // Check if the meter is nil. + if m == nil { + return GpuUtilization{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "hw.gpu.utilization", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Fraction of time spent in a specific task."), + metric.WithUnit("1"), + }, opt...)..., + ) + if err != nil { + return GpuUtilization{noop.Int64Gauge{}}, err + } + return GpuUtilization{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m GpuUtilization) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (GpuUtilization) Name() string { + return "hw.gpu.utilization" +} + +// Unit returns the semantic convention unit of the instrument +func (GpuUtilization) Unit() string { + return "1" +} + +// Description returns the semantic convention description of the instrument +func (GpuUtilization) Description() string { + return "Fraction of time spent in a specific task." +} + +// Record records val to the current distribution for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// All additional attrs passed are included in the recorded value. 
+func (m GpuUtilization) Record( + ctx context.Context, + val int64, + id string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + )..., + ), + ) + + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +func (m GpuUtilization) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// AttrDriverVersion returns an optional attribute for the "hw.driver_version" +// semantic convention. It represents the driver version for the hardware +// component. +func (GpuUtilization) AttrDriverVersion(val string) attribute.KeyValue { + return attribute.String("hw.driver_version", val) +} + +// AttrFirmwareVersion returns an optional attribute for the +// "hw.firmware_version" semantic convention. It represents the firmware version +// of the hardware component. +func (GpuUtilization) AttrFirmwareVersion(val string) attribute.KeyValue { + return attribute.String("hw.firmware_version", val) +} + +// AttrGpuTask returns an optional attribute for the "hw.gpu.task" semantic +// convention. It represents the type of task the GPU is performing. +func (GpuUtilization) AttrGpuTask(val GpuTaskAttr) attribute.KeyValue { + return attribute.String("hw.gpu.task", string(val)) +} + +// AttrModel returns an optional attribute for the "hw.model" semantic +// convention. It represents the descriptive model name of the hardware +// component. 
+func (GpuUtilization) AttrModel(val string) attribute.KeyValue { + return attribute.String("hw.model", val) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (GpuUtilization) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (GpuUtilization) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// AttrSerialNumber returns an optional attribute for the "hw.serial_number" +// semantic convention. It represents the serial number of the hardware +// component. +func (GpuUtilization) AttrSerialNumber(val string) attribute.KeyValue { + return attribute.String("hw.serial_number", val) +} + +// AttrVendor returns an optional attribute for the "hw.vendor" semantic +// convention. It represents the vendor name of the hardware component. +func (GpuUtilization) AttrVendor(val string) attribute.KeyValue { + return attribute.String("hw.vendor", val) +} + +// HostAmbientTemperature is an instrument used to record metric values +// conforming to the "hw.host.ambient_temperature" semantic conventions. It +// represents the ambient (external) temperature of the physical host. +type HostAmbientTemperature struct { + metric.Int64Gauge +} + +// NewHostAmbientTemperature returns a new HostAmbientTemperature instrument. +func NewHostAmbientTemperature( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (HostAmbientTemperature, error) { + // Check if the meter is nil. 
+ if m == nil { + return HostAmbientTemperature{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "hw.host.ambient_temperature", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Ambient (external) temperature of the physical host."), + metric.WithUnit("Cel"), + }, opt...)..., + ) + if err != nil { + return HostAmbientTemperature{noop.Int64Gauge{}}, err + } + return HostAmbientTemperature{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m HostAmbientTemperature) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (HostAmbientTemperature) Name() string { + return "hw.host.ambient_temperature" +} + +// Unit returns the semantic convention unit of the instrument +func (HostAmbientTemperature) Unit() string { + return "Cel" +} + +// Description returns the semantic convention description of the instrument +func (HostAmbientTemperature) Description() string { + return "Ambient (external) temperature of the physical host." +} + +// Record records val to the current distribution for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// All additional attrs passed are included in the recorded value. +func (m HostAmbientTemperature) Record( + ctx context.Context, + val int64, + id string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + )..., + ), + ) + + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+func (m HostAmbientTemperature) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (HostAmbientTemperature) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (HostAmbientTemperature) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// HostEnergy is an instrument used to record metric values conforming to the +// "hw.host.energy" semantic conventions. It represents the total energy consumed +// by the entire physical host, in joules. +type HostEnergy struct { + metric.Int64Counter +} + +// NewHostEnergy returns a new HostEnergy instrument. +func NewHostEnergy( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (HostEnergy, error) { + // Check if the meter is nil. + if m == nil { + return HostEnergy{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "hw.host.energy", + append([]metric.Int64CounterOption{ + metric.WithDescription("Total energy consumed by the entire physical host, in joules."), + metric.WithUnit("J"), + }, opt...)..., + ) + if err != nil { + return HostEnergy{noop.Int64Counter{}}, err + } + return HostEnergy{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m HostEnergy) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (HostEnergy) Name() string { + return "hw.host.energy" +} + +// Unit returns the semantic convention unit of the instrument +func (HostEnergy) Unit() string { + return "J" +} + +// Description returns the semantic convention description of the instrument +func (HostEnergy) Description() string { + return "Total energy consumed by the entire physical host, in joules." +} + +// Add adds incr to the existing count for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// All additional attrs passed are included in the recorded value. +// +// The overall energy usage of a host MUST be reported using the specific +// `hw.host.energy` and `hw.host.power` metrics **only**, instead of the generic +// `hw.energy` and `hw.power` described in the previous section, to prevent +// summing up overlapping values. +func (m HostEnergy) Add( + ctx context.Context, + incr int64, + id string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + )..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// The overall energy usage of a host MUST be reported using the specific +// `hw.host.energy` and `hw.host.power` metrics **only**, instead of the generic +// `hw.energy` and `hw.power` described in the previous section, to prevent +// summing up overlapping values. 
+func (m HostEnergy) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (HostEnergy) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (HostEnergy) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// HostHeatingMargin is an instrument used to record metric values conforming to +// the "hw.host.heating_margin" semantic conventions. It represents the by how +// many degrees Celsius the temperature of the physical host can be increased, +// before reaching a warning threshold on one of the internal sensors. +type HostHeatingMargin struct { + metric.Int64Gauge +} + +// NewHostHeatingMargin returns a new HostHeatingMargin instrument. +func NewHostHeatingMargin( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (HostHeatingMargin, error) { + // Check if the meter is nil. 
+ if m == nil { + return HostHeatingMargin{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "hw.host.heating_margin", + append([]metric.Int64GaugeOption{ + metric.WithDescription("By how many degrees Celsius the temperature of the physical host can be increased, before reaching a warning threshold on one of the internal sensors."), + metric.WithUnit("Cel"), + }, opt...)..., + ) + if err != nil { + return HostHeatingMargin{noop.Int64Gauge{}}, err + } + return HostHeatingMargin{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m HostHeatingMargin) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (HostHeatingMargin) Name() string { + return "hw.host.heating_margin" +} + +// Unit returns the semantic convention unit of the instrument +func (HostHeatingMargin) Unit() string { + return "Cel" +} + +// Description returns the semantic convention description of the instrument +func (HostHeatingMargin) Description() string { + return "By how many degrees Celsius the temperature of the physical host can be increased, before reaching a warning threshold on one of the internal sensors." +} + +// Record records val to the current distribution for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// All additional attrs passed are included in the recorded value. +func (m HostHeatingMargin) Record( + ctx context.Context, + val int64, + id string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + )..., + ), + ) + + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+func (m HostHeatingMargin) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (HostHeatingMargin) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (HostHeatingMargin) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// HostPower is an instrument used to record metric values conforming to the +// "hw.host.power" semantic conventions. It represents the instantaneous power +// consumed by the entire physical host in Watts (`hw.host.energy` is preferred). +type HostPower struct { + metric.Int64Gauge +} + +// NewHostPower returns a new HostPower instrument. +func NewHostPower( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (HostPower, error) { + // Check if the meter is nil. + if m == nil { + return HostPower{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "hw.host.power", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Instantaneous power consumed by the entire physical host in Watts (`hw.host.energy` is preferred)."), + metric.WithUnit("W"), + }, opt...)..., + ) + if err != nil { + return HostPower{noop.Int64Gauge{}}, err + } + return HostPower{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m HostPower) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (HostPower) Name() string { + return "hw.host.power" +} + +// Unit returns the semantic convention unit of the instrument +func (HostPower) Unit() string { + return "W" +} + +// Description returns the semantic convention description of the instrument +func (HostPower) Description() string { + return "Instantaneous power consumed by the entire physical host in Watts (`hw.host.energy` is preferred)." +} + +// Record records val to the current distribution for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// All additional attrs passed are included in the recorded value. +// +// The overall energy usage of a host MUST be reported using the specific +// `hw.host.energy` and `hw.host.power` metrics **only**, instead of the generic +// `hw.energy` and `hw.power` described in the previous section, to prevent +// summing up overlapping values. +func (m HostPower) Record( + ctx context.Context, + val int64, + id string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + )..., + ), + ) + + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// The overall energy usage of a host MUST be reported using the specific +// `hw.host.energy` and `hw.host.power` metrics **only**, instead of the generic +// `hw.energy` and `hw.power` described in the previous section, to prevent +// summing up overlapping values. 
+func (m HostPower) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (HostPower) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (HostPower) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// LogicalDiskLimit is an instrument used to record metric values conforming to +// the "hw.logical_disk.limit" semantic conventions. It represents the size of +// the logical disk. +type LogicalDiskLimit struct { + metric.Int64UpDownCounter +} + +// NewLogicalDiskLimit returns a new LogicalDiskLimit instrument. +func NewLogicalDiskLimit( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (LogicalDiskLimit, error) { + // Check if the meter is nil. + if m == nil { + return LogicalDiskLimit{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "hw.logical_disk.limit", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Size of the logical disk."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return LogicalDiskLimit{noop.Int64UpDownCounter{}}, err + } + return LogicalDiskLimit{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m LogicalDiskLimit) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (LogicalDiskLimit) Name() string { + return "hw.logical_disk.limit" +} + +// Unit returns the semantic convention unit of the instrument +func (LogicalDiskLimit) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (LogicalDiskLimit) Description() string { + return "Size of the logical disk." +} + +// Add adds incr to the existing count for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// All additional attrs passed are included in the recorded value. +func (m LogicalDiskLimit) Add( + ctx context.Context, + incr int64, + id string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m LogicalDiskLimit) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrLogicalDiskRaidLevel returns an optional attribute for the +// "hw.logical_disk.raid_level" semantic convention. It represents the RAID Level +// of the logical disk. 
+func (LogicalDiskLimit) AttrLogicalDiskRaidLevel(val string) attribute.KeyValue { + return attribute.String("hw.logical_disk.raid_level", val) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (LogicalDiskLimit) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (LogicalDiskLimit) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// LogicalDiskUsage is an instrument used to record metric values conforming to +// the "hw.logical_disk.usage" semantic conventions. It represents the logical +// disk space usage. +type LogicalDiskUsage struct { + metric.Int64UpDownCounter +} + +// NewLogicalDiskUsage returns a new LogicalDiskUsage instrument. +func NewLogicalDiskUsage( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (LogicalDiskUsage, error) { + // Check if the meter is nil. + if m == nil { + return LogicalDiskUsage{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "hw.logical_disk.usage", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Logical disk space usage."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return LogicalDiskUsage{noop.Int64UpDownCounter{}}, err + } + return LogicalDiskUsage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m LogicalDiskUsage) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (LogicalDiskUsage) Name() string { + return "hw.logical_disk.usage" +} + +// Unit returns the semantic convention unit of the instrument +func (LogicalDiskUsage) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (LogicalDiskUsage) Description() string { + return "Logical disk space usage." +} + +// Add adds incr to the existing count for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// The logicalDiskState is the state of the logical disk space usage +// +// All additional attrs passed are included in the recorded value. +func (m LogicalDiskUsage) Add( + ctx context.Context, + incr int64, + id string, + logicalDiskState LogicalDiskStateAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + attribute.String("hw.logical_disk.state", string(logicalDiskState)), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m LogicalDiskUsage) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrLogicalDiskRaidLevel returns an optional attribute for the +// "hw.logical_disk.raid_level" semantic convention. It represents the RAID Level +// of the logical disk. 
+func (LogicalDiskUsage) AttrLogicalDiskRaidLevel(val string) attribute.KeyValue { + return attribute.String("hw.logical_disk.raid_level", val) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (LogicalDiskUsage) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (LogicalDiskUsage) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// LogicalDiskUtilization is an instrument used to record metric values +// conforming to the "hw.logical_disk.utilization" semantic conventions. It +// represents the logical disk space utilization as a fraction. +type LogicalDiskUtilization struct { + metric.Int64Gauge +} + +// NewLogicalDiskUtilization returns a new LogicalDiskUtilization instrument. +func NewLogicalDiskUtilization( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (LogicalDiskUtilization, error) { + // Check if the meter is nil. + if m == nil { + return LogicalDiskUtilization{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "hw.logical_disk.utilization", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Logical disk space utilization as a fraction."), + metric.WithUnit("1"), + }, opt...)..., + ) + if err != nil { + return LogicalDiskUtilization{noop.Int64Gauge{}}, err + } + return LogicalDiskUtilization{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m LogicalDiskUtilization) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. 
+func (LogicalDiskUtilization) Name() string {
+	return "hw.logical_disk.utilization"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (LogicalDiskUtilization) Unit() string {
+	return "1"
+}
+
+// Description returns the semantic convention description of the instrument
+func (LogicalDiskUtilization) Description() string {
+	return "Logical disk space utilization as a fraction."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// The id is the an identifier for the hardware component, unique within the
+// monitored host
+//
+// The logicalDiskState is the state of the logical disk space usage
+//
+// All additional attrs passed are included in the recorded value.
+func (m LogicalDiskUtilization) Record(
+	ctx context.Context,
+	val int64,
+	id string,
+	logicalDiskState LogicalDiskStateAttr,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("hw.id", id),
+				attribute.String("hw.logical_disk.state", string(logicalDiskState)),
+			)...,
+		),
+	)
+
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m LogicalDiskUtilization) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// AttrLogicalDiskRaidLevel returns an optional attribute for the
+// "hw.logical_disk.raid_level" semantic convention. It represents the RAID Level
+// of the logical disk.
+func (LogicalDiskUtilization) AttrLogicalDiskRaidLevel(val string) attribute.KeyValue { + return attribute.String("hw.logical_disk.raid_level", val) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (LogicalDiskUtilization) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (LogicalDiskUtilization) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// MemorySize is an instrument used to record metric values conforming to the +// "hw.memory.size" semantic conventions. It represents the size of the memory +// module. +type MemorySize struct { + metric.Int64UpDownCounter +} + +// NewMemorySize returns a new MemorySize instrument. +func NewMemorySize( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (MemorySize, error) { + // Check if the meter is nil. + if m == nil { + return MemorySize{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "hw.memory.size", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Size of the memory module."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return MemorySize{noop.Int64UpDownCounter{}}, err + } + return MemorySize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m MemorySize) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (MemorySize) Name() string { + return "hw.memory.size" +} + +// Unit returns the semantic convention unit of the instrument +func (MemorySize) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (MemorySize) Description() string { + return "Size of the memory module." +} + +// Add adds incr to the existing count for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// All additional attrs passed are included in the recorded value. +func (m MemorySize) Add( + ctx context.Context, + incr int64, + id string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m MemorySize) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrMemoryType returns an optional attribute for the "hw.memory.type" semantic +// convention. It represents the type of the memory module. +func (MemorySize) AttrMemoryType(val string) attribute.KeyValue { + return attribute.String("hw.memory.type", val) +} + +// AttrModel returns an optional attribute for the "hw.model" semantic +// convention. It represents the descriptive model name of the hardware +// component. 
+func (MemorySize) AttrModel(val string) attribute.KeyValue { + return attribute.String("hw.model", val) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (MemorySize) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (MemorySize) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// AttrSerialNumber returns an optional attribute for the "hw.serial_number" +// semantic convention. It represents the serial number of the hardware +// component. +func (MemorySize) AttrSerialNumber(val string) attribute.KeyValue { + return attribute.String("hw.serial_number", val) +} + +// AttrVendor returns an optional attribute for the "hw.vendor" semantic +// convention. It represents the vendor name of the hardware component. +func (MemorySize) AttrVendor(val string) attribute.KeyValue { + return attribute.String("hw.vendor", val) +} + +// NetworkBandwidthLimit is an instrument used to record metric values conforming +// to the "hw.network.bandwidth.limit" semantic conventions. It represents the +// link speed. +type NetworkBandwidthLimit struct { + metric.Int64UpDownCounter +} + +// NewNetworkBandwidthLimit returns a new NetworkBandwidthLimit instrument. +func NewNetworkBandwidthLimit( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (NetworkBandwidthLimit, error) { + // Check if the meter is nil. 
+ if m == nil { + return NetworkBandwidthLimit{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "hw.network.bandwidth.limit", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Link speed."), + metric.WithUnit("By/s"), + }, opt...)..., + ) + if err != nil { + return NetworkBandwidthLimit{noop.Int64UpDownCounter{}}, err + } + return NetworkBandwidthLimit{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NetworkBandwidthLimit) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (NetworkBandwidthLimit) Name() string { + return "hw.network.bandwidth.limit" +} + +// Unit returns the semantic convention unit of the instrument +func (NetworkBandwidthLimit) Unit() string { + return "By/s" +} + +// Description returns the semantic convention description of the instrument +func (NetworkBandwidthLimit) Description() string { + return "Link speed." +} + +// Add adds incr to the existing count for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// All additional attrs passed are included in the recorded value. +func (m NetworkBandwidthLimit) Add( + ctx context.Context, + incr int64, + id string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+func (m NetworkBandwidthLimit) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrModel returns an optional attribute for the "hw.model" semantic +// convention. It represents the descriptive model name of the hardware +// component. +func (NetworkBandwidthLimit) AttrModel(val string) attribute.KeyValue { + return attribute.String("hw.model", val) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (NetworkBandwidthLimit) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrNetworkLogicalAddresses returns an optional attribute for the +// "hw.network.logical_addresses" semantic convention. It represents the logical +// addresses of the adapter (e.g. IP address, or WWPN). +func (NetworkBandwidthLimit) AttrNetworkLogicalAddresses(val ...string) attribute.KeyValue { + return attribute.StringSlice("hw.network.logical_addresses", val) +} + +// AttrNetworkPhysicalAddress returns an optional attribute for the +// "hw.network.physical_address" semantic convention. It represents the physical +// address of the adapter (e.g. MAC address, or WWNN). +func (NetworkBandwidthLimit) AttrNetworkPhysicalAddress(val string) attribute.KeyValue { + return attribute.String("hw.network.physical_address", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). 
+func (NetworkBandwidthLimit) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// AttrSerialNumber returns an optional attribute for the "hw.serial_number" +// semantic convention. It represents the serial number of the hardware +// component. +func (NetworkBandwidthLimit) AttrSerialNumber(val string) attribute.KeyValue { + return attribute.String("hw.serial_number", val) +} + +// AttrVendor returns an optional attribute for the "hw.vendor" semantic +// convention. It represents the vendor name of the hardware component. +func (NetworkBandwidthLimit) AttrVendor(val string) attribute.KeyValue { + return attribute.String("hw.vendor", val) +} + +// NetworkBandwidthUtilization is an instrument used to record metric values +// conforming to the "hw.network.bandwidth.utilization" semantic conventions. It +// represents the utilization of the network bandwidth as a fraction. +type NetworkBandwidthUtilization struct { + metric.Int64Gauge +} + +// NewNetworkBandwidthUtilization returns a new NetworkBandwidthUtilization +// instrument. +func NewNetworkBandwidthUtilization( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (NetworkBandwidthUtilization, error) { + // Check if the meter is nil. + if m == nil { + return NetworkBandwidthUtilization{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "hw.network.bandwidth.utilization", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Utilization of the network bandwidth as a fraction."), + metric.WithUnit("1"), + }, opt...)..., + ) + if err != nil { + return NetworkBandwidthUtilization{noop.Int64Gauge{}}, err + } + return NetworkBandwidthUtilization{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NetworkBandwidthUtilization) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. 
+func (NetworkBandwidthUtilization) Name() string {
+	return "hw.network.bandwidth.utilization"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (NetworkBandwidthUtilization) Unit() string {
+	return "1"
+}
+
+// Description returns the semantic convention description of the instrument
+func (NetworkBandwidthUtilization) Description() string {
+	return "Utilization of the network bandwidth as a fraction."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// The id is the an identifier for the hardware component, unique within the
+// monitored host
+//
+// All additional attrs passed are included in the recorded value.
+func (m NetworkBandwidthUtilization) Record(
+	ctx context.Context,
+	val int64,
+	id string,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("hw.id", id),
+			)...,
+		),
+	)
+
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m NetworkBandwidthUtilization) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// AttrModel returns an optional attribute for the "hw.model" semantic
+// convention. It represents the descriptive model name of the hardware
+// component.
+func (NetworkBandwidthUtilization) AttrModel(val string) attribute.KeyValue {
+	return attribute.String("hw.model", val)
+}
+
+// AttrName returns an optional attribute for the "hw.name" semantic convention.
+// It represents an easily-recognizable name for the hardware component. +func (NetworkBandwidthUtilization) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrNetworkLogicalAddresses returns an optional attribute for the +// "hw.network.logical_addresses" semantic convention. It represents the logical +// addresses of the adapter (e.g. IP address, or WWPN). +func (NetworkBandwidthUtilization) AttrNetworkLogicalAddresses(val ...string) attribute.KeyValue { + return attribute.StringSlice("hw.network.logical_addresses", val) +} + +// AttrNetworkPhysicalAddress returns an optional attribute for the +// "hw.network.physical_address" semantic convention. It represents the physical +// address of the adapter (e.g. MAC address, or WWNN). +func (NetworkBandwidthUtilization) AttrNetworkPhysicalAddress(val string) attribute.KeyValue { + return attribute.String("hw.network.physical_address", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (NetworkBandwidthUtilization) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// AttrSerialNumber returns an optional attribute for the "hw.serial_number" +// semantic convention. It represents the serial number of the hardware +// component. +func (NetworkBandwidthUtilization) AttrSerialNumber(val string) attribute.KeyValue { + return attribute.String("hw.serial_number", val) +} + +// AttrVendor returns an optional attribute for the "hw.vendor" semantic +// convention. It represents the vendor name of the hardware component. 
+func (NetworkBandwidthUtilization) AttrVendor(val string) attribute.KeyValue { + return attribute.String("hw.vendor", val) +} + +// NetworkIO is an instrument used to record metric values conforming to the +// "hw.network.io" semantic conventions. It represents the received and +// transmitted network traffic in bytes. +type NetworkIO struct { + metric.Int64Counter +} + +// NewNetworkIO returns a new NetworkIO instrument. +func NewNetworkIO( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (NetworkIO, error) { + // Check if the meter is nil. + if m == nil { + return NetworkIO{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "hw.network.io", + append([]metric.Int64CounterOption{ + metric.WithDescription("Received and transmitted network traffic in bytes."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return NetworkIO{noop.Int64Counter{}}, err + } + return NetworkIO{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NetworkIO) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (NetworkIO) Name() string { + return "hw.network.io" +} + +// Unit returns the semantic convention unit of the instrument +func (NetworkIO) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (NetworkIO) Description() string { + return "Received and transmitted network traffic in bytes." +} + +// Add adds incr to the existing count for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// The networkIoDirection is the the network IO operation direction. +// +// All additional attrs passed are included in the recorded value. 
+func (m NetworkIO) Add( + ctx context.Context, + incr int64, + id string, + networkIoDirection NetworkIODirectionAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + attribute.String("network.io.direction", string(networkIoDirection)), + )..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m NetworkIO) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrModel returns an optional attribute for the "hw.model" semantic +// convention. It represents the descriptive model name of the hardware +// component. +func (NetworkIO) AttrModel(val string) attribute.KeyValue { + return attribute.String("hw.model", val) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (NetworkIO) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrNetworkLogicalAddresses returns an optional attribute for the +// "hw.network.logical_addresses" semantic convention. It represents the logical +// addresses of the adapter (e.g. IP address, or WWPN). +func (NetworkIO) AttrNetworkLogicalAddresses(val ...string) attribute.KeyValue { + return attribute.StringSlice("hw.network.logical_addresses", val) +} + +// AttrNetworkPhysicalAddress returns an optional attribute for the +// "hw.network.physical_address" semantic convention. 
It represents the physical +// address of the adapter (e.g. MAC address, or WWNN). +func (NetworkIO) AttrNetworkPhysicalAddress(val string) attribute.KeyValue { + return attribute.String("hw.network.physical_address", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (NetworkIO) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// AttrSerialNumber returns an optional attribute for the "hw.serial_number" +// semantic convention. It represents the serial number of the hardware +// component. +func (NetworkIO) AttrSerialNumber(val string) attribute.KeyValue { + return attribute.String("hw.serial_number", val) +} + +// AttrVendor returns an optional attribute for the "hw.vendor" semantic +// convention. It represents the vendor name of the hardware component. +func (NetworkIO) AttrVendor(val string) attribute.KeyValue { + return attribute.String("hw.vendor", val) +} + +// NetworkPackets is an instrument used to record metric values conforming to the +// "hw.network.packets" semantic conventions. It represents the received and +// transmitted network traffic in packets (or frames). +type NetworkPackets struct { + metric.Int64Counter +} + +// NewNetworkPackets returns a new NetworkPackets instrument. +func NewNetworkPackets( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (NetworkPackets, error) { + // Check if the meter is nil. 
+ if m == nil { + return NetworkPackets{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "hw.network.packets", + append([]metric.Int64CounterOption{ + metric.WithDescription("Received and transmitted network traffic in packets (or frames)."), + metric.WithUnit("{packet}"), + }, opt...)..., + ) + if err != nil { + return NetworkPackets{noop.Int64Counter{}}, err + } + return NetworkPackets{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NetworkPackets) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (NetworkPackets) Name() string { + return "hw.network.packets" +} + +// Unit returns the semantic convention unit of the instrument +func (NetworkPackets) Unit() string { + return "{packet}" +} + +// Description returns the semantic convention description of the instrument +func (NetworkPackets) Description() string { + return "Received and transmitted network traffic in packets (or frames)." +} + +// Add adds incr to the existing count for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// The networkIoDirection is the the network IO operation direction. +// +// All additional attrs passed are included in the recorded value. +func (m NetworkPackets) Add( + ctx context.Context, + incr int64, + id string, + networkIoDirection NetworkIODirectionAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + attribute.String("network.io.direction", string(networkIoDirection)), + )..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+func (m NetworkPackets) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrModel returns an optional attribute for the "hw.model" semantic +// convention. It represents the descriptive model name of the hardware +// component. +func (NetworkPackets) AttrModel(val string) attribute.KeyValue { + return attribute.String("hw.model", val) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (NetworkPackets) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrNetworkLogicalAddresses returns an optional attribute for the +// "hw.network.logical_addresses" semantic convention. It represents the logical +// addresses of the adapter (e.g. IP address, or WWPN). +func (NetworkPackets) AttrNetworkLogicalAddresses(val ...string) attribute.KeyValue { + return attribute.StringSlice("hw.network.logical_addresses", val) +} + +// AttrNetworkPhysicalAddress returns an optional attribute for the +// "hw.network.physical_address" semantic convention. It represents the physical +// address of the adapter (e.g. MAC address, or WWNN). +func (NetworkPackets) AttrNetworkPhysicalAddress(val string) attribute.KeyValue { + return attribute.String("hw.network.physical_address", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). 
+func (NetworkPackets) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// AttrSerialNumber returns an optional attribute for the "hw.serial_number" +// semantic convention. It represents the serial number of the hardware +// component. +func (NetworkPackets) AttrSerialNumber(val string) attribute.KeyValue { + return attribute.String("hw.serial_number", val) +} + +// AttrVendor returns an optional attribute for the "hw.vendor" semantic +// convention. It represents the vendor name of the hardware component. +func (NetworkPackets) AttrVendor(val string) attribute.KeyValue { + return attribute.String("hw.vendor", val) +} + +// NetworkUp is an instrument used to record metric values conforming to the +// "hw.network.up" semantic conventions. It represents the link status: `1` (up) +// or `0` (down). +type NetworkUp struct { + metric.Int64UpDownCounter +} + +// NewNetworkUp returns a new NetworkUp instrument. +func NewNetworkUp( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (NetworkUp, error) { + // Check if the meter is nil. + if m == nil { + return NetworkUp{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "hw.network.up", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Link status: `1` (up) or `0` (down)."), + metric.WithUnit("1"), + }, opt...)..., + ) + if err != nil { + return NetworkUp{noop.Int64UpDownCounter{}}, err + } + return NetworkUp{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NetworkUp) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (NetworkUp) Name() string { + return "hw.network.up" +} + +// Unit returns the semantic convention unit of the instrument +func (NetworkUp) Unit() string { + return "1" +} + +// Description returns the semantic convention description of the instrument +func (NetworkUp) Description() string { + return "Link status: `1` (up) or `0` (down)." +} + +// Add adds incr to the existing count for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// All additional attrs passed are included in the recorded value. +func (m NetworkUp) Add( + ctx context.Context, + incr int64, + id string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m NetworkUp) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrModel returns an optional attribute for the "hw.model" semantic +// convention. It represents the descriptive model name of the hardware +// component. +func (NetworkUp) AttrModel(val string) attribute.KeyValue { + return attribute.String("hw.model", val) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. 
+func (NetworkUp) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrNetworkLogicalAddresses returns an optional attribute for the +// "hw.network.logical_addresses" semantic convention. It represents the logical +// addresses of the adapter (e.g. IP address, or WWPN). +func (NetworkUp) AttrNetworkLogicalAddresses(val ...string) attribute.KeyValue { + return attribute.StringSlice("hw.network.logical_addresses", val) +} + +// AttrNetworkPhysicalAddress returns an optional attribute for the +// "hw.network.physical_address" semantic convention. It represents the physical +// address of the adapter (e.g. MAC address, or WWNN). +func (NetworkUp) AttrNetworkPhysicalAddress(val string) attribute.KeyValue { + return attribute.String("hw.network.physical_address", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (NetworkUp) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// AttrSerialNumber returns an optional attribute for the "hw.serial_number" +// semantic convention. It represents the serial number of the hardware +// component. +func (NetworkUp) AttrSerialNumber(val string) attribute.KeyValue { + return attribute.String("hw.serial_number", val) +} + +// AttrVendor returns an optional attribute for the "hw.vendor" semantic +// convention. It represents the vendor name of the hardware component. +func (NetworkUp) AttrVendor(val string) attribute.KeyValue { + return attribute.String("hw.vendor", val) +} + +// PhysicalDiskEnduranceUtilization is an instrument used to record metric values +// conforming to the "hw.physical_disk.endurance_utilization" semantic +// conventions. It represents the endurance remaining for this SSD disk. 
+type PhysicalDiskEnduranceUtilization struct { + metric.Int64Gauge +} + +// NewPhysicalDiskEnduranceUtilization returns a new +// PhysicalDiskEnduranceUtilization instrument. +func NewPhysicalDiskEnduranceUtilization( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (PhysicalDiskEnduranceUtilization, error) { + // Check if the meter is nil. + if m == nil { + return PhysicalDiskEnduranceUtilization{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "hw.physical_disk.endurance_utilization", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Endurance remaining for this SSD disk."), + metric.WithUnit("1"), + }, opt...)..., + ) + if err != nil { + return PhysicalDiskEnduranceUtilization{noop.Int64Gauge{}}, err + } + return PhysicalDiskEnduranceUtilization{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m PhysicalDiskEnduranceUtilization) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (PhysicalDiskEnduranceUtilization) Name() string { + return "hw.physical_disk.endurance_utilization" +} + +// Unit returns the semantic convention unit of the instrument +func (PhysicalDiskEnduranceUtilization) Unit() string { + return "1" +} + +// Description returns the semantic convention description of the instrument +func (PhysicalDiskEnduranceUtilization) Description() string { + return "Endurance remaining for this SSD disk." +} + +// Record records val to the current distribution for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// The physicalDiskState is the state of the physical disk endurance utilization +// +// All additional attrs passed are included in the recorded value. 
+func (m PhysicalDiskEnduranceUtilization) Record(
+	ctx context.Context,
+	val int64,
+	id string,
+	physicalDiskState PhysicalDiskStateAttr,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("hw.id", id),
+				attribute.String("hw.physical_disk.state", string(physicalDiskState)),
+			)...,
+		),
+	)
+
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m PhysicalDiskEnduranceUtilization) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		// Return early so the value is not recorded a second time below.
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// AttrFirmwareVersion returns an optional attribute for the
+// "hw.firmware_version" semantic convention. It represents the firmware version
+// of the hardware component.
+func (PhysicalDiskEnduranceUtilization) AttrFirmwareVersion(val string) attribute.KeyValue {
+	return attribute.String("hw.firmware_version", val)
+}
+
+// AttrModel returns an optional attribute for the "hw.model" semantic
+// convention. It represents the descriptive model name of the hardware
+// component.
+func (PhysicalDiskEnduranceUtilization) AttrModel(val string) attribute.KeyValue {
+	return attribute.String("hw.model", val)
+}
+
+// AttrName returns an optional attribute for the "hw.name" semantic convention.
+// It represents an easily-recognizable name for the hardware component.
+func (PhysicalDiskEnduranceUtilization) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (PhysicalDiskEnduranceUtilization) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// AttrPhysicalDiskType returns an optional attribute for the +// "hw.physical_disk.type" semantic convention. It represents the type of the +// physical disk. +func (PhysicalDiskEnduranceUtilization) AttrPhysicalDiskType(val string) attribute.KeyValue { + return attribute.String("hw.physical_disk.type", val) +} + +// AttrSerialNumber returns an optional attribute for the "hw.serial_number" +// semantic convention. It represents the serial number of the hardware +// component. +func (PhysicalDiskEnduranceUtilization) AttrSerialNumber(val string) attribute.KeyValue { + return attribute.String("hw.serial_number", val) +} + +// AttrVendor returns an optional attribute for the "hw.vendor" semantic +// convention. It represents the vendor name of the hardware component. +func (PhysicalDiskEnduranceUtilization) AttrVendor(val string) attribute.KeyValue { + return attribute.String("hw.vendor", val) +} + +// PhysicalDiskSize is an instrument used to record metric values conforming to +// the "hw.physical_disk.size" semantic conventions. It represents the size of +// the disk. +type PhysicalDiskSize struct { + metric.Int64UpDownCounter +} + +// NewPhysicalDiskSize returns a new PhysicalDiskSize instrument. +func NewPhysicalDiskSize( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (PhysicalDiskSize, error) { + // Check if the meter is nil. 
+ if m == nil { + return PhysicalDiskSize{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "hw.physical_disk.size", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Size of the disk."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return PhysicalDiskSize{noop.Int64UpDownCounter{}}, err + } + return PhysicalDiskSize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m PhysicalDiskSize) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (PhysicalDiskSize) Name() string { + return "hw.physical_disk.size" +} + +// Unit returns the semantic convention unit of the instrument +func (PhysicalDiskSize) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (PhysicalDiskSize) Description() string { + return "Size of the disk." +} + +// Add adds incr to the existing count for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// All additional attrs passed are included in the recorded value. +func (m PhysicalDiskSize) Add( + ctx context.Context, + incr int64, + id string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+func (m PhysicalDiskSize) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrFirmwareVersion returns an optional attribute for the +// "hw.firmware_version" semantic convention. It represents the firmware version +// of the hardware component. +func (PhysicalDiskSize) AttrFirmwareVersion(val string) attribute.KeyValue { + return attribute.String("hw.firmware_version", val) +} + +// AttrModel returns an optional attribute for the "hw.model" semantic +// convention. It represents the descriptive model name of the hardware +// component. +func (PhysicalDiskSize) AttrModel(val string) attribute.KeyValue { + return attribute.String("hw.model", val) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (PhysicalDiskSize) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (PhysicalDiskSize) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// AttrPhysicalDiskType returns an optional attribute for the +// "hw.physical_disk.type" semantic convention. It represents the type of the +// physical disk. +func (PhysicalDiskSize) AttrPhysicalDiskType(val string) attribute.KeyValue { + return attribute.String("hw.physical_disk.type", val) +} + +// AttrSerialNumber returns an optional attribute for the "hw.serial_number" +// semantic convention. 
It represents the serial number of the hardware +// component. +func (PhysicalDiskSize) AttrSerialNumber(val string) attribute.KeyValue { + return attribute.String("hw.serial_number", val) +} + +// AttrVendor returns an optional attribute for the "hw.vendor" semantic +// convention. It represents the vendor name of the hardware component. +func (PhysicalDiskSize) AttrVendor(val string) attribute.KeyValue { + return attribute.String("hw.vendor", val) +} + +// PhysicalDiskSmart is an instrument used to record metric values conforming to +// the "hw.physical_disk.smart" semantic conventions. It represents the value of +// the corresponding [S.M.A.R.T.] (Self-Monitoring, Analysis, and Reporting +// Technology) attribute. +// +// [S.M.A.R.T.]: https://wikipedia.org/wiki/S.M.A.R.T. +type PhysicalDiskSmart struct { + metric.Int64Gauge +} + +// NewPhysicalDiskSmart returns a new PhysicalDiskSmart instrument. +func NewPhysicalDiskSmart( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (PhysicalDiskSmart, error) { + // Check if the meter is nil. + if m == nil { + return PhysicalDiskSmart{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "hw.physical_disk.smart", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Value of the corresponding [S.M.A.R.T.](https://wikipedia.org/wiki/S.M.A.R.T.) (Self-Monitoring, Analysis, and Reporting Technology) attribute."), + metric.WithUnit("1"), + }, opt...)..., + ) + if err != nil { + return PhysicalDiskSmart{noop.Int64Gauge{}}, err + } + return PhysicalDiskSmart{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m PhysicalDiskSmart) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. 
+func (PhysicalDiskSmart) Name() string {
+	return "hw.physical_disk.smart"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (PhysicalDiskSmart) Unit() string {
+	return "1"
+}
+
+// Description returns the semantic convention description of the instrument
+func (PhysicalDiskSmart) Description() string {
+	return "Value of the corresponding [S.M.A.R.T.](https://wikipedia.org/wiki/S.M.A.R.T.) (Self-Monitoring, Analysis, and Reporting Technology) attribute."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// The id is an identifier for the hardware component, unique within the
+// monitored host
+//
+// All additional attrs passed are included in the recorded value.
+func (m PhysicalDiskSmart) Record(
+	ctx context.Context,
+	val int64,
+	id string,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("hw.id", id),
+			)...,
+		),
+	)
+
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m PhysicalDiskSmart) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		// Return early so the value is not recorded a second time below.
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// AttrFirmwareVersion returns an optional attribute for the
+// "hw.firmware_version" semantic convention. It represents the firmware version
+// of the hardware component.
+func (PhysicalDiskSmart) AttrFirmwareVersion(val string) attribute.KeyValue { + return attribute.String("hw.firmware_version", val) +} + +// AttrModel returns an optional attribute for the "hw.model" semantic +// convention. It represents the descriptive model name of the hardware +// component. +func (PhysicalDiskSmart) AttrModel(val string) attribute.KeyValue { + return attribute.String("hw.model", val) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (PhysicalDiskSmart) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (PhysicalDiskSmart) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// AttrPhysicalDiskSmartAttribute returns an optional attribute for the +// "hw.physical_disk.smart_attribute" semantic convention. It represents the +// [S.M.A.R.T.] (Self-Monitoring, Analysis, and Reporting Technology) attribute +// of the physical disk. +// +// [S.M.A.R.T.]: https://wikipedia.org/wiki/S.M.A.R.T. +func (PhysicalDiskSmart) AttrPhysicalDiskSmartAttribute(val string) attribute.KeyValue { + return attribute.String("hw.physical_disk.smart_attribute", val) +} + +// AttrPhysicalDiskType returns an optional attribute for the +// "hw.physical_disk.type" semantic convention. It represents the type of the +// physical disk. +func (PhysicalDiskSmart) AttrPhysicalDiskType(val string) attribute.KeyValue { + return attribute.String("hw.physical_disk.type", val) +} + +// AttrSerialNumber returns an optional attribute for the "hw.serial_number" +// semantic convention. It represents the serial number of the hardware +// component. 
+func (PhysicalDiskSmart) AttrSerialNumber(val string) attribute.KeyValue { + return attribute.String("hw.serial_number", val) +} + +// AttrVendor returns an optional attribute for the "hw.vendor" semantic +// convention. It represents the vendor name of the hardware component. +func (PhysicalDiskSmart) AttrVendor(val string) attribute.KeyValue { + return attribute.String("hw.vendor", val) +} + +// Power is an instrument used to record metric values conforming to the +// "hw.power" semantic conventions. It represents the instantaneous power +// consumed by the component. +type Power struct { + metric.Int64Gauge +} + +// NewPower returns a new Power instrument. +func NewPower( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (Power, error) { + // Check if the meter is nil. + if m == nil { + return Power{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "hw.power", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Instantaneous power consumed by the component."), + metric.WithUnit("W"), + }, opt...)..., + ) + if err != nil { + return Power{noop.Int64Gauge{}}, err + } + return Power{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m Power) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (Power) Name() string { + return "hw.power" +} + +// Unit returns the semantic convention unit of the instrument +func (Power) Unit() string { + return "W" +} + +// Description returns the semantic convention description of the instrument +func (Power) Description() string { + return "Instantaneous power consumed by the component." +} + +// Record records val to the current distribution for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// The hwType is the type of the component +// +// All additional attrs passed are included in the recorded value. 
+//
+// It is recommended to report `hw.energy` instead of `hw.power` when possible.
+func (m Power) Record(
+	ctx context.Context,
+	val int64,
+	id string,
+	hwType TypeAttr,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("hw.id", id),
+				attribute.String("hw.type", string(hwType)),
+			)...,
+		),
+	)
+
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// It is recommended to report `hw.energy` instead of `hw.power` when possible.
+func (m Power) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		// Return early so the value is not recorded a second time below.
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// AttrName returns an optional attribute for the "hw.name" semantic convention.
+// It represents an easily-recognizable name for the hardware component.
+func (Power) AttrName(val string) attribute.KeyValue {
+	return attribute.String("hw.name", val)
+}
+
+// AttrParent returns an optional attribute for the "hw.parent" semantic
+// convention. It represents the unique identifier of the parent component
+// (typically the `hw.id` attribute of the enclosure, or disk controller).
+func (Power) AttrParent(val string) attribute.KeyValue {
+	return attribute.String("hw.parent", val)
+}
+
+// PowerSupplyLimit is an instrument used to record metric values conforming to
+// the "hw.power_supply.limit" semantic conventions. It represents the maximum
+// power output of the power supply.
+type PowerSupplyLimit struct { + metric.Int64UpDownCounter +} + +// NewPowerSupplyLimit returns a new PowerSupplyLimit instrument. +func NewPowerSupplyLimit( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (PowerSupplyLimit, error) { + // Check if the meter is nil. + if m == nil { + return PowerSupplyLimit{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "hw.power_supply.limit", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Maximum power output of the power supply."), + metric.WithUnit("W"), + }, opt...)..., + ) + if err != nil { + return PowerSupplyLimit{noop.Int64UpDownCounter{}}, err + } + return PowerSupplyLimit{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m PowerSupplyLimit) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (PowerSupplyLimit) Name() string { + return "hw.power_supply.limit" +} + +// Unit returns the semantic convention unit of the instrument +func (PowerSupplyLimit) Unit() string { + return "W" +} + +// Description returns the semantic convention description of the instrument +func (PowerSupplyLimit) Description() string { + return "Maximum power output of the power supply." +} + +// Add adds incr to the existing count for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// All additional attrs passed are included in the recorded value. +func (m PowerSupplyLimit) Add( + ctx context.Context, + incr int64, + id string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) 
+} + +// AddSet adds incr to the existing count for set. +func (m PowerSupplyLimit) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrLimitType returns an optional attribute for the "hw.limit_type" semantic +// convention. It represents the type of limit for hardware components. +func (PowerSupplyLimit) AttrLimitType(val LimitTypeAttr) attribute.KeyValue { + return attribute.String("hw.limit_type", string(val)) +} + +// AttrModel returns an optional attribute for the "hw.model" semantic +// convention. It represents the descriptive model name of the hardware +// component. +func (PowerSupplyLimit) AttrModel(val string) attribute.KeyValue { + return attribute.String("hw.model", val) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (PowerSupplyLimit) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (PowerSupplyLimit) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// AttrSerialNumber returns an optional attribute for the "hw.serial_number" +// semantic convention. It represents the serial number of the hardware +// component. +func (PowerSupplyLimit) AttrSerialNumber(val string) attribute.KeyValue { + return attribute.String("hw.serial_number", val) +} + +// AttrVendor returns an optional attribute for the "hw.vendor" semantic +// convention. 
It represents the vendor name of the hardware component. +func (PowerSupplyLimit) AttrVendor(val string) attribute.KeyValue { + return attribute.String("hw.vendor", val) +} + +// PowerSupplyUsage is an instrument used to record metric values conforming to +// the "hw.power_supply.usage" semantic conventions. It represents the current +// power output of the power supply. +type PowerSupplyUsage struct { + metric.Int64UpDownCounter +} + +// NewPowerSupplyUsage returns a new PowerSupplyUsage instrument. +func NewPowerSupplyUsage( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (PowerSupplyUsage, error) { + // Check if the meter is nil. + if m == nil { + return PowerSupplyUsage{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "hw.power_supply.usage", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Current power output of the power supply."), + metric.WithUnit("W"), + }, opt...)..., + ) + if err != nil { + return PowerSupplyUsage{noop.Int64UpDownCounter{}}, err + } + return PowerSupplyUsage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m PowerSupplyUsage) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (PowerSupplyUsage) Name() string { + return "hw.power_supply.usage" +} + +// Unit returns the semantic convention unit of the instrument +func (PowerSupplyUsage) Unit() string { + return "W" +} + +// Description returns the semantic convention description of the instrument +func (PowerSupplyUsage) Description() string { + return "Current power output of the power supply." +} + +// Add adds incr to the existing count for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// All additional attrs passed are included in the recorded value. 
+func (m PowerSupplyUsage) Add( + ctx context.Context, + incr int64, + id string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m PowerSupplyUsage) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrModel returns an optional attribute for the "hw.model" semantic +// convention. It represents the descriptive model name of the hardware +// component. +func (PowerSupplyUsage) AttrModel(val string) attribute.KeyValue { + return attribute.String("hw.model", val) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (PowerSupplyUsage) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (PowerSupplyUsage) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// AttrSerialNumber returns an optional attribute for the "hw.serial_number" +// semantic convention. It represents the serial number of the hardware +// component. 
+func (PowerSupplyUsage) AttrSerialNumber(val string) attribute.KeyValue { + return attribute.String("hw.serial_number", val) +} + +// AttrVendor returns an optional attribute for the "hw.vendor" semantic +// convention. It represents the vendor name of the hardware component. +func (PowerSupplyUsage) AttrVendor(val string) attribute.KeyValue { + return attribute.String("hw.vendor", val) +} + +// PowerSupplyUtilization is an instrument used to record metric values +// conforming to the "hw.power_supply.utilization" semantic conventions. It +// represents the utilization of the power supply as a fraction of its maximum +// output. +type PowerSupplyUtilization struct { + metric.Int64Gauge +} + +// NewPowerSupplyUtilization returns a new PowerSupplyUtilization instrument. +func NewPowerSupplyUtilization( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (PowerSupplyUtilization, error) { + // Check if the meter is nil. + if m == nil { + return PowerSupplyUtilization{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "hw.power_supply.utilization", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Utilization of the power supply as a fraction of its maximum output."), + metric.WithUnit("1"), + }, opt...)..., + ) + if err != nil { + return PowerSupplyUtilization{noop.Int64Gauge{}}, err + } + return PowerSupplyUtilization{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m PowerSupplyUtilization) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. 
+func (PowerSupplyUtilization) Name() string {
+	return "hw.power_supply.utilization"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (PowerSupplyUtilization) Unit() string {
+	return "1"
+}
+
+// Description returns the semantic convention description of the instrument
+func (PowerSupplyUtilization) Description() string {
+	return "Utilization of the power supply as a fraction of its maximum output."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// The id is an identifier for the hardware component, unique within the
+// monitored host
+//
+// All additional attrs passed are included in the recorded value.
+func (m PowerSupplyUtilization) Record(
+	ctx context.Context,
+	val int64,
+	id string,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("hw.id", id),
+			)...,
+		),
+	)
+
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m PowerSupplyUtilization) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		// Return so the value is not recorded a second time below.
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// AttrModel returns an optional attribute for the "hw.model" semantic
+// convention. It represents the descriptive model name of the hardware
+// component.
+func (PowerSupplyUtilization) AttrModel(val string) attribute.KeyValue {
+	return attribute.String("hw.model", val)
+}
+
+// AttrName returns an optional attribute for the "hw.name" semantic convention.
+// It represents an easily-recognizable name for the hardware component.
+func (PowerSupplyUtilization) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (PowerSupplyUtilization) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// AttrSerialNumber returns an optional attribute for the "hw.serial_number" +// semantic convention. It represents the serial number of the hardware +// component. +func (PowerSupplyUtilization) AttrSerialNumber(val string) attribute.KeyValue { + return attribute.String("hw.serial_number", val) +} + +// AttrVendor returns an optional attribute for the "hw.vendor" semantic +// convention. It represents the vendor name of the hardware component. +func (PowerSupplyUtilization) AttrVendor(val string) attribute.KeyValue { + return attribute.String("hw.vendor", val) +} + +// Status is an instrument used to record metric values conforming to the +// "hw.status" semantic conventions. It represents the operational status: `1` +// (true) or `0` (false) for each of the possible states. +type Status struct { + metric.Int64UpDownCounter +} + +// NewStatus returns a new Status instrument. +func NewStatus( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (Status, error) { + // Check if the meter is nil. + if m == nil { + return Status{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "hw.status", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Operational status: `1` (true) or `0` (false) for each of the possible states."), + metric.WithUnit("1"), + }, opt...)..., + ) + if err != nil { + return Status{noop.Int64UpDownCounter{}}, err + } + return Status{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m Status) Inst() metric.Int64UpDownCounter {
+	return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (Status) Name() string {
+	return "hw.status"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (Status) Unit() string {
+	return "1"
+}
+
+// Description returns the semantic convention description of the instrument
+func (Status) Description() string {
+	return "Operational status: `1` (true) or `0` (false) for each of the possible states."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// The id is an identifier for the hardware component, unique within the
+// monitored host
+//
+// The state is the current state of the component
+//
+// The hwType is the type of the component
+//
+// All additional attrs passed are included in the recorded value.
+//
+// `hw.status` is currently specified as an *UpDownCounter* but would ideally be
+// represented using a [*StateSet* as defined in OpenMetrics]. This semantic
+// convention will be updated once *StateSet* is specified in OpenTelemetry. This
+// planned change is not expected to have any consequence on the way users query
+// their timeseries backend to retrieve the values of `hw.status` over time.
+//
+// [*StateSet* as defined in OpenMetrics]: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#stateset
+func (m Status) Add(
+	ctx context.Context,
+	incr int64,
+	id string,
+	state StateAttr,
+	hwType TypeAttr,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64UpDownCounter.Add(ctx, incr)
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("hw.id", id),
+				attribute.String("hw.state", string(state)),
+				attribute.String("hw.type", string(hwType)),
+			)...,
+		),
+	)
+
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+//
+// `hw.status` is currently specified as an *UpDownCounter* but would ideally be
+// represented using a [*StateSet* as defined in OpenMetrics]. This semantic
+// convention will be updated once *StateSet* is specified in OpenTelemetry. This
+// planned change is not expected to have any consequence on the way users query
+// their timeseries backend to retrieve the values of `hw.status` over time.
+//
+// [*StateSet* as defined in OpenMetrics]: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#stateset
+func (m Status) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64UpDownCounter.Add(ctx, incr)
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AttrName returns an optional attribute for the "hw.name" semantic convention.
+// It represents an easily-recognizable name for the hardware component.
+func (Status) AttrName(val string) attribute.KeyValue {
+	return attribute.String("hw.name", val)
+}
+
+// AttrParent returns an optional attribute for the "hw.parent" semantic
+// convention. It represents the unique identifier of the parent component
+// (typically the `hw.id` attribute of the enclosure, or disk controller).
+func (Status) AttrParent(val string) attribute.KeyValue {
+	return attribute.String("hw.parent", val)
+}
+
+// TapeDriveOperations is an instrument used to record metric values conforming
+// to the "hw.tape_drive.operations" semantic conventions. It represents the
+// operations performed by the tape drive.
+type TapeDriveOperations struct {
+	metric.Int64Counter
+}
+
+// NewTapeDriveOperations returns a new TapeDriveOperations instrument.
+func NewTapeDriveOperations( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (TapeDriveOperations, error) { + // Check if the meter is nil. + if m == nil { + return TapeDriveOperations{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "hw.tape_drive.operations", + append([]metric.Int64CounterOption{ + metric.WithDescription("Operations performed by the tape drive."), + metric.WithUnit("{operation}"), + }, opt...)..., + ) + if err != nil { + return TapeDriveOperations{noop.Int64Counter{}}, err + } + return TapeDriveOperations{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m TapeDriveOperations) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (TapeDriveOperations) Name() string { + return "hw.tape_drive.operations" +} + +// Unit returns the semantic convention unit of the instrument +func (TapeDriveOperations) Unit() string { + return "{operation}" +} + +// Description returns the semantic convention description of the instrument +func (TapeDriveOperations) Description() string { + return "Operations performed by the tape drive." +} + +// Add adds incr to the existing count for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// All additional attrs passed are included in the recorded value. +func (m TapeDriveOperations) Add( + ctx context.Context, + incr int64, + id string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + )..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+func (m TapeDriveOperations) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrModel returns an optional attribute for the "hw.model" semantic +// convention. It represents the descriptive model name of the hardware +// component. +func (TapeDriveOperations) AttrModel(val string) attribute.KeyValue { + return attribute.String("hw.model", val) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (TapeDriveOperations) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (TapeDriveOperations) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// AttrSerialNumber returns an optional attribute for the "hw.serial_number" +// semantic convention. It represents the serial number of the hardware +// component. +func (TapeDriveOperations) AttrSerialNumber(val string) attribute.KeyValue { + return attribute.String("hw.serial_number", val) +} + +// AttrTapeDriveOperationType returns an optional attribute for the +// "hw.tape_drive.operation_type" semantic convention. It represents the type of +// tape drive operation. 
+func (TapeDriveOperations) AttrTapeDriveOperationType(val TapeDriveOperationTypeAttr) attribute.KeyValue { + return attribute.String("hw.tape_drive.operation_type", string(val)) +} + +// AttrVendor returns an optional attribute for the "hw.vendor" semantic +// convention. It represents the vendor name of the hardware component. +func (TapeDriveOperations) AttrVendor(val string) attribute.KeyValue { + return attribute.String("hw.vendor", val) +} + +// Temperature is an instrument used to record metric values conforming to the +// "hw.temperature" semantic conventions. It represents the temperature in +// degrees Celsius. +type Temperature struct { + metric.Int64Gauge +} + +// NewTemperature returns a new Temperature instrument. +func NewTemperature( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (Temperature, error) { + // Check if the meter is nil. + if m == nil { + return Temperature{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "hw.temperature", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Temperature in degrees Celsius."), + metric.WithUnit("Cel"), + }, opt...)..., + ) + if err != nil { + return Temperature{noop.Int64Gauge{}}, err + } + return Temperature{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m Temperature) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (Temperature) Name() string { + return "hw.temperature" +} + +// Unit returns the semantic convention unit of the instrument +func (Temperature) Unit() string { + return "Cel" +} + +// Description returns the semantic convention description of the instrument +func (Temperature) Description() string { + return "Temperature in degrees Celsius." +} + +// Record records val to the current distribution for attrs. 
+//
+// The id is an identifier for the hardware component, unique within the
+// monitored host
+//
+// All additional attrs passed are included in the recorded value.
+func (m Temperature) Record(
+	ctx context.Context,
+	val int64,
+	id string,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("hw.id", id),
+			)...,
+		),
+	)
+
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m Temperature) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		// Return so the value is not recorded a second time below.
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// AttrName returns an optional attribute for the "hw.name" semantic convention.
+// It represents an easily-recognizable name for the hardware component.
+func (Temperature) AttrName(val string) attribute.KeyValue {
+	return attribute.String("hw.name", val)
+}
+
+// AttrParent returns an optional attribute for the "hw.parent" semantic
+// convention. It represents the unique identifier of the parent component
+// (typically the `hw.id` attribute of the enclosure, or disk controller).
+func (Temperature) AttrParent(val string) attribute.KeyValue {
+	return attribute.String("hw.parent", val)
+}
+
+// AttrSensorLocation returns an optional attribute for the "hw.sensor_location"
+// semantic convention. It represents the location of the sensor.
+func (Temperature) AttrSensorLocation(val string) attribute.KeyValue { + return attribute.String("hw.sensor_location", val) +} + +// TemperatureLimit is an instrument used to record metric values conforming to +// the "hw.temperature.limit" semantic conventions. It represents the temperature +// limit in degrees Celsius. +type TemperatureLimit struct { + metric.Int64Gauge +} + +// NewTemperatureLimit returns a new TemperatureLimit instrument. +func NewTemperatureLimit( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (TemperatureLimit, error) { + // Check if the meter is nil. + if m == nil { + return TemperatureLimit{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "hw.temperature.limit", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Temperature limit in degrees Celsius."), + metric.WithUnit("Cel"), + }, opt...)..., + ) + if err != nil { + return TemperatureLimit{noop.Int64Gauge{}}, err + } + return TemperatureLimit{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m TemperatureLimit) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (TemperatureLimit) Name() string { + return "hw.temperature.limit" +} + +// Unit returns the semantic convention unit of the instrument +func (TemperatureLimit) Unit() string { + return "Cel" +} + +// Description returns the semantic convention description of the instrument +func (TemperatureLimit) Description() string { + return "Temperature limit in degrees Celsius." +} + +// Record records val to the current distribution for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// All additional attrs passed are included in the recorded value. 
+func (m TemperatureLimit) Record(
+	ctx context.Context,
+	val int64,
+	id string,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("hw.id", id),
+			)...,
+		),
+	)
+
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m TemperatureLimit) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		// Return so the value is not recorded a second time below.
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// AttrLimitType returns an optional attribute for the "hw.limit_type" semantic
+// convention. It represents the type of limit for hardware components.
+func (TemperatureLimit) AttrLimitType(val LimitTypeAttr) attribute.KeyValue {
+	return attribute.String("hw.limit_type", string(val))
+}
+
+// AttrName returns an optional attribute for the "hw.name" semantic convention.
+// It represents an easily-recognizable name for the hardware component.
+func (TemperatureLimit) AttrName(val string) attribute.KeyValue {
+	return attribute.String("hw.name", val)
+}
+
+// AttrParent returns an optional attribute for the "hw.parent" semantic
+// convention. It represents the unique identifier of the parent component
+// (typically the `hw.id` attribute of the enclosure, or disk controller).
+func (TemperatureLimit) AttrParent(val string) attribute.KeyValue {
+	return attribute.String("hw.parent", val)
+}
+
+// AttrSensorLocation returns an optional attribute for the "hw.sensor_location"
+// semantic convention. It represents the location of the sensor.
+func (TemperatureLimit) AttrSensorLocation(val string) attribute.KeyValue { + return attribute.String("hw.sensor_location", val) +} + +// Voltage is an instrument used to record metric values conforming to the +// "hw.voltage" semantic conventions. It represents the voltage measured by the +// sensor. +type Voltage struct { + metric.Int64Gauge +} + +// NewVoltage returns a new Voltage instrument. +func NewVoltage( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (Voltage, error) { + // Check if the meter is nil. + if m == nil { + return Voltage{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "hw.voltage", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Voltage measured by the sensor."), + metric.WithUnit("V"), + }, opt...)..., + ) + if err != nil { + return Voltage{noop.Int64Gauge{}}, err + } + return Voltage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m Voltage) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (Voltage) Name() string { + return "hw.voltage" +} + +// Unit returns the semantic convention unit of the instrument +func (Voltage) Unit() string { + return "V" +} + +// Description returns the semantic convention description of the instrument +func (Voltage) Description() string { + return "Voltage measured by the sensor." +} + +// Record records val to the current distribution for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// All additional attrs passed are included in the recorded value. 
+func (m Voltage) Record(
+	ctx context.Context,
+	val int64,
+	id string,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("hw.id", id),
+			)...,
+		),
+	)
+
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m Voltage) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		// Return so the value is not recorded a second time below.
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// AttrName returns an optional attribute for the "hw.name" semantic convention.
+// It represents an easily-recognizable name for the hardware component.
+func (Voltage) AttrName(val string) attribute.KeyValue {
+	return attribute.String("hw.name", val)
+}
+
+// AttrParent returns an optional attribute for the "hw.parent" semantic
+// convention. It represents the unique identifier of the parent component
+// (typically the `hw.id` attribute of the enclosure, or disk controller).
+func (Voltage) AttrParent(val string) attribute.KeyValue {
+	return attribute.String("hw.parent", val)
+}
+
+// AttrSensorLocation returns an optional attribute for the "hw.sensor_location"
+// semantic convention. It represents the location of the sensor.
+func (Voltage) AttrSensorLocation(val string) attribute.KeyValue {
+	return attribute.String("hw.sensor_location", val)
+}
+
+// VoltageLimit is an instrument used to record metric values conforming to the
+// "hw.voltage.limit" semantic conventions. It represents the voltage limit in
+// Volts.
+type VoltageLimit struct { + metric.Int64Gauge +} + +// NewVoltageLimit returns a new VoltageLimit instrument. +func NewVoltageLimit( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (VoltageLimit, error) { + // Check if the meter is nil. + if m == nil { + return VoltageLimit{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "hw.voltage.limit", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Voltage limit in Volts."), + metric.WithUnit("V"), + }, opt...)..., + ) + if err != nil { + return VoltageLimit{noop.Int64Gauge{}}, err + } + return VoltageLimit{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m VoltageLimit) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (VoltageLimit) Name() string { + return "hw.voltage.limit" +} + +// Unit returns the semantic convention unit of the instrument +func (VoltageLimit) Unit() string { + return "V" +} + +// Description returns the semantic convention description of the instrument +func (VoltageLimit) Description() string { + return "Voltage limit in Volts." +} + +// Record records val to the current distribution for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// All additional attrs passed are included in the recorded value. +func (m VoltageLimit) Record( + ctx context.Context, + val int64, + id string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + )..., + ), + ) + + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+func (m VoltageLimit) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// AttrLimitType returns an optional attribute for the "hw.limit_type" semantic +// convention. It represents the type of limit for hardware components. +func (VoltageLimit) AttrLimitType(val LimitTypeAttr) attribute.KeyValue { + return attribute.String("hw.limit_type", string(val)) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (VoltageLimit) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (VoltageLimit) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// AttrSensorLocation returns an optional attribute for the "hw.sensor_location" +// semantic convention. It represents the location of the sensor. +func (VoltageLimit) AttrSensorLocation(val string) attribute.KeyValue { + return attribute.String("hw.sensor_location", val) +} + +// VoltageNominal is an instrument used to record metric values conforming to the +// "hw.voltage.nominal" semantic conventions. It represents the nominal +// (expected) voltage. +type VoltageNominal struct { + metric.Int64Gauge +} + +// NewVoltageNominal returns a new VoltageNominal instrument. +func NewVoltageNominal( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (VoltageNominal, error) { + // Check if the meter is nil. 
+ if m == nil { + return VoltageNominal{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "hw.voltage.nominal", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Nominal (expected) voltage."), + metric.WithUnit("V"), + }, opt...)..., + ) + if err != nil { + return VoltageNominal{noop.Int64Gauge{}}, err + } + return VoltageNominal{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m VoltageNominal) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (VoltageNominal) Name() string { + return "hw.voltage.nominal" +} + +// Unit returns the semantic convention unit of the instrument +func (VoltageNominal) Unit() string { + return "V" +} + +// Description returns the semantic convention description of the instrument +func (VoltageNominal) Description() string { + return "Nominal (expected) voltage." +} + +// Record records val to the current distribution for attrs. +// +// The id is the an identifier for the hardware component, unique within the +// monitored host +// +// All additional attrs passed are included in the recorded value. +func (m VoltageNominal) Record( + ctx context.Context, + val int64, + id string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("hw.id", id), + )..., + ), + ) + + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+func (m VoltageNominal) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// AttrName returns an optional attribute for the "hw.name" semantic convention. +// It represents an easily-recognizable name for the hardware component. +func (VoltageNominal) AttrName(val string) attribute.KeyValue { + return attribute.String("hw.name", val) +} + +// AttrParent returns an optional attribute for the "hw.parent" semantic +// convention. It represents the unique identifier of the parent component +// (typically the `hw.id` attribute of the enclosure, or disk controller). +func (VoltageNominal) AttrParent(val string) attribute.KeyValue { + return attribute.String("hw.parent", val) +} + +// AttrSensorLocation returns an optional attribute for the "hw.sensor_location" +// semantic convention. It represents the location of the sensor. +func (VoltageNominal) AttrSensorLocation(val string) attribute.KeyValue { + return attribute.String("hw.sensor_location", val) +} \ No newline at end of file diff --git a/semconv/v1.37.0/k8sconv/metric.go b/semconv/v1.37.0/k8sconv/metric.go new file mode 100644 index 00000000000..05e39c92e01 --- /dev/null +++ b/semconv/v1.37.0/k8sconv/metric.go @@ -0,0 +1,9195 @@ +// Code generated from semantic convention specification. DO NOT EDIT. + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package httpconv provides types and functionality for OpenTelemetry semantic +// conventions in the "k8s" namespace. 
package k8sconv

import (
	"context"
	"sync"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/noop"
)

// Pooled option slices let Add/Record call sites avoid a per-call allocation.
var (
	addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }}
	recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }}
)

// ContainerStatusReasonAttr is an attribute conforming to the
// k8s.container.status.reason semantic conventions. It represents the reason for
// the container state. Corresponds to the `reason` field of the:
// [K8s ContainerStateWaiting] or [K8s ContainerStateTerminated].
//
// [K8s ContainerStateWaiting]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstatewaiting-v1-core
// [K8s ContainerStateTerminated]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstateterminated-v1-core
type ContainerStatusReasonAttr string

var (
	// ContainerStatusReasonContainerCreating indicates the container is being
	// created.
	ContainerStatusReasonContainerCreating ContainerStatusReasonAttr = "ContainerCreating"
	// ContainerStatusReasonCrashLoopBackOff indicates the container is in a
	// crash loop back off state.
	ContainerStatusReasonCrashLoopBackOff ContainerStatusReasonAttr = "CrashLoopBackOff"
	// ContainerStatusReasonCreateContainerConfigError indicates there was an
	// error creating the container configuration.
	ContainerStatusReasonCreateContainerConfigError ContainerStatusReasonAttr = "CreateContainerConfigError"
	// ContainerStatusReasonErrImagePull indicates there was an error pulling the
	// container image.
	ContainerStatusReasonErrImagePull ContainerStatusReasonAttr = "ErrImagePull"
	// ContainerStatusReasonImagePullBackOff indicates the container image pull
	// is in back off state.
	ContainerStatusReasonImagePullBackOff ContainerStatusReasonAttr = "ImagePullBackOff"
	// ContainerStatusReasonOomKilled indicates the container was killed due to
	// out of memory.
	ContainerStatusReasonOomKilled ContainerStatusReasonAttr = "OOMKilled"
	// ContainerStatusReasonCompleted indicates the container has completed
	// execution.
	ContainerStatusReasonCompleted ContainerStatusReasonAttr = "Completed"
	// ContainerStatusReasonError indicates there was an error with the
	// container.
	ContainerStatusReasonError ContainerStatusReasonAttr = "Error"
	// ContainerStatusReasonContainerCannotRun indicates the container cannot
	// run.
	ContainerStatusReasonContainerCannotRun ContainerStatusReasonAttr = "ContainerCannotRun"
)

// ContainerStatusStateAttr is an attribute conforming to the
// k8s.container.status.state semantic conventions. It represents the state of
// the container. [K8s ContainerState].
//
// [K8s ContainerState]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstate-v1-core
type ContainerStatusStateAttr string

var (
	// ContainerStatusStateTerminated indicates the container has terminated.
	ContainerStatusStateTerminated ContainerStatusStateAttr = "terminated"
	// ContainerStatusStateRunning indicates the container is running.
	ContainerStatusStateRunning ContainerStatusStateAttr = "running"
	// ContainerStatusStateWaiting indicates the container is waiting.
	ContainerStatusStateWaiting ContainerStatusStateAttr = "waiting"
)

// NamespacePhaseAttr is an attribute conforming to the k8s.namespace.phase
// semantic conventions. It represents the phase of the K8s namespace.
type NamespacePhaseAttr string

var (
	// NamespacePhaseActive is the active namespace phase as described by
	// [K8s API].
	//
	// [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase
	NamespacePhaseActive NamespacePhaseAttr = "active"
	// NamespacePhaseTerminating is the terminating namespace phase as described
	// by [K8s API].
	//
	// [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase
	NamespacePhaseTerminating NamespacePhaseAttr = "terminating"
)

// NodeConditionStatusAttr is an attribute conforming to the
// k8s.node.condition.status semantic conventions. It represents the status of
// the condition, one of True, False, Unknown.
type NodeConditionStatusAttr string

var (
	// NodeConditionStatusConditionTrue is the standardized value "true" of
	// NodeConditionStatusAttr.
	NodeConditionStatusConditionTrue NodeConditionStatusAttr = "true"
	// NodeConditionStatusConditionFalse is the standardized value "false" of
	// NodeConditionStatusAttr.
	NodeConditionStatusConditionFalse NodeConditionStatusAttr = "false"
	// NodeConditionStatusConditionUnknown is the standardized value "unknown" of
	// NodeConditionStatusAttr.
	NodeConditionStatusConditionUnknown NodeConditionStatusAttr = "unknown"
)

// NodeConditionTypeAttr is an attribute conforming to the
// k8s.node.condition.type semantic conventions. It represents the condition type
// of a K8s Node.
type NodeConditionTypeAttr string

var (
	// NodeConditionTypeReady indicates the node is healthy and ready to accept
	// pods.
	NodeConditionTypeReady NodeConditionTypeAttr = "Ready"
	// NodeConditionTypeDiskPressure indicates pressure exists on the disk
	// size—that is, if the disk capacity is low.
	NodeConditionTypeDiskPressure NodeConditionTypeAttr = "DiskPressure"
	// NodeConditionTypeMemoryPressure indicates pressure exists on the node
	// memory—that is, if the node memory is low.
	NodeConditionTypeMemoryPressure NodeConditionTypeAttr = "MemoryPressure"
	// NodeConditionTypePIDPressure indicates pressure exists on the
	// processes—that is, if there are too many processes on the node.
	NodeConditionTypePIDPressure NodeConditionTypeAttr = "PIDPressure"
	// NodeConditionTypeNetworkUnavailable indicates the network for the node is
	// not correctly configured.
	NodeConditionTypeNetworkUnavailable NodeConditionTypeAttr = "NetworkUnavailable"
)

// VolumeTypeAttr is an attribute conforming to the k8s.volume.type semantic
// conventions. It represents the type of the K8s volume.
type VolumeTypeAttr string

var (
	// VolumeTypePersistentVolumeClaim is a [persistentVolumeClaim] volume.
	//
	// [persistentVolumeClaim]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim
	VolumeTypePersistentVolumeClaim VolumeTypeAttr = "persistentVolumeClaim"
	// VolumeTypeConfigMap is a [configMap] volume.
	//
	// [configMap]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#configmap
	VolumeTypeConfigMap VolumeTypeAttr = "configMap"
	// VolumeTypeDownwardAPI is a [downwardAPI] volume.
	//
	// [downwardAPI]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#downwardapi
	VolumeTypeDownwardAPI VolumeTypeAttr = "downwardAPI"
	// VolumeTypeEmptyDir is an [emptyDir] volume.
	//
	// [emptyDir]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#emptydir
	VolumeTypeEmptyDir VolumeTypeAttr = "emptyDir"
	// VolumeTypeSecret is a [secret] volume.
	//
	// [secret]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#secret
	VolumeTypeSecret VolumeTypeAttr = "secret"
	// VolumeTypeLocal is a [local] volume.
	//
	// [local]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#local
	VolumeTypeLocal VolumeTypeAttr = "local"
)

// NetworkIODirectionAttr is an attribute conforming to the network.io.direction
// semantic conventions. It represents the network IO operation direction.
type NetworkIODirectionAttr string

var (
	// NetworkIODirectionTransmit is the standardized value "transmit" of
	// NetworkIODirectionAttr.
	NetworkIODirectionTransmit NetworkIODirectionAttr = "transmit"
	// NetworkIODirectionReceive is the standardized value "receive" of
	// NetworkIODirectionAttr.
+ NetworkIODirectionReceive NetworkIODirectionAttr = "receive" +) + +// ContainerCPULimit is an instrument used to record metric values conforming to +// the "k8s.container.cpu.limit" semantic conventions. It represents the maximum +// CPU resource limit set for the container. +type ContainerCPULimit struct { + metric.Int64UpDownCounter +} + +// NewContainerCPULimit returns a new ContainerCPULimit instrument. +func NewContainerCPULimit( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ContainerCPULimit, error) { + // Check if the meter is nil. + if m == nil { + return ContainerCPULimit{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.container.cpu.limit", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Maximum CPU resource limit set for the container."), + metric.WithUnit("{cpu}"), + }, opt...)..., + ) + if err != nil { + return ContainerCPULimit{noop.Int64UpDownCounter{}}, err + } + return ContainerCPULimit{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ContainerCPULimit) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ContainerCPULimit) Name() string { + return "k8s.container.cpu.limit" +} + +// Unit returns the semantic convention unit of the instrument +func (ContainerCPULimit) Unit() string { + return "{cpu}" +} + +// Description returns the semantic convention description of the instrument +func (ContainerCPULimit) Description() string { + return "Maximum CPU resource limit set for the container." +} + +// Add adds incr to the existing count for attrs. +// +// See +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core +// for details. 
+func (m ContainerCPULimit) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// See +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core +// for details. +func (m ContainerCPULimit) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ContainerCPURequest is an instrument used to record metric values conforming +// to the "k8s.container.cpu.request" semantic conventions. It represents the CPU +// resource requested for the container. +type ContainerCPURequest struct { + metric.Int64UpDownCounter +} + +// NewContainerCPURequest returns a new ContainerCPURequest instrument. +func NewContainerCPURequest( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ContainerCPURequest, error) { + // Check if the meter is nil. + if m == nil { + return ContainerCPURequest{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.container.cpu.request", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("CPU resource requested for the container."), + metric.WithUnit("{cpu}"), + }, opt...)..., + ) + if err != nil { + return ContainerCPURequest{noop.Int64UpDownCounter{}}, err + } + return ContainerCPURequest{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m ContainerCPURequest) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ContainerCPURequest) Name() string { + return "k8s.container.cpu.request" +} + +// Unit returns the semantic convention unit of the instrument +func (ContainerCPURequest) Unit() string { + return "{cpu}" +} + +// Description returns the semantic convention description of the instrument +func (ContainerCPURequest) Description() string { + return "CPU resource requested for the container." +} + +// Add adds incr to the existing count for attrs. +// +// See +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core +// for details. +func (m ContainerCPURequest) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// See +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core +// for details. +func (m ContainerCPURequest) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ContainerEphemeralStorageLimit is an instrument used to record metric values +// conforming to the "k8s.container.ephemeral_storage.limit" semantic +// conventions. It represents the maximum ephemeral storage resource limit set +// for the container. 
+type ContainerEphemeralStorageLimit struct { + metric.Int64UpDownCounter +} + +// NewContainerEphemeralStorageLimit returns a new ContainerEphemeralStorageLimit +// instrument. +func NewContainerEphemeralStorageLimit( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ContainerEphemeralStorageLimit, error) { + // Check if the meter is nil. + if m == nil { + return ContainerEphemeralStorageLimit{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.container.ephemeral_storage.limit", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Maximum ephemeral storage resource limit set for the container."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ContainerEphemeralStorageLimit{noop.Int64UpDownCounter{}}, err + } + return ContainerEphemeralStorageLimit{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ContainerEphemeralStorageLimit) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ContainerEphemeralStorageLimit) Name() string { + return "k8s.container.ephemeral_storage.limit" +} + +// Unit returns the semantic convention unit of the instrument +func (ContainerEphemeralStorageLimit) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ContainerEphemeralStorageLimit) Description() string { + return "Maximum ephemeral storage resource limit set for the container." +} + +// Add adds incr to the existing count for attrs. +// +// See +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core +// for details. 
+func (m ContainerEphemeralStorageLimit) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// See +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core +// for details. +func (m ContainerEphemeralStorageLimit) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ContainerEphemeralStorageRequest is an instrument used to record metric values +// conforming to the "k8s.container.ephemeral_storage.request" semantic +// conventions. It represents the ephemeral storage resource requested for the +// container. +type ContainerEphemeralStorageRequest struct { + metric.Int64UpDownCounter +} + +// NewContainerEphemeralStorageRequest returns a new +// ContainerEphemeralStorageRequest instrument. +func NewContainerEphemeralStorageRequest( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ContainerEphemeralStorageRequest, error) { + // Check if the meter is nil. 
+ if m == nil { + return ContainerEphemeralStorageRequest{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.container.ephemeral_storage.request", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Ephemeral storage resource requested for the container."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ContainerEphemeralStorageRequest{noop.Int64UpDownCounter{}}, err + } + return ContainerEphemeralStorageRequest{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ContainerEphemeralStorageRequest) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ContainerEphemeralStorageRequest) Name() string { + return "k8s.container.ephemeral_storage.request" +} + +// Unit returns the semantic convention unit of the instrument +func (ContainerEphemeralStorageRequest) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ContainerEphemeralStorageRequest) Description() string { + return "Ephemeral storage resource requested for the container." +} + +// Add adds incr to the existing count for attrs. +// +// See +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core +// for details. +func (m ContainerEphemeralStorageRequest) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// See +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core +// for details. 
+func (m ContainerEphemeralStorageRequest) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ContainerMemoryLimit is an instrument used to record metric values conforming +// to the "k8s.container.memory.limit" semantic conventions. It represents the +// maximum memory resource limit set for the container. +type ContainerMemoryLimit struct { + metric.Int64UpDownCounter +} + +// NewContainerMemoryLimit returns a new ContainerMemoryLimit instrument. +func NewContainerMemoryLimit( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ContainerMemoryLimit, error) { + // Check if the meter is nil. + if m == nil { + return ContainerMemoryLimit{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.container.memory.limit", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Maximum memory resource limit set for the container."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ContainerMemoryLimit{noop.Int64UpDownCounter{}}, err + } + return ContainerMemoryLimit{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ContainerMemoryLimit) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ContainerMemoryLimit) Name() string { + return "k8s.container.memory.limit" +} + +// Unit returns the semantic convention unit of the instrument +func (ContainerMemoryLimit) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ContainerMemoryLimit) Description() string { + return "Maximum memory resource limit set for the container." 
+} + +// Add adds incr to the existing count for attrs. +// +// See +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core +// for details. +func (m ContainerMemoryLimit) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// See +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core +// for details. +func (m ContainerMemoryLimit) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ContainerMemoryRequest is an instrument used to record metric values +// conforming to the "k8s.container.memory.request" semantic conventions. It +// represents the memory resource requested for the container. +type ContainerMemoryRequest struct { + metric.Int64UpDownCounter +} + +// NewContainerMemoryRequest returns a new ContainerMemoryRequest instrument. +func NewContainerMemoryRequest( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ContainerMemoryRequest, error) { + // Check if the meter is nil. 
+ if m == nil { + return ContainerMemoryRequest{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.container.memory.request", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Memory resource requested for the container."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ContainerMemoryRequest{noop.Int64UpDownCounter{}}, err + } + return ContainerMemoryRequest{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ContainerMemoryRequest) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ContainerMemoryRequest) Name() string { + return "k8s.container.memory.request" +} + +// Unit returns the semantic convention unit of the instrument +func (ContainerMemoryRequest) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ContainerMemoryRequest) Description() string { + return "Memory resource requested for the container." +} + +// Add adds incr to the existing count for attrs. +// +// See +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core +// for details. +func (m ContainerMemoryRequest) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// See +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core +// for details. 
+func (m ContainerMemoryRequest) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ContainerReady is an instrument used to record metric values conforming to the +// "k8s.container.ready" semantic conventions. It represents the indicates +// whether the container is currently marked as ready to accept traffic, based on +// its readiness probe (1 = ready, 0 = not ready). +type ContainerReady struct { + metric.Int64UpDownCounter +} + +// NewContainerReady returns a new ContainerReady instrument. +func NewContainerReady( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ContainerReady, error) { + // Check if the meter is nil. + if m == nil { + return ContainerReady{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.container.ready", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Indicates whether the container is currently marked as ready to accept traffic, based on its readiness probe (1 = ready, 0 = not ready)."), + metric.WithUnit("{container}"), + }, opt...)..., + ) + if err != nil { + return ContainerReady{noop.Int64UpDownCounter{}}, err + } + return ContainerReady{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ContainerReady) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (ContainerReady) Name() string { + return "k8s.container.ready" +} + +// Unit returns the semantic convention unit of the instrument +func (ContainerReady) Unit() string { + return "{container}" +} + +// Description returns the semantic convention description of the instrument +func (ContainerReady) Description() string { + return "Indicates whether the container is currently marked as ready to accept traffic, based on its readiness probe (1 = ready, 0 = not ready)." +} + +// Add adds incr to the existing count for attrs. +// +// This metric SHOULD reflect the value of the `ready` field in the +// [K8s ContainerStatus]. +// +// [K8s ContainerStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstatus-v1-core +func (m ContainerReady) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric SHOULD reflect the value of the `ready` field in the +// [K8s ContainerStatus]. +// +// [K8s ContainerStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstatus-v1-core +func (m ContainerReady) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ContainerRestartCount is an instrument used to record metric values conforming +// to the "k8s.container.restart.count" semantic conventions. 
It represents the +// describes how many times the container has restarted (since the last counter +// reset). +type ContainerRestartCount struct { + metric.Int64UpDownCounter +} + +// NewContainerRestartCount returns a new ContainerRestartCount instrument. +func NewContainerRestartCount( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ContainerRestartCount, error) { + // Check if the meter is nil. + if m == nil { + return ContainerRestartCount{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.container.restart.count", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Describes how many times the container has restarted (since the last counter reset)."), + metric.WithUnit("{restart}"), + }, opt...)..., + ) + if err != nil { + return ContainerRestartCount{noop.Int64UpDownCounter{}}, err + } + return ContainerRestartCount{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ContainerRestartCount) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ContainerRestartCount) Name() string { + return "k8s.container.restart.count" +} + +// Unit returns the semantic convention unit of the instrument +func (ContainerRestartCount) Unit() string { + return "{restart}" +} + +// Description returns the semantic convention description of the instrument +func (ContainerRestartCount) Description() string { + return "Describes how many times the container has restarted (since the last counter reset)." +} + +// Add adds incr to the existing count for attrs. +// +// This value is pulled directly from the K8s API and the value can go +// indefinitely high and be reset to 0 +// at any time depending on how your kubelet is configured to prune dead +// containers. 
+// It is best to not depend too much on the exact value but rather look at it as +// either == 0, in which case you can conclude there were no restarts in the +// recent past, or > 0, in which case +// you can conclude there were restarts in the recent past, and not try and +// analyze the value beyond that. +func (m ContainerRestartCount) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This value is pulled directly from the K8s API and the value can go +// indefinitely high and be reset to 0 +// at any time depending on how your kubelet is configured to prune dead +// containers. +// It is best to not depend too much on the exact value but rather look at it as +// either == 0, in which case you can conclude there were no restarts in the +// recent past, or > 0, in which case +// you can conclude there were restarts in the recent past, and not try and +// analyze the value beyond that. +func (m ContainerRestartCount) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ContainerStatusReason is an instrument used to record metric values conforming +// to the "k8s.container.status.reason" semantic conventions. It represents the +// describes the number of K8s containers that are currently in a state for a +// given reason. 
+type ContainerStatusReason struct { + metric.Int64UpDownCounter +} + +// NewContainerStatusReason returns a new ContainerStatusReason instrument. +func NewContainerStatusReason( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ContainerStatusReason, error) { + // Check if the meter is nil. + if m == nil { + return ContainerStatusReason{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.container.status.reason", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Describes the number of K8s containers that are currently in a state for a given reason."), + metric.WithUnit("{container}"), + }, opt...)..., + ) + if err != nil { + return ContainerStatusReason{noop.Int64UpDownCounter{}}, err + } + return ContainerStatusReason{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ContainerStatusReason) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ContainerStatusReason) Name() string { + return "k8s.container.status.reason" +} + +// Unit returns the semantic convention unit of the instrument +func (ContainerStatusReason) Unit() string { + return "{container}" +} + +// Description returns the semantic convention description of the instrument +func (ContainerStatusReason) Description() string { + return "Describes the number of K8s containers that are currently in a state for a given reason." +} + +// Add adds incr to the existing count for attrs. +// +// The containerStatusReason is the the reason for the container state. 
+// Corresponds to the `reason` field of the: [K8s ContainerStateWaiting] or
+// [K8s ContainerStateTerminated]
+//
+// [K8s ContainerStateWaiting]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstatewaiting-v1-core
+// [K8s ContainerStateTerminated]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstateterminated-v1-core
+//
+// All possible container state reasons will be reported at each time interval to
+// avoid missing metrics.
+// Only the value corresponding to the current state reason will be non-zero.
+func (m ContainerStatusReason) Add(
+	ctx context.Context,
+	incr int64,
+	containerStatusReason ContainerStatusReasonAttr,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		// Fast path without the option pool. The required
+		// "k8s.container.status.reason" attribute must still be recorded even
+		// when the caller supplies no additional attributes.
+		m.Int64UpDownCounter.Add(ctx, incr, metric.WithAttributes(
+			attribute.String("k8s.container.status.reason", string(containerStatusReason)),
+		))
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("k8s.container.status.reason", string(containerStatusReason)),
+			)...,
+		),
+	)
+
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+//
+// All possible container state reasons will be reported at each time interval to
+// avoid missing metrics.
+// Only the value corresponding to the current state reason will be non-zero.
+func (m ContainerStatusReason) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64UpDownCounter.Add(ctx, incr)
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// ContainerStatusState is an instrument used to record metric values conforming
+// to the "k8s.container.status.state" semantic conventions. It represents the
+// describes the number of K8s containers that are currently in a given state.
+type ContainerStatusState struct { + metric.Int64UpDownCounter +} + +// NewContainerStatusState returns a new ContainerStatusState instrument. +func NewContainerStatusState( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ContainerStatusState, error) { + // Check if the meter is nil. + if m == nil { + return ContainerStatusState{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.container.status.state", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Describes the number of K8s containers that are currently in a given state."), + metric.WithUnit("{container}"), + }, opt...)..., + ) + if err != nil { + return ContainerStatusState{noop.Int64UpDownCounter{}}, err + } + return ContainerStatusState{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ContainerStatusState) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ContainerStatusState) Name() string { + return "k8s.container.status.state" +} + +// Unit returns the semantic convention unit of the instrument +func (ContainerStatusState) Unit() string { + return "{container}" +} + +// Description returns the semantic convention description of the instrument +func (ContainerStatusState) Description() string { + return "Describes the number of K8s containers that are currently in a given state." +} + +// Add adds incr to the existing count for attrs. +// +// The containerStatusState is the the state of the container. +// [K8s ContainerState] +// +// [K8s ContainerState]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstate-v1-core +// +// All possible container states will be reported at each time interval to avoid +// missing metrics. +// Only the value corresponding to the current state will be non-zero. 
+func (m ContainerStatusState) Add( + ctx context.Context, + incr int64, + containerStatusState ContainerStatusStateAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("k8s.container.status.state", string(containerStatusState)), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// All possible container states will be reported at each time interval to avoid +// missing metrics. +// Only the value corresponding to the current state will be non-zero. +func (m ContainerStatusState) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ContainerStorageLimit is an instrument used to record metric values conforming +// to the "k8s.container.storage.limit" semantic conventions. It represents the +// maximum storage resource limit set for the container. +type ContainerStorageLimit struct { + metric.Int64UpDownCounter +} + +// NewContainerStorageLimit returns a new ContainerStorageLimit instrument. +func NewContainerStorageLimit( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ContainerStorageLimit, error) { + // Check if the meter is nil. 
+ if m == nil { + return ContainerStorageLimit{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.container.storage.limit", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Maximum storage resource limit set for the container."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ContainerStorageLimit{noop.Int64UpDownCounter{}}, err + } + return ContainerStorageLimit{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ContainerStorageLimit) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ContainerStorageLimit) Name() string { + return "k8s.container.storage.limit" +} + +// Unit returns the semantic convention unit of the instrument +func (ContainerStorageLimit) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ContainerStorageLimit) Description() string { + return "Maximum storage resource limit set for the container." +} + +// Add adds incr to the existing count for attrs. +// +// See +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core +// for details. +func (m ContainerStorageLimit) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// See +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core +// for details. 
+func (m ContainerStorageLimit) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ContainerStorageRequest is an instrument used to record metric values +// conforming to the "k8s.container.storage.request" semantic conventions. It +// represents the storage resource requested for the container. +type ContainerStorageRequest struct { + metric.Int64UpDownCounter +} + +// NewContainerStorageRequest returns a new ContainerStorageRequest instrument. +func NewContainerStorageRequest( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ContainerStorageRequest, error) { + // Check if the meter is nil. + if m == nil { + return ContainerStorageRequest{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.container.storage.request", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Storage resource requested for the container."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ContainerStorageRequest{noop.Int64UpDownCounter{}}, err + } + return ContainerStorageRequest{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ContainerStorageRequest) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ContainerStorageRequest) Name() string { + return "k8s.container.storage.request" +} + +// Unit returns the semantic convention unit of the instrument +func (ContainerStorageRequest) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ContainerStorageRequest) Description() string { + return "Storage resource requested for the container." 
+} + +// Add adds incr to the existing count for attrs. +// +// See +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core +// for details. +func (m ContainerStorageRequest) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// See +// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#resourcerequirements-v1-core +// for details. +func (m ContainerStorageRequest) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// CronJobActiveJobs is an instrument used to record metric values conforming to +// the "k8s.cronjob.active_jobs" semantic conventions. It represents the number +// of actively running jobs for a cronjob. +type CronJobActiveJobs struct { + metric.Int64UpDownCounter +} + +// NewCronJobActiveJobs returns a new CronJobActiveJobs instrument. +func NewCronJobActiveJobs( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (CronJobActiveJobs, error) { + // Check if the meter is nil. 
+ if m == nil { + return CronJobActiveJobs{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.cronjob.active_jobs", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of actively running jobs for a cronjob."), + metric.WithUnit("{job}"), + }, opt...)..., + ) + if err != nil { + return CronJobActiveJobs{noop.Int64UpDownCounter{}}, err + } + return CronJobActiveJobs{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m CronJobActiveJobs) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (CronJobActiveJobs) Name() string { + return "k8s.cronjob.active_jobs" +} + +// Unit returns the semantic convention unit of the instrument +func (CronJobActiveJobs) Unit() string { + return "{job}" +} + +// Description returns the semantic convention description of the instrument +func (CronJobActiveJobs) Description() string { + return "The number of actively running jobs for a cronjob." +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `active` field of the +// [K8s CronJobStatus]. +// +// [K8s CronJobStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#cronjobstatus-v1-batch +func (m CronJobActiveJobs) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `active` field of the +// [K8s CronJobStatus]. 
+// +// [K8s CronJobStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#cronjobstatus-v1-batch +func (m CronJobActiveJobs) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// DaemonSetCurrentScheduledNodes is an instrument used to record metric values +// conforming to the "k8s.daemonset.current_scheduled_nodes" semantic +// conventions. It represents the number of nodes that are running at least 1 +// daemon pod and are supposed to run the daemon pod. +type DaemonSetCurrentScheduledNodes struct { + metric.Int64UpDownCounter +} + +// NewDaemonSetCurrentScheduledNodes returns a new DaemonSetCurrentScheduledNodes +// instrument. +func NewDaemonSetCurrentScheduledNodes( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (DaemonSetCurrentScheduledNodes, error) { + // Check if the meter is nil. + if m == nil { + return DaemonSetCurrentScheduledNodes{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.daemonset.current_scheduled_nodes", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod."), + metric.WithUnit("{node}"), + }, opt...)..., + ) + if err != nil { + return DaemonSetCurrentScheduledNodes{noop.Int64UpDownCounter{}}, err + } + return DaemonSetCurrentScheduledNodes{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m DaemonSetCurrentScheduledNodes) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (DaemonSetCurrentScheduledNodes) Name() string { + return "k8s.daemonset.current_scheduled_nodes" +} + +// Unit returns the semantic convention unit of the instrument +func (DaemonSetCurrentScheduledNodes) Unit() string { + return "{node}" +} + +// Description returns the semantic convention description of the instrument +func (DaemonSetCurrentScheduledNodes) Description() string { + return "Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod." +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `currentNumberScheduled` field of the +// [K8s DaemonSetStatus]. +// +// [K8s DaemonSetStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps +func (m DaemonSetCurrentScheduledNodes) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `currentNumberScheduled` field of the +// [K8s DaemonSetStatus]. +// +// [K8s DaemonSetStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps +func (m DaemonSetCurrentScheduledNodes) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// DaemonSetDesiredScheduledNodes is an instrument used to record metric values +// conforming to the "k8s.daemonset.desired_scheduled_nodes" semantic +// conventions. 
It represents the number of nodes that should be running the +// daemon pod (including nodes currently running the daemon pod). +type DaemonSetDesiredScheduledNodes struct { + metric.Int64UpDownCounter +} + +// NewDaemonSetDesiredScheduledNodes returns a new DaemonSetDesiredScheduledNodes +// instrument. +func NewDaemonSetDesiredScheduledNodes( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (DaemonSetDesiredScheduledNodes, error) { + // Check if the meter is nil. + if m == nil { + return DaemonSetDesiredScheduledNodes{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.daemonset.desired_scheduled_nodes", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod)."), + metric.WithUnit("{node}"), + }, opt...)..., + ) + if err != nil { + return DaemonSetDesiredScheduledNodes{noop.Int64UpDownCounter{}}, err + } + return DaemonSetDesiredScheduledNodes{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m DaemonSetDesiredScheduledNodes) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (DaemonSetDesiredScheduledNodes) Name() string { + return "k8s.daemonset.desired_scheduled_nodes" +} + +// Unit returns the semantic convention unit of the instrument +func (DaemonSetDesiredScheduledNodes) Unit() string { + return "{node}" +} + +// Description returns the semantic convention description of the instrument +func (DaemonSetDesiredScheduledNodes) Description() string { + return "Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod)." +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `desiredNumberScheduled` field of the +// [K8s DaemonSetStatus]. 
+// +// [K8s DaemonSetStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps +func (m DaemonSetDesiredScheduledNodes) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `desiredNumberScheduled` field of the +// [K8s DaemonSetStatus]. +// +// [K8s DaemonSetStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps +func (m DaemonSetDesiredScheduledNodes) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// DaemonSetMisscheduledNodes is an instrument used to record metric values +// conforming to the "k8s.daemonset.misscheduled_nodes" semantic conventions. It +// represents the number of nodes that are running the daemon pod, but are not +// supposed to run the daemon pod. +type DaemonSetMisscheduledNodes struct { + metric.Int64UpDownCounter +} + +// NewDaemonSetMisscheduledNodes returns a new DaemonSetMisscheduledNodes +// instrument. +func NewDaemonSetMisscheduledNodes( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (DaemonSetMisscheduledNodes, error) { + // Check if the meter is nil. 
+ if m == nil { + return DaemonSetMisscheduledNodes{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.daemonset.misscheduled_nodes", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod."), + metric.WithUnit("{node}"), + }, opt...)..., + ) + if err != nil { + return DaemonSetMisscheduledNodes{noop.Int64UpDownCounter{}}, err + } + return DaemonSetMisscheduledNodes{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m DaemonSetMisscheduledNodes) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (DaemonSetMisscheduledNodes) Name() string { + return "k8s.daemonset.misscheduled_nodes" +} + +// Unit returns the semantic convention unit of the instrument +func (DaemonSetMisscheduledNodes) Unit() string { + return "{node}" +} + +// Description returns the semantic convention description of the instrument +func (DaemonSetMisscheduledNodes) Description() string { + return "Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod." +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `numberMisscheduled` field of the +// [K8s DaemonSetStatus]. +// +// [K8s DaemonSetStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps +func (m DaemonSetMisscheduledNodes) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+// +// This metric aligns with the `numberMisscheduled` field of the +// [K8s DaemonSetStatus]. +// +// [K8s DaemonSetStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps +func (m DaemonSetMisscheduledNodes) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// DaemonSetReadyNodes is an instrument used to record metric values conforming +// to the "k8s.daemonset.ready_nodes" semantic conventions. It represents the +// number of nodes that should be running the daemon pod and have one or more of +// the daemon pod running and ready. +type DaemonSetReadyNodes struct { + metric.Int64UpDownCounter +} + +// NewDaemonSetReadyNodes returns a new DaemonSetReadyNodes instrument. +func NewDaemonSetReadyNodes( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (DaemonSetReadyNodes, error) { + // Check if the meter is nil. + if m == nil { + return DaemonSetReadyNodes{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.daemonset.ready_nodes", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready."), + metric.WithUnit("{node}"), + }, opt...)..., + ) + if err != nil { + return DaemonSetReadyNodes{noop.Int64UpDownCounter{}}, err + } + return DaemonSetReadyNodes{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m DaemonSetReadyNodes) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (DaemonSetReadyNodes) Name() string { + return "k8s.daemonset.ready_nodes" +} + +// Unit returns the semantic convention unit of the instrument +func (DaemonSetReadyNodes) Unit() string { + return "{node}" +} + +// Description returns the semantic convention description of the instrument +func (DaemonSetReadyNodes) Description() string { + return "Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready." +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `numberReady` field of the +// [K8s DaemonSetStatus]. +// +// [K8s DaemonSetStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps +func (m DaemonSetReadyNodes) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `numberReady` field of the +// [K8s DaemonSetStatus]. +// +// [K8s DaemonSetStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#daemonsetstatus-v1-apps +func (m DaemonSetReadyNodes) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// DeploymentAvailablePods is an instrument used to record metric values +// conforming to the "k8s.deployment.available_pods" semantic conventions. 
It +// represents the total number of available replica pods (ready for at least +// minReadySeconds) targeted by this deployment. +type DeploymentAvailablePods struct { + metric.Int64UpDownCounter +} + +// NewDeploymentAvailablePods returns a new DeploymentAvailablePods instrument. +func NewDeploymentAvailablePods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (DeploymentAvailablePods, error) { + // Check if the meter is nil. + if m == nil { + return DeploymentAvailablePods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.deployment.available_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Total number of available replica pods (ready for at least minReadySeconds) targeted by this deployment."), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return DeploymentAvailablePods{noop.Int64UpDownCounter{}}, err + } + return DeploymentAvailablePods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m DeploymentAvailablePods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (DeploymentAvailablePods) Name() string { + return "k8s.deployment.available_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (DeploymentAvailablePods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (DeploymentAvailablePods) Description() string { + return "Total number of available replica pods (ready for at least minReadySeconds) targeted by this deployment." +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `availableReplicas` field of the +// [K8s DeploymentStatus]. 
+// +// [K8s DeploymentStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#deploymentstatus-v1-apps +func (m DeploymentAvailablePods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `availableReplicas` field of the +// [K8s DeploymentStatus]. +// +// [K8s DeploymentStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#deploymentstatus-v1-apps +func (m DeploymentAvailablePods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// DeploymentDesiredPods is an instrument used to record metric values conforming +// to the "k8s.deployment.desired_pods" semantic conventions. It represents the +// number of desired replica pods in this deployment. +type DeploymentDesiredPods struct { + metric.Int64UpDownCounter +} + +// NewDeploymentDesiredPods returns a new DeploymentDesiredPods instrument. +func NewDeploymentDesiredPods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (DeploymentDesiredPods, error) { + // Check if the meter is nil. 
+ if m == nil { + return DeploymentDesiredPods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.deployment.desired_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of desired replica pods in this deployment."), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return DeploymentDesiredPods{noop.Int64UpDownCounter{}}, err + } + return DeploymentDesiredPods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m DeploymentDesiredPods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (DeploymentDesiredPods) Name() string { + return "k8s.deployment.desired_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (DeploymentDesiredPods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (DeploymentDesiredPods) Description() string { + return "Number of desired replica pods in this deployment." +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `replicas` field of the +// [K8s DeploymentSpec]. +// +// [K8s DeploymentSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#deploymentspec-v1-apps +func (m DeploymentDesiredPods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `replicas` field of the +// [K8s DeploymentSpec]. 
+// +// [K8s DeploymentSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#deploymentspec-v1-apps +func (m DeploymentDesiredPods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// HPACurrentPods is an instrument used to record metric values conforming to the +// "k8s.hpa.current_pods" semantic conventions. It represents the current number +// of replica pods managed by this horizontal pod autoscaler, as last seen by the +// autoscaler. +type HPACurrentPods struct { + metric.Int64UpDownCounter +} + +// NewHPACurrentPods returns a new HPACurrentPods instrument. +func NewHPACurrentPods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (HPACurrentPods, error) { + // Check if the meter is nil. + if m == nil { + return HPACurrentPods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.hpa.current_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Current number of replica pods managed by this horizontal pod autoscaler, as last seen by the autoscaler."), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return HPACurrentPods{noop.Int64UpDownCounter{}}, err + } + return HPACurrentPods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m HPACurrentPods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (HPACurrentPods) Name() string { + return "k8s.hpa.current_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (HPACurrentPods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (HPACurrentPods) Description() string { + return "Current number of replica pods managed by this horizontal pod autoscaler, as last seen by the autoscaler." +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `currentReplicas` field of the +// [K8s HorizontalPodAutoscalerStatus] +// +// [K8s HorizontalPodAutoscalerStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerstatus-v2-autoscaling +func (m HPACurrentPods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `currentReplicas` field of the +// [K8s HorizontalPodAutoscalerStatus] +// +// [K8s HorizontalPodAutoscalerStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerstatus-v2-autoscaling +func (m HPACurrentPods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// HPADesiredPods is an instrument used to record metric values conforming to the +// "k8s.hpa.desired_pods" semantic conventions. 
It represents the desired number +// of replica pods managed by this horizontal pod autoscaler, as last calculated +// by the autoscaler. +type HPADesiredPods struct { + metric.Int64UpDownCounter +} + +// NewHPADesiredPods returns a new HPADesiredPods instrument. +func NewHPADesiredPods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (HPADesiredPods, error) { + // Check if the meter is nil. + if m == nil { + return HPADesiredPods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.hpa.desired_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Desired number of replica pods managed by this horizontal pod autoscaler, as last calculated by the autoscaler."), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return HPADesiredPods{noop.Int64UpDownCounter{}}, err + } + return HPADesiredPods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m HPADesiredPods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (HPADesiredPods) Name() string { + return "k8s.hpa.desired_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (HPADesiredPods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (HPADesiredPods) Description() string { + return "Desired number of replica pods managed by this horizontal pod autoscaler, as last calculated by the autoscaler." +} + +// Add adds incr to the existing count for attrs. 
+// +// This metric aligns with the `desiredReplicas` field of the +// [K8s HorizontalPodAutoscalerStatus] +// +// [K8s HorizontalPodAutoscalerStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerstatus-v2-autoscaling +func (m HPADesiredPods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `desiredReplicas` field of the +// [K8s HorizontalPodAutoscalerStatus] +// +// [K8s HorizontalPodAutoscalerStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerstatus-v2-autoscaling +func (m HPADesiredPods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// HPAMaxPods is an instrument used to record metric values conforming to the +// "k8s.hpa.max_pods" semantic conventions. It represents the upper limit for the +// number of replica pods to which the autoscaler can scale up. +type HPAMaxPods struct { + metric.Int64UpDownCounter +} + +// NewHPAMaxPods returns a new HPAMaxPods instrument. +func NewHPAMaxPods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (HPAMaxPods, error) { + // Check if the meter is nil. 
+ if m == nil { + return HPAMaxPods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.hpa.max_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The upper limit for the number of replica pods to which the autoscaler can scale up."), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return HPAMaxPods{noop.Int64UpDownCounter{}}, err + } + return HPAMaxPods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m HPAMaxPods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (HPAMaxPods) Name() string { + return "k8s.hpa.max_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (HPAMaxPods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (HPAMaxPods) Description() string { + return "The upper limit for the number of replica pods to which the autoscaler can scale up." +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `maxReplicas` field of the +// [K8s HorizontalPodAutoscalerSpec] +// +// [K8s HorizontalPodAutoscalerSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerspec-v2-autoscaling +func (m HPAMaxPods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+// +// This metric aligns with the `maxReplicas` field of the +// [K8s HorizontalPodAutoscalerSpec] +// +// [K8s HorizontalPodAutoscalerSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerspec-v2-autoscaling +func (m HPAMaxPods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// HPAMetricTargetCPUAverageUtilization is an instrument used to record metric +// values conforming to the "k8s.hpa.metric.target.cpu.average_utilization" +// semantic conventions. It represents the target average utilization, in +// percentage, for CPU resource in HPA config. +type HPAMetricTargetCPUAverageUtilization struct { + metric.Int64Gauge +} + +// NewHPAMetricTargetCPUAverageUtilization returns a new +// HPAMetricTargetCPUAverageUtilization instrument. +func NewHPAMetricTargetCPUAverageUtilization( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (HPAMetricTargetCPUAverageUtilization, error) { + // Check if the meter is nil. + if m == nil { + return HPAMetricTargetCPUAverageUtilization{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "k8s.hpa.metric.target.cpu.average_utilization", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Target average utilization, in percentage, for CPU resource in HPA config."), + metric.WithUnit("1"), + }, opt...)..., + ) + if err != nil { + return HPAMetricTargetCPUAverageUtilization{noop.Int64Gauge{}}, err + } + return HPAMetricTargetCPUAverageUtilization{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m HPAMetricTargetCPUAverageUtilization) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. 
+func (HPAMetricTargetCPUAverageUtilization) Name() string { + return "k8s.hpa.metric.target.cpu.average_utilization" +} + +// Unit returns the semantic convention unit of the instrument +func (HPAMetricTargetCPUAverageUtilization) Unit() string { + return "1" +} + +// Description returns the semantic convention description of the instrument +func (HPAMetricTargetCPUAverageUtilization) Description() string { + return "Target average utilization, in percentage, for CPU resource in HPA config." +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// This metric aligns with the `averageUtilization` field of the +// [K8s HPA MetricTarget]. +// If the type of the metric is [`ContainerResource`], +// the `k8s.container.name` attribute MUST be set to identify the specific +// container within the pod to which the metric applies. +// +// [K8s HPA MetricTarget]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#metrictarget-v2-autoscaling +// [`ContainerResource`]: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-metrics-apis +func (m HPAMetricTargetCPUAverageUtilization) Record( + ctx context.Context, + val int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// This metric aligns with the `averageUtilization` field of the +// [K8s HPA MetricTarget]. +// If the type of the metric is [`ContainerResource`], +// the `k8s.container.name` attribute MUST be set to identify the specific +// container within the pod to which the metric applies. 
+// +// [K8s HPA MetricTarget]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#metrictarget-v2-autoscaling +// [`ContainerResource`]: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-metrics-apis +func (m HPAMetricTargetCPUAverageUtilization) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// AttrContainerName returns an optional attribute for the "k8s.container.name" +// semantic convention. It represents the name of the Container from Pod +// specification, must be unique within a Pod. Container runtime usually uses +// different globally unique name (`container.name`). +func (HPAMetricTargetCPUAverageUtilization) AttrContainerName(val string) attribute.KeyValue { + return attribute.String("k8s.container.name", val) +} + +// AttrHPAMetricType returns an optional attribute for the "k8s.hpa.metric.type" +// semantic convention. It represents the type of metric source for the +// horizontal pod autoscaler. +func (HPAMetricTargetCPUAverageUtilization) AttrHPAMetricType(val string) attribute.KeyValue { + return attribute.String("k8s.hpa.metric.type", val) +} + +// HPAMetricTargetCPUAverageValue is an instrument used to record metric values +// conforming to the "k8s.hpa.metric.target.cpu.average_value" semantic +// conventions. It represents the target average value for CPU resource in HPA +// config. +type HPAMetricTargetCPUAverageValue struct { + metric.Int64Gauge +} + +// NewHPAMetricTargetCPUAverageValue returns a new HPAMetricTargetCPUAverageValue +// instrument. +func NewHPAMetricTargetCPUAverageValue( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (HPAMetricTargetCPUAverageValue, error) { + // Check if the meter is nil. 
+ if m == nil { + return HPAMetricTargetCPUAverageValue{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "k8s.hpa.metric.target.cpu.average_value", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Target average value for CPU resource in HPA config."), + metric.WithUnit("{cpu}"), + }, opt...)..., + ) + if err != nil { + return HPAMetricTargetCPUAverageValue{noop.Int64Gauge{}}, err + } + return HPAMetricTargetCPUAverageValue{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m HPAMetricTargetCPUAverageValue) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (HPAMetricTargetCPUAverageValue) Name() string { + return "k8s.hpa.metric.target.cpu.average_value" +} + +// Unit returns the semantic convention unit of the instrument +func (HPAMetricTargetCPUAverageValue) Unit() string { + return "{cpu}" +} + +// Description returns the semantic convention description of the instrument +func (HPAMetricTargetCPUAverageValue) Description() string { + return "Target average value for CPU resource in HPA config." +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// This metric aligns with the `averageValue` field of the +// [K8s HPA MetricTarget]. +// If the type of the metric is [`ContainerResource`], +// the `k8s.container.name` attribute MUST be set to identify the specific +// container within the pod to which the metric applies. 
+// +// [K8s HPA MetricTarget]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#metrictarget-v2-autoscaling +// [`ContainerResource`]: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-metrics-apis +func (m HPAMetricTargetCPUAverageValue) Record( + ctx context.Context, + val int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// This metric aligns with the `averageValue` field of the +// [K8s HPA MetricTarget]. +// If the type of the metric is [`ContainerResource`], +// the `k8s.container.name` attribute MUST be set to identify the specific +// container within the pod to which the metric applies. +// +// [K8s HPA MetricTarget]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#metrictarget-v2-autoscaling +// [`ContainerResource`]: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-metrics-apis +func (m HPAMetricTargetCPUAverageValue) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// AttrContainerName returns an optional attribute for the "k8s.container.name" +// semantic convention. It represents the name of the Container from Pod +// specification, must be unique within a Pod. Container runtime usually uses +// different globally unique name (`container.name`). 
+func (HPAMetricTargetCPUAverageValue) AttrContainerName(val string) attribute.KeyValue { + return attribute.String("k8s.container.name", val) +} + +// AttrHPAMetricType returns an optional attribute for the "k8s.hpa.metric.type" +// semantic convention. It represents the type of metric source for the +// horizontal pod autoscaler. +func (HPAMetricTargetCPUAverageValue) AttrHPAMetricType(val string) attribute.KeyValue { + return attribute.String("k8s.hpa.metric.type", val) +} + +// HPAMetricTargetCPUValue is an instrument used to record metric values +// conforming to the "k8s.hpa.metric.target.cpu.value" semantic conventions. It +// represents the target value for CPU resource in HPA config. +type HPAMetricTargetCPUValue struct { + metric.Int64Gauge +} + +// NewHPAMetricTargetCPUValue returns a new HPAMetricTargetCPUValue instrument. +func NewHPAMetricTargetCPUValue( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (HPAMetricTargetCPUValue, error) { + // Check if the meter is nil. + if m == nil { + return HPAMetricTargetCPUValue{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "k8s.hpa.metric.target.cpu.value", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Target value for CPU resource in HPA config."), + metric.WithUnit("{cpu}"), + }, opt...)..., + ) + if err != nil { + return HPAMetricTargetCPUValue{noop.Int64Gauge{}}, err + } + return HPAMetricTargetCPUValue{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m HPAMetricTargetCPUValue) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. 
+func (HPAMetricTargetCPUValue) Name() string { + return "k8s.hpa.metric.target.cpu.value" +} + +// Unit returns the semantic convention unit of the instrument +func (HPAMetricTargetCPUValue) Unit() string { + return "{cpu}" +} + +// Description returns the semantic convention description of the instrument +func (HPAMetricTargetCPUValue) Description() string { + return "Target value for CPU resource in HPA config." +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// This metric aligns with the `value` field of the +// [K8s HPA MetricTarget]. +// If the type of the metric is [`ContainerResource`], +// the `k8s.container.name` attribute MUST be set to identify the specific +// container within the pod to which the metric applies. +// +// [K8s HPA MetricTarget]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#metrictarget-v2-autoscaling +// [`ContainerResource`]: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-metrics-apis +func (m HPAMetricTargetCPUValue) Record( + ctx context.Context, + val int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// This metric aligns with the `value` field of the +// [K8s HPA MetricTarget]. +// If the type of the metric is [`ContainerResource`], +// the `k8s.container.name` attribute MUST be set to identify the specific +// container within the pod to which the metric applies. 
+// +// [K8s HPA MetricTarget]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#metrictarget-v2-autoscaling +// [`ContainerResource`]: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-metrics-apis +func (m HPAMetricTargetCPUValue) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// AttrContainerName returns an optional attribute for the "k8s.container.name" +// semantic convention. It represents the name of the Container from Pod +// specification, must be unique within a Pod. Container runtime usually uses +// different globally unique name (`container.name`). +func (HPAMetricTargetCPUValue) AttrContainerName(val string) attribute.KeyValue { + return attribute.String("k8s.container.name", val) +} + +// AttrHPAMetricType returns an optional attribute for the "k8s.hpa.metric.type" +// semantic convention. It represents the type of metric source for the +// horizontal pod autoscaler. +func (HPAMetricTargetCPUValue) AttrHPAMetricType(val string) attribute.KeyValue { + return attribute.String("k8s.hpa.metric.type", val) +} + +// HPAMinPods is an instrument used to record metric values conforming to the +// "k8s.hpa.min_pods" semantic conventions. It represents the lower limit for the +// number of replica pods to which the autoscaler can scale down. +type HPAMinPods struct { + metric.Int64UpDownCounter +} + +// NewHPAMinPods returns a new HPAMinPods instrument. +func NewHPAMinPods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (HPAMinPods, error) { + // Check if the meter is nil. 
+ if m == nil { + return HPAMinPods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.hpa.min_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The lower limit for the number of replica pods to which the autoscaler can scale down."), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return HPAMinPods{noop.Int64UpDownCounter{}}, err + } + return HPAMinPods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m HPAMinPods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (HPAMinPods) Name() string { + return "k8s.hpa.min_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (HPAMinPods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (HPAMinPods) Description() string { + return "The lower limit for the number of replica pods to which the autoscaler can scale down." +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `minReplicas` field of the +// [K8s HorizontalPodAutoscalerSpec] +// +// [K8s HorizontalPodAutoscalerSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerspec-v2-autoscaling +func (m HPAMinPods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+// +// This metric aligns with the `minReplicas` field of the +// [K8s HorizontalPodAutoscalerSpec] +// +// [K8s HorizontalPodAutoscalerSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#horizontalpodautoscalerspec-v2-autoscaling +func (m HPAMinPods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// JobActivePods is an instrument used to record metric values conforming to the +// "k8s.job.active_pods" semantic conventions. It represents the number of +// pending and actively running pods for a job. +type JobActivePods struct { + metric.Int64UpDownCounter +} + +// NewJobActivePods returns a new JobActivePods instrument. +func NewJobActivePods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (JobActivePods, error) { + // Check if the meter is nil. + if m == nil { + return JobActivePods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.job.active_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of pending and actively running pods for a job."), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return JobActivePods{noop.Int64UpDownCounter{}}, err + } + return JobActivePods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m JobActivePods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (JobActivePods) Name() string { + return "k8s.job.active_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (JobActivePods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (JobActivePods) Description() string { + return "The number of pending and actively running pods for a job." +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `active` field of the +// [K8s JobStatus]. +// +// [K8s JobStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch +func (m JobActivePods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `active` field of the +// [K8s JobStatus]. +// +// [K8s JobStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch +func (m JobActivePods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// JobDesiredSuccessfulPods is an instrument used to record metric values +// conforming to the "k8s.job.desired_successful_pods" semantic conventions. It +// represents the desired number of successfully finished pods the job should be +// run with. 
+type JobDesiredSuccessfulPods struct {
+	metric.Int64UpDownCounter
+}
+
+// NewJobDesiredSuccessfulPods returns a new JobDesiredSuccessfulPods instrument.
+func NewJobDesiredSuccessfulPods(
+	m metric.Meter,
+	opt ...metric.Int64UpDownCounterOption,
+) (JobDesiredSuccessfulPods, error) {
+	// Check if the meter is nil.
+	if m == nil {
+		return JobDesiredSuccessfulPods{noop.Int64UpDownCounter{}}, nil
+	}
+
+	i, err := m.Int64UpDownCounter(
+		"k8s.job.desired_successful_pods",
+		append([]metric.Int64UpDownCounterOption{
+			metric.WithDescription("The desired number of successfully finished pods the job should be run with."),
+			metric.WithUnit("{pod}"),
+		}, opt...)...,
+	)
+	if err != nil {
+		return JobDesiredSuccessfulPods{noop.Int64UpDownCounter{}}, err
+	}
+	return JobDesiredSuccessfulPods{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m JobDesiredSuccessfulPods) Inst() metric.Int64UpDownCounter {
+	return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (JobDesiredSuccessfulPods) Name() string {
+	return "k8s.job.desired_successful_pods"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (JobDesiredSuccessfulPods) Unit() string {
+	return "{pod}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (JobDesiredSuccessfulPods) Description() string {
+	return "The desired number of successfully finished pods the job should be run with."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// This metric aligns with the `completions` field of the
+// [K8s JobSpec].
+//
+// [K8s JobSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobspec-v1-batch
+func (m JobDesiredSuccessfulPods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) {
+	if len(attrs) == 0 {
+		m.Int64UpDownCounter.Add(ctx, incr)
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributes(attrs...))
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+//
+// This metric aligns with the `completions` field of the
+// [K8s JobSpec].
+//
+// [K8s JobSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobspec-v1-batch
+func (m JobDesiredSuccessfulPods) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64UpDownCounter.Add(ctx, incr)
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// JobFailedPods is an instrument used to record metric values conforming to the
+// "k8s.job.failed_pods" semantic conventions. It represents the number of pods
+// which reached phase Failed for a job.
+type JobFailedPods struct {
+	metric.Int64UpDownCounter
+}
+
+// NewJobFailedPods returns a new JobFailedPods instrument.
+func NewJobFailedPods(
+	m metric.Meter,
+	opt ...metric.Int64UpDownCounterOption,
+) (JobFailedPods, error) {
+	// Check if the meter is nil.
+ if m == nil { + return JobFailedPods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.job.failed_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of pods which reached phase Failed for a job."), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return JobFailedPods{noop.Int64UpDownCounter{}}, err + } + return JobFailedPods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m JobFailedPods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (JobFailedPods) Name() string { + return "k8s.job.failed_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (JobFailedPods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (JobFailedPods) Description() string { + return "The number of pods which reached phase Failed for a job." +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `failed` field of the +// [K8s JobStatus]. +// +// [K8s JobStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch +func (m JobFailedPods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `failed` field of the +// [K8s JobStatus]. 
+// +// [K8s JobStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch +func (m JobFailedPods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// JobMaxParallelPods is an instrument used to record metric values conforming to +// the "k8s.job.max_parallel_pods" semantic conventions. It represents the max +// desired number of pods the job should run at any given time. +type JobMaxParallelPods struct { + metric.Int64UpDownCounter +} + +// NewJobMaxParallelPods returns a new JobMaxParallelPods instrument. +func NewJobMaxParallelPods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (JobMaxParallelPods, error) { + // Check if the meter is nil. + if m == nil { + return JobMaxParallelPods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.job.max_parallel_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The max desired number of pods the job should run at any given time."), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return JobMaxParallelPods{noop.Int64UpDownCounter{}}, err + } + return JobMaxParallelPods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m JobMaxParallelPods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (JobMaxParallelPods) Name() string { + return "k8s.job.max_parallel_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (JobMaxParallelPods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (JobMaxParallelPods) Description() string { + return "The max desired number of pods the job should run at any given time." +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `parallelism` field of the +// [K8s JobSpec]. +// +// [K8s JobSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobspec-v1-batch +func (m JobMaxParallelPods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `parallelism` field of the +// [K8s JobSpec]. +// +// [K8s JobSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobspec-v1-batch +func (m JobMaxParallelPods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// JobSuccessfulPods is an instrument used to record metric values conforming to +// the "k8s.job.successful_pods" semantic conventions. It represents the number +// of pods which reached phase Succeeded for a job. 
+type JobSuccessfulPods struct { + metric.Int64UpDownCounter +} + +// NewJobSuccessfulPods returns a new JobSuccessfulPods instrument. +func NewJobSuccessfulPods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (JobSuccessfulPods, error) { + // Check if the meter is nil. + if m == nil { + return JobSuccessfulPods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.job.successful_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of pods which reached phase Succeeded for a job."), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return JobSuccessfulPods{noop.Int64UpDownCounter{}}, err + } + return JobSuccessfulPods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m JobSuccessfulPods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (JobSuccessfulPods) Name() string { + return "k8s.job.successful_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (JobSuccessfulPods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (JobSuccessfulPods) Description() string { + return "The number of pods which reached phase Succeeded for a job." +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `succeeded` field of the +// [K8s JobStatus]. +// +// [K8s JobStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch +func (m JobSuccessfulPods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) 
+} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `succeeded` field of the +// [K8s JobStatus]. +// +// [K8s JobStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#jobstatus-v1-batch +func (m JobSuccessfulPods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// NamespacePhase is an instrument used to record metric values conforming to the +// "k8s.namespace.phase" semantic conventions. It represents the describes number +// of K8s namespaces that are currently in a given phase. +type NamespacePhase struct { + metric.Int64UpDownCounter +} + +// NewNamespacePhase returns a new NamespacePhase instrument. +func NewNamespacePhase( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (NamespacePhase, error) { + // Check if the meter is nil. + if m == nil { + return NamespacePhase{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.namespace.phase", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Describes number of K8s namespaces that are currently in a given phase."), + metric.WithUnit("{namespace}"), + }, opt...)..., + ) + if err != nil { + return NamespacePhase{noop.Int64UpDownCounter{}}, err + } + return NamespacePhase{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NamespacePhase) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (NamespacePhase) Name() string {
+	return "k8s.namespace.phase"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (NamespacePhase) Unit() string {
+	return "{namespace}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (NamespacePhase) Description() string {
+	return "Describes number of K8s namespaces that are currently in a given phase."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// The namespacePhase is the phase of the K8s namespace.
+func (m NamespacePhase) Add(
+	ctx context.Context,
+	incr int64,
+	namespacePhase NamespacePhaseAttr,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64UpDownCounter.Add(ctx, incr)
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("k8s.namespace.phase", string(namespacePhase)),
+			)...,
+		),
+	)
+
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+func (m NamespacePhase) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64UpDownCounter.Add(ctx, incr)
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// NodeAllocatableCPU is an instrument used to record metric values conforming to
+// the "k8s.node.allocatable.cpu" semantic conventions. It represents the amount
+// of cpu allocatable on the node.
+type NodeAllocatableCPU struct {
+	metric.Int64UpDownCounter
+}
+
+// NewNodeAllocatableCPU returns a new NodeAllocatableCPU instrument.
+func NewNodeAllocatableCPU(
+	m metric.Meter,
+	opt ...metric.Int64UpDownCounterOption,
+) (NodeAllocatableCPU, error) {
+	// Check if the meter is nil.
+ if m == nil { + return NodeAllocatableCPU{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.node.allocatable.cpu", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Amount of cpu allocatable on the node."), + metric.WithUnit("{cpu}"), + }, opt...)..., + ) + if err != nil { + return NodeAllocatableCPU{noop.Int64UpDownCounter{}}, err + } + return NodeAllocatableCPU{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NodeAllocatableCPU) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (NodeAllocatableCPU) Name() string { + return "k8s.node.allocatable.cpu" +} + +// Unit returns the semantic convention unit of the instrument +func (NodeAllocatableCPU) Unit() string { + return "{cpu}" +} + +// Description returns the semantic convention description of the instrument +func (NodeAllocatableCPU) Description() string { + return "Amount of cpu allocatable on the node." +} + +// Add adds incr to the existing count for attrs. +func (m NodeAllocatableCPU) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m NodeAllocatableCPU) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) 
+} + +// NodeAllocatableEphemeralStorage is an instrument used to record metric values +// conforming to the "k8s.node.allocatable.ephemeral_storage" semantic +// conventions. It represents the amount of ephemeral-storage allocatable on the +// node. +type NodeAllocatableEphemeralStorage struct { + metric.Int64UpDownCounter +} + +// NewNodeAllocatableEphemeralStorage returns a new +// NodeAllocatableEphemeralStorage instrument. +func NewNodeAllocatableEphemeralStorage( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (NodeAllocatableEphemeralStorage, error) { + // Check if the meter is nil. + if m == nil { + return NodeAllocatableEphemeralStorage{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.node.allocatable.ephemeral_storage", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Amount of ephemeral-storage allocatable on the node."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return NodeAllocatableEphemeralStorage{noop.Int64UpDownCounter{}}, err + } + return NodeAllocatableEphemeralStorage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NodeAllocatableEphemeralStorage) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (NodeAllocatableEphemeralStorage) Name() string { + return "k8s.node.allocatable.ephemeral_storage" +} + +// Unit returns the semantic convention unit of the instrument +func (NodeAllocatableEphemeralStorage) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (NodeAllocatableEphemeralStorage) Description() string { + return "Amount of ephemeral-storage allocatable on the node." +} + +// Add adds incr to the existing count for attrs. 
+func (m NodeAllocatableEphemeralStorage) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m NodeAllocatableEphemeralStorage) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// NodeAllocatableMemory is an instrument used to record metric values conforming +// to the "k8s.node.allocatable.memory" semantic conventions. It represents the +// amount of memory allocatable on the node. +type NodeAllocatableMemory struct { + metric.Int64UpDownCounter +} + +// NewNodeAllocatableMemory returns a new NodeAllocatableMemory instrument. +func NewNodeAllocatableMemory( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (NodeAllocatableMemory, error) { + // Check if the meter is nil. + if m == nil { + return NodeAllocatableMemory{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.node.allocatable.memory", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Amount of memory allocatable on the node."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return NodeAllocatableMemory{noop.Int64UpDownCounter{}}, err + } + return NodeAllocatableMemory{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m NodeAllocatableMemory) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (NodeAllocatableMemory) Name() string { + return "k8s.node.allocatable.memory" +} + +// Unit returns the semantic convention unit of the instrument +func (NodeAllocatableMemory) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (NodeAllocatableMemory) Description() string { + return "Amount of memory allocatable on the node." +} + +// Add adds incr to the existing count for attrs. +func (m NodeAllocatableMemory) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m NodeAllocatableMemory) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// NodeAllocatablePods is an instrument used to record metric values conforming +// to the "k8s.node.allocatable.pods" semantic conventions. It represents the +// amount of pods allocatable on the node. +type NodeAllocatablePods struct { + metric.Int64UpDownCounter +} + +// NewNodeAllocatablePods returns a new NodeAllocatablePods instrument. +func NewNodeAllocatablePods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (NodeAllocatablePods, error) { + // Check if the meter is nil. 
+ if m == nil { + return NodeAllocatablePods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.node.allocatable.pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Amount of pods allocatable on the node."), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return NodeAllocatablePods{noop.Int64UpDownCounter{}}, err + } + return NodeAllocatablePods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NodeAllocatablePods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (NodeAllocatablePods) Name() string { + return "k8s.node.allocatable.pods" +} + +// Unit returns the semantic convention unit of the instrument +func (NodeAllocatablePods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (NodeAllocatablePods) Description() string { + return "Amount of pods allocatable on the node." +} + +// Add adds incr to the existing count for attrs. +func (m NodeAllocatablePods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m NodeAllocatablePods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) 
+}
+
+// NodeConditionStatus is an instrument used to record metric values conforming
+// to the "k8s.node.condition.status" semantic conventions. It describes the
+// condition of a particular Node.
+type NodeConditionStatus struct {
+	metric.Int64UpDownCounter
+}
+
+// NewNodeConditionStatus returns a new NodeConditionStatus instrument.
+func NewNodeConditionStatus(
+	m metric.Meter,
+	opt ...metric.Int64UpDownCounterOption,
+) (NodeConditionStatus, error) {
+	// Check if the meter is nil.
+	if m == nil {
+		return NodeConditionStatus{noop.Int64UpDownCounter{}}, nil
+	}
+
+	i, err := m.Int64UpDownCounter(
+		"k8s.node.condition.status",
+		append([]metric.Int64UpDownCounterOption{
+			metric.WithDescription("Describes the condition of a particular Node."),
+			metric.WithUnit("{node}"),
+		}, opt...)...,
+	)
+	if err != nil {
+		return NodeConditionStatus{noop.Int64UpDownCounter{}}, err
+	}
+	return NodeConditionStatus{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m NodeConditionStatus) Inst() metric.Int64UpDownCounter {
+	return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (NodeConditionStatus) Name() string {
+	return "k8s.node.condition.status"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (NodeConditionStatus) Unit() string {
+	return "{node}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (NodeConditionStatus) Description() string {
+	return "Describes the condition of a particular Node."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// The nodeConditionStatus is the status of the condition, one of True,
+// False, Unknown.
+//
+// The nodeConditionType is the condition type of a K8s Node.
+//
+// All possible node condition pairs (type and status) will be reported at each
+// time interval to avoid missing metrics. Condition pairs corresponding to the
+// current conditions' statuses will be non-zero.
+func (m NodeConditionStatus) Add(
+	ctx context.Context,
+	incr int64,
+	nodeConditionStatus NodeConditionStatusAttr,
+	nodeConditionType NodeConditionTypeAttr,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64UpDownCounter.Add(ctx, incr)
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("k8s.node.condition.status", string(nodeConditionStatus)),
+				attribute.String("k8s.node.condition.type", string(nodeConditionType)),
+			)...,
+		),
+	)
+
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+//
+// All possible node condition pairs (type and status) will be reported at each
+// time interval to avoid missing metrics. Condition pairs corresponding to the
+// current conditions' statuses will be non-zero.
+func (m NodeConditionStatus) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64UpDownCounter.Add(ctx, incr)
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// NodeCPUTime is an instrument used to record metric values conforming to the
+// "k8s.node.cpu.time" semantic conventions. It represents the total CPU time
+// consumed.
+type NodeCPUTime struct {
+	metric.Float64Counter
+}
+
+// NewNodeCPUTime returns a new NodeCPUTime instrument.
+func NewNodeCPUTime(
+	m metric.Meter,
+	opt ...metric.Float64CounterOption,
+) (NodeCPUTime, error) {
+	// Check if the meter is nil.
+ if m == nil { + return NodeCPUTime{noop.Float64Counter{}}, nil + } + + i, err := m.Float64Counter( + "k8s.node.cpu.time", + append([]metric.Float64CounterOption{ + metric.WithDescription("Total CPU time consumed."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return NodeCPUTime{noop.Float64Counter{}}, err + } + return NodeCPUTime{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NodeCPUTime) Inst() metric.Float64Counter { + return m.Float64Counter +} + +// Name returns the semantic convention name of the instrument. +func (NodeCPUTime) Name() string { + return "k8s.node.cpu.time" +} + +// Unit returns the semantic convention unit of the instrument +func (NodeCPUTime) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (NodeCPUTime) Description() string { + return "Total CPU time consumed." +} + +// Add adds incr to the existing count for attrs. +// +// Total CPU time consumed by the specific Node on all available CPU cores +func (m NodeCPUTime) Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Float64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Float64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// Total CPU time consumed by the specific Node on all available CPU cores +func (m NodeCPUTime) AddSet(ctx context.Context, incr float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Counter.Add(ctx, incr, *o...) 
+} + +// NodeCPUUsage is an instrument used to record metric values conforming to the +// "k8s.node.cpu.usage" semantic conventions. It represents the node's CPU usage, +// measured in cpus. Range from 0 to the number of allocatable CPUs. +type NodeCPUUsage struct { + metric.Int64Gauge +} + +// NewNodeCPUUsage returns a new NodeCPUUsage instrument. +func NewNodeCPUUsage( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (NodeCPUUsage, error) { + // Check if the meter is nil. + if m == nil { + return NodeCPUUsage{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "k8s.node.cpu.usage", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Node's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs."), + metric.WithUnit("{cpu}"), + }, opt...)..., + ) + if err != nil { + return NodeCPUUsage{noop.Int64Gauge{}}, err + } + return NodeCPUUsage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NodeCPUUsage) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (NodeCPUUsage) Name() string { + return "k8s.node.cpu.usage" +} + +// Unit returns the semantic convention unit of the instrument +func (NodeCPUUsage) Unit() string { + return "{cpu}" +} + +// Description returns the semantic convention description of the instrument +func (NodeCPUUsage) Description() string { + return "Node's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs." +} + +// Record records val to the current distribution for attrs. 
+// +// CPU usage of the specific Node on all available CPU cores, averaged over the +// sample window +func (m NodeCPUUsage) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// CPU usage of the specific Node on all available CPU cores, averaged over the +// sample window +func (m NodeCPUUsage) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// NodeFilesystemAvailable is an instrument used to record metric values +// conforming to the "k8s.node.filesystem.available" semantic conventions. It +// represents the node filesystem available bytes. +type NodeFilesystemAvailable struct { + metric.Int64UpDownCounter +} + +// NewNodeFilesystemAvailable returns a new NodeFilesystemAvailable instrument. +func NewNodeFilesystemAvailable( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (NodeFilesystemAvailable, error) { + // Check if the meter is nil. + if m == nil { + return NodeFilesystemAvailable{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.node.filesystem.available", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Node filesystem available bytes."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return NodeFilesystemAvailable{noop.Int64UpDownCounter{}}, err + } + return NodeFilesystemAvailable{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m NodeFilesystemAvailable) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (NodeFilesystemAvailable) Name() string { + return "k8s.node.filesystem.available" +} + +// Unit returns the semantic convention unit of the instrument +func (NodeFilesystemAvailable) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (NodeFilesystemAvailable) Description() string { + return "Node filesystem available bytes." +} + +// Add adds incr to the existing count for attrs. +// +// This metric is derived from the +// [FsStats.AvailableBytes] field +// of the [NodeStats.Fs] +// of the Kubelet's stats API. +// +// [FsStats.AvailableBytes]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats +// [NodeStats.Fs]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#NodeStats +func (m NodeFilesystemAvailable) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is derived from the +// [FsStats.AvailableBytes] field +// of the [NodeStats.Fs] +// of the Kubelet's stats API. 
+// +// [FsStats.AvailableBytes]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats +// [NodeStats.Fs]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#NodeStats +func (m NodeFilesystemAvailable) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// NodeFilesystemCapacity is an instrument used to record metric values +// conforming to the "k8s.node.filesystem.capacity" semantic conventions. It +// represents the node filesystem capacity. +type NodeFilesystemCapacity struct { + metric.Int64UpDownCounter +} + +// NewNodeFilesystemCapacity returns a new NodeFilesystemCapacity instrument. +func NewNodeFilesystemCapacity( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (NodeFilesystemCapacity, error) { + // Check if the meter is nil. + if m == nil { + return NodeFilesystemCapacity{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.node.filesystem.capacity", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Node filesystem capacity."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return NodeFilesystemCapacity{noop.Int64UpDownCounter{}}, err + } + return NodeFilesystemCapacity{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NodeFilesystemCapacity) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (NodeFilesystemCapacity) Name() string { + return "k8s.node.filesystem.capacity" +} + +// Unit returns the semantic convention unit of the instrument +func (NodeFilesystemCapacity) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (NodeFilesystemCapacity) Description() string { + return "Node filesystem capacity." +} + +// Add adds incr to the existing count for attrs. +// +// This metric is derived from the +// [FsStats.CapacityBytes] field +// of the [NodeStats.Fs] +// of the Kubelet's stats API. +// +// [FsStats.CapacityBytes]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats +// [NodeStats.Fs]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#NodeStats +func (m NodeFilesystemCapacity) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is derived from the +// [FsStats.CapacityBytes] field +// of the [NodeStats.Fs] +// of the Kubelet's stats API. +// +// [FsStats.CapacityBytes]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats +// [NodeStats.Fs]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#NodeStats +func (m NodeFilesystemCapacity) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) 
+} + +// NodeFilesystemUsage is an instrument used to record metric values conforming +// to the "k8s.node.filesystem.usage" semantic conventions. It represents the +// node filesystem usage. +type NodeFilesystemUsage struct { + metric.Int64UpDownCounter +} + +// NewNodeFilesystemUsage returns a new NodeFilesystemUsage instrument. +func NewNodeFilesystemUsage( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (NodeFilesystemUsage, error) { + // Check if the meter is nil. + if m == nil { + return NodeFilesystemUsage{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.node.filesystem.usage", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Node filesystem usage."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return NodeFilesystemUsage{noop.Int64UpDownCounter{}}, err + } + return NodeFilesystemUsage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NodeFilesystemUsage) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (NodeFilesystemUsage) Name() string { + return "k8s.node.filesystem.usage" +} + +// Unit returns the semantic convention unit of the instrument +func (NodeFilesystemUsage) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (NodeFilesystemUsage) Description() string { + return "Node filesystem usage." +} + +// Add adds incr to the existing count for attrs. +// +// This may not equal capacity - available. +// +// This metric is derived from the +// [FsStats.UsedBytes] field +// of the [NodeStats.Fs] +// of the Kubelet's stats API. 
+// +// [FsStats.UsedBytes]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats +// [NodeStats.Fs]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#NodeStats +func (m NodeFilesystemUsage) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This may not equal capacity - available. +// +// This metric is derived from the +// [FsStats.UsedBytes] field +// of the [NodeStats.Fs] +// of the Kubelet's stats API. +// +// [FsStats.UsedBytes]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats +// [NodeStats.Fs]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#NodeStats +func (m NodeFilesystemUsage) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// NodeMemoryUsage is an instrument used to record metric values conforming to +// the "k8s.node.memory.usage" semantic conventions. It represents the memory +// usage of the Node. +type NodeMemoryUsage struct { + metric.Int64Gauge +} + +// NewNodeMemoryUsage returns a new NodeMemoryUsage instrument. +func NewNodeMemoryUsage( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (NodeMemoryUsage, error) { + // Check if the meter is nil. 
+ if m == nil { + return NodeMemoryUsage{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "k8s.node.memory.usage", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Memory usage of the Node."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return NodeMemoryUsage{noop.Int64Gauge{}}, err + } + return NodeMemoryUsage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NodeMemoryUsage) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (NodeMemoryUsage) Name() string { + return "k8s.node.memory.usage" +} + +// Unit returns the semantic convention unit of the instrument +func (NodeMemoryUsage) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (NodeMemoryUsage) Description() string { + return "Memory usage of the Node." +} + +// Record records val to the current distribution for attrs. +// +// Total memory usage of the Node +func (m NodeMemoryUsage) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// Total memory usage of the Node +func (m NodeMemoryUsage) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// NodeNetworkErrors is an instrument used to record metric values conforming to +// the "k8s.node.network.errors" semantic conventions. 
It represents the node +// network errors. +type NodeNetworkErrors struct { + metric.Int64Counter +} + +// NewNodeNetworkErrors returns a new NodeNetworkErrors instrument. +func NewNodeNetworkErrors( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (NodeNetworkErrors, error) { + // Check if the meter is nil. + if m == nil { + return NodeNetworkErrors{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "k8s.node.network.errors", + append([]metric.Int64CounterOption{ + metric.WithDescription("Node network errors."), + metric.WithUnit("{error}"), + }, opt...)..., + ) + if err != nil { + return NodeNetworkErrors{noop.Int64Counter{}}, err + } + return NodeNetworkErrors{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NodeNetworkErrors) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (NodeNetworkErrors) Name() string { + return "k8s.node.network.errors" +} + +// Unit returns the semantic convention unit of the instrument +func (NodeNetworkErrors) Unit() string { + return "{error}" +} + +// Description returns the semantic convention description of the instrument +func (NodeNetworkErrors) Description() string { + return "Node network errors." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m NodeNetworkErrors) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+func (m NodeNetworkErrors) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrNetworkInterfaceName returns an optional attribute for the +// "network.interface.name" semantic convention. It represents the network +// interface name. +func (NodeNetworkErrors) AttrNetworkInterfaceName(val string) attribute.KeyValue { + return attribute.String("network.interface.name", val) +} + +// AttrNetworkIODirection returns an optional attribute for the +// "network.io.direction" semantic convention. It represents the network IO +// operation direction. +func (NodeNetworkErrors) AttrNetworkIODirection(val NetworkIODirectionAttr) attribute.KeyValue { + return attribute.String("network.io.direction", string(val)) +} + +// NodeNetworkIO is an instrument used to record metric values conforming to the +// "k8s.node.network.io" semantic conventions. It represents the network bytes +// for the Node. +type NodeNetworkIO struct { + metric.Int64Counter +} + +// NewNodeNetworkIO returns a new NodeNetworkIO instrument. +func NewNodeNetworkIO( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (NodeNetworkIO, error) { + // Check if the meter is nil. + if m == nil { + return NodeNetworkIO{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "k8s.node.network.io", + append([]metric.Int64CounterOption{ + metric.WithDescription("Network bytes for the Node."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return NodeNetworkIO{noop.Int64Counter{}}, err + } + return NodeNetworkIO{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m NodeNetworkIO) Inst() metric.Int64Counter {
+	return m.Int64Counter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (NodeNetworkIO) Name() string {
+	return "k8s.node.network.io"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (NodeNetworkIO) Unit() string {
+	return "By"
+}
+
+// Description returns the semantic convention description of the instrument
+func (NodeNetworkIO) Description() string {
+	return "Network bytes for the Node."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// All additional attrs passed are included in the recorded value.
+func (m NodeNetworkIO) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) {
+	if len(attrs) == 0 {
+		m.Int64Counter.Add(ctx, incr)
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributes(attrs...))
+	m.Int64Counter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+func (m NodeNetworkIO) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Counter.Add(ctx, incr)
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Counter.Add(ctx, incr, *o...)
+}
+
+// AttrNetworkInterfaceName returns an optional attribute for the
+// "network.interface.name" semantic convention. It represents the network
+// interface name.
+func (NodeNetworkIO) AttrNetworkInterfaceName(val string) attribute.KeyValue {
+	return attribute.String("network.interface.name", val)
+}
+
+// AttrNetworkIODirection returns an optional attribute for the
+// "network.io.direction" semantic convention. It represents the network IO
+// operation direction.
+func (NodeNetworkIO) AttrNetworkIODirection(val NetworkIODirectionAttr) attribute.KeyValue { + return attribute.String("network.io.direction", string(val)) +} + +// NodeUptime is an instrument used to record metric values conforming to the +// "k8s.node.uptime" semantic conventions. It represents the time the Node has +// been running. +type NodeUptime struct { + metric.Float64Gauge +} + +// NewNodeUptime returns a new NodeUptime instrument. +func NewNodeUptime( + m metric.Meter, + opt ...metric.Float64GaugeOption, +) (NodeUptime, error) { + // Check if the meter is nil. + if m == nil { + return NodeUptime{noop.Float64Gauge{}}, nil + } + + i, err := m.Float64Gauge( + "k8s.node.uptime", + append([]metric.Float64GaugeOption{ + metric.WithDescription("The time the Node has been running."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return NodeUptime{noop.Float64Gauge{}}, err + } + return NodeUptime{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NodeUptime) Inst() metric.Float64Gauge { + return m.Float64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (NodeUptime) Name() string { + return "k8s.node.uptime" +} + +// Unit returns the semantic convention unit of the instrument +func (NodeUptime) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (NodeUptime) Description() string { + return "The time the Node has been running." +} + +// Record records val to the current distribution for attrs. +// +// Instrumentations SHOULD use a gauge with type `double` and measure uptime in +// seconds as a floating point number with the highest precision available. +// The actual accuracy would depend on the instrumentation and operating system. 
+func (m NodeUptime) Record(ctx context.Context, val float64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Float64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Float64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// Instrumentations SHOULD use a gauge with type `double` and measure uptime in +// seconds as a floating point number with the highest precision available. +// The actual accuracy would depend on the instrumentation and operating system. +func (m NodeUptime) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Gauge.Record(ctx, val, *o...) +} + +// PodCPUTime is an instrument used to record metric values conforming to the +// "k8s.pod.cpu.time" semantic conventions. It represents the total CPU time +// consumed. +type PodCPUTime struct { + metric.Float64Counter +} + +// NewPodCPUTime returns a new PodCPUTime instrument. +func NewPodCPUTime( + m metric.Meter, + opt ...metric.Float64CounterOption, +) (PodCPUTime, error) { + // Check if the meter is nil. + if m == nil { + return PodCPUTime{noop.Float64Counter{}}, nil + } + + i, err := m.Float64Counter( + "k8s.pod.cpu.time", + append([]metric.Float64CounterOption{ + metric.WithDescription("Total CPU time consumed."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return PodCPUTime{noop.Float64Counter{}}, err + } + return PodCPUTime{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m PodCPUTime) Inst() metric.Float64Counter { + return m.Float64Counter +} + +// Name returns the semantic convention name of the instrument. +func (PodCPUTime) Name() string { + return "k8s.pod.cpu.time" +} + +// Unit returns the semantic convention unit of the instrument +func (PodCPUTime) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (PodCPUTime) Description() string { + return "Total CPU time consumed." +} + +// Add adds incr to the existing count for attrs. +// +// Total CPU time consumed by the specific Pod on all available CPU cores +func (m PodCPUTime) Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Float64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Float64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// Total CPU time consumed by the specific Pod on all available CPU cores +func (m PodCPUTime) AddSet(ctx context.Context, incr float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Counter.Add(ctx, incr, *o...) +} + +// PodCPUUsage is an instrument used to record metric values conforming to the +// "k8s.pod.cpu.usage" semantic conventions. It represents the pod's CPU usage, +// measured in cpus. Range from 0 to the number of allocatable CPUs. +type PodCPUUsage struct { + metric.Int64Gauge +} + +// NewPodCPUUsage returns a new PodCPUUsage instrument. +func NewPodCPUUsage( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (PodCPUUsage, error) { + // Check if the meter is nil. 
+ if m == nil { + return PodCPUUsage{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "k8s.pod.cpu.usage", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Pod's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs."), + metric.WithUnit("{cpu}"), + }, opt...)..., + ) + if err != nil { + return PodCPUUsage{noop.Int64Gauge{}}, err + } + return PodCPUUsage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m PodCPUUsage) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (PodCPUUsage) Name() string { + return "k8s.pod.cpu.usage" +} + +// Unit returns the semantic convention unit of the instrument +func (PodCPUUsage) Unit() string { + return "{cpu}" +} + +// Description returns the semantic convention description of the instrument +func (PodCPUUsage) Description() string { + return "Pod's CPU usage, measured in cpus. Range from 0 to the number of allocatable CPUs." +} + +// Record records val to the current distribution for attrs. +// +// CPU usage of the specific Pod on all available CPU cores, averaged over the +// sample window +func (m PodCPUUsage) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+// +// CPU usage of the specific Pod on all available CPU cores, averaged over the +// sample window +func (m PodCPUUsage) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// PodFilesystemAvailable is an instrument used to record metric values +// conforming to the "k8s.pod.filesystem.available" semantic conventions. It +// represents the pod filesystem available bytes. +type PodFilesystemAvailable struct { + metric.Int64UpDownCounter +} + +// NewPodFilesystemAvailable returns a new PodFilesystemAvailable instrument. +func NewPodFilesystemAvailable( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (PodFilesystemAvailable, error) { + // Check if the meter is nil. + if m == nil { + return PodFilesystemAvailable{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.pod.filesystem.available", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Pod filesystem available bytes."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return PodFilesystemAvailable{noop.Int64UpDownCounter{}}, err + } + return PodFilesystemAvailable{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m PodFilesystemAvailable) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (PodFilesystemAvailable) Name() string { + return "k8s.pod.filesystem.available" +} + +// Unit returns the semantic convention unit of the instrument +func (PodFilesystemAvailable) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (PodFilesystemAvailable) Description() string { + return "Pod filesystem available bytes." +} + +// Add adds incr to the existing count for attrs. +// +// This metric is derived from the +// [FsStats.AvailableBytes] field +// of the [PodStats.EphemeralStorage] +// of the Kubelet's stats API. +// +// [FsStats.AvailableBytes]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats +// [PodStats.EphemeralStorage]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats +func (m PodFilesystemAvailable) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is derived from the +// [FsStats.AvailableBytes] field +// of the [PodStats.EphemeralStorage] +// of the Kubelet's stats API. +// +// [FsStats.AvailableBytes]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats +// [PodStats.EphemeralStorage]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats +func (m PodFilesystemAvailable) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) 
+} + +// PodFilesystemCapacity is an instrument used to record metric values conforming +// to the "k8s.pod.filesystem.capacity" semantic conventions. It represents the +// pod filesystem capacity. +type PodFilesystemCapacity struct { + metric.Int64UpDownCounter +} + +// NewPodFilesystemCapacity returns a new PodFilesystemCapacity instrument. +func NewPodFilesystemCapacity( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (PodFilesystemCapacity, error) { + // Check if the meter is nil. + if m == nil { + return PodFilesystemCapacity{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.pod.filesystem.capacity", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Pod filesystem capacity."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return PodFilesystemCapacity{noop.Int64UpDownCounter{}}, err + } + return PodFilesystemCapacity{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m PodFilesystemCapacity) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (PodFilesystemCapacity) Name() string { + return "k8s.pod.filesystem.capacity" +} + +// Unit returns the semantic convention unit of the instrument +func (PodFilesystemCapacity) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (PodFilesystemCapacity) Description() string { + return "Pod filesystem capacity." +} + +// Add adds incr to the existing count for attrs. +// +// This metric is derived from the +// [FsStats.CapacityBytes] field +// of the [PodStats.EphemeralStorage] +// of the Kubelet's stats API. 
+// +// [FsStats.CapacityBytes]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats +// [PodStats.EphemeralStorage]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats +func (m PodFilesystemCapacity) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is derived from the +// [FsStats.CapacityBytes] field +// of the [PodStats.EphemeralStorage] +// of the Kubelet's stats API. +// +// [FsStats.CapacityBytes]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats +// [PodStats.EphemeralStorage]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats +func (m PodFilesystemCapacity) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// PodFilesystemUsage is an instrument used to record metric values conforming to +// the "k8s.pod.filesystem.usage" semantic conventions. It represents the pod +// filesystem usage. +type PodFilesystemUsage struct { + metric.Int64UpDownCounter +} + +// NewPodFilesystemUsage returns a new PodFilesystemUsage instrument. +func NewPodFilesystemUsage( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (PodFilesystemUsage, error) { + // Check if the meter is nil. 
+ if m == nil { + return PodFilesystemUsage{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.pod.filesystem.usage", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Pod filesystem usage."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return PodFilesystemUsage{noop.Int64UpDownCounter{}}, err + } + return PodFilesystemUsage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m PodFilesystemUsage) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (PodFilesystemUsage) Name() string { + return "k8s.pod.filesystem.usage" +} + +// Unit returns the semantic convention unit of the instrument +func (PodFilesystemUsage) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (PodFilesystemUsage) Description() string { + return "Pod filesystem usage." +} + +// Add adds incr to the existing count for attrs. +// +// This may not equal capacity - available. +// +// This metric is derived from the +// [FsStats.UsedBytes] field +// of the [PodStats.EphemeralStorage] +// of the Kubelet's stats API. +// +// [FsStats.UsedBytes]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats +// [PodStats.EphemeralStorage]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats +func (m PodFilesystemUsage) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This may not equal capacity - available. 
+// +// This metric is derived from the +// [FsStats.UsedBytes] field +// of the [PodStats.EphemeralStorage] +// of the Kubelet's stats API. +// +// [FsStats.UsedBytes]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#FsStats +// [PodStats.EphemeralStorage]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats +func (m PodFilesystemUsage) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// PodMemoryUsage is an instrument used to record metric values conforming to the +// "k8s.pod.memory.usage" semantic conventions. It represents the memory usage of +// the Pod. +type PodMemoryUsage struct { + metric.Int64Gauge +} + +// NewPodMemoryUsage returns a new PodMemoryUsage instrument. +func NewPodMemoryUsage( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (PodMemoryUsage, error) { + // Check if the meter is nil. + if m == nil { + return PodMemoryUsage{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "k8s.pod.memory.usage", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Memory usage of the Pod."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return PodMemoryUsage{noop.Int64Gauge{}}, err + } + return PodMemoryUsage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m PodMemoryUsage) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. 
+func (PodMemoryUsage) Name() string { + return "k8s.pod.memory.usage" +} + +// Unit returns the semantic convention unit of the instrument +func (PodMemoryUsage) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (PodMemoryUsage) Description() string { + return "Memory usage of the Pod." +} + +// Record records val to the current distribution for attrs. +// +// Total memory usage of the Pod +func (m PodMemoryUsage) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// Total memory usage of the Pod +func (m PodMemoryUsage) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// PodNetworkErrors is an instrument used to record metric values conforming to +// the "k8s.pod.network.errors" semantic conventions. It represents the pod +// network errors. +type PodNetworkErrors struct { + metric.Int64Counter +} + +// NewPodNetworkErrors returns a new PodNetworkErrors instrument. +func NewPodNetworkErrors( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (PodNetworkErrors, error) { + // Check if the meter is nil. 
+ if m == nil { + return PodNetworkErrors{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "k8s.pod.network.errors", + append([]metric.Int64CounterOption{ + metric.WithDescription("Pod network errors."), + metric.WithUnit("{error}"), + }, opt...)..., + ) + if err != nil { + return PodNetworkErrors{noop.Int64Counter{}}, err + } + return PodNetworkErrors{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m PodNetworkErrors) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (PodNetworkErrors) Name() string { + return "k8s.pod.network.errors" +} + +// Unit returns the semantic convention unit of the instrument +func (PodNetworkErrors) Unit() string { + return "{error}" +} + +// Description returns the semantic convention description of the instrument +func (PodNetworkErrors) Description() string { + return "Pod network errors." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m PodNetworkErrors) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m PodNetworkErrors) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrNetworkInterfaceName returns an optional attribute for the +// "network.interface.name" semantic convention. 
It represents the network +// interface name. +func (PodNetworkErrors) AttrNetworkInterfaceName(val string) attribute.KeyValue { + return attribute.String("network.interface.name", val) +} + +// AttrNetworkIODirection returns an optional attribute for the +// "network.io.direction" semantic convention. It represents the network IO +// operation direction. +func (PodNetworkErrors) AttrNetworkIODirection(val NetworkIODirectionAttr) attribute.KeyValue { + return attribute.String("network.io.direction", string(val)) +} + +// PodNetworkIO is an instrument used to record metric values conforming to the +// "k8s.pod.network.io" semantic conventions. It represents the network bytes for +// the Pod. +type PodNetworkIO struct { + metric.Int64Counter +} + +// NewPodNetworkIO returns a new PodNetworkIO instrument. +func NewPodNetworkIO( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (PodNetworkIO, error) { + // Check if the meter is nil. + if m == nil { + return PodNetworkIO{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "k8s.pod.network.io", + append([]metric.Int64CounterOption{ + metric.WithDescription("Network bytes for the Pod."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return PodNetworkIO{noop.Int64Counter{}}, err + } + return PodNetworkIO{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m PodNetworkIO) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (PodNetworkIO) Name() string { + return "k8s.pod.network.io" +} + +// Unit returns the semantic convention unit of the instrument +func (PodNetworkIO) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (PodNetworkIO) Description() string { + return "Network bytes for the Pod." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. 
+func (m PodNetworkIO) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m PodNetworkIO) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrNetworkInterfaceName returns an optional attribute for the +// "network.interface.name" semantic convention. It represents the network +// interface name. +func (PodNetworkIO) AttrNetworkInterfaceName(val string) attribute.KeyValue { + return attribute.String("network.interface.name", val) +} + +// AttrNetworkIODirection returns an optional attribute for the +// "network.io.direction" semantic convention. It represents the network IO +// operation direction. +func (PodNetworkIO) AttrNetworkIODirection(val NetworkIODirectionAttr) attribute.KeyValue { + return attribute.String("network.io.direction", string(val)) +} + +// PodUptime is an instrument used to record metric values conforming to the +// "k8s.pod.uptime" semantic conventions. It represents the time the Pod has been +// running. +type PodUptime struct { + metric.Float64Gauge +} + +// NewPodUptime returns a new PodUptime instrument. +func NewPodUptime( + m metric.Meter, + opt ...metric.Float64GaugeOption, +) (PodUptime, error) { + // Check if the meter is nil. 
+ if m == nil { + return PodUptime{noop.Float64Gauge{}}, nil + } + + i, err := m.Float64Gauge( + "k8s.pod.uptime", + append([]metric.Float64GaugeOption{ + metric.WithDescription("The time the Pod has been running."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return PodUptime{noop.Float64Gauge{}}, err + } + return PodUptime{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m PodUptime) Inst() metric.Float64Gauge { + return m.Float64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (PodUptime) Name() string { + return "k8s.pod.uptime" +} + +// Unit returns the semantic convention unit of the instrument +func (PodUptime) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (PodUptime) Description() string { + return "The time the Pod has been running." +} + +// Record records val to the current distribution for attrs. +// +// Instrumentations SHOULD use a gauge with type `double` and measure uptime in +// seconds as a floating point number with the highest precision available. +// The actual accuracy would depend on the instrumentation and operating system. +func (m PodUptime) Record(ctx context.Context, val float64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Float64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Float64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// Instrumentations SHOULD use a gauge with type `double` and measure uptime in +// seconds as a floating point number with the highest precision available. +// The actual accuracy would depend on the instrumentation and operating system. 
+func (m PodUptime) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Gauge.Record(ctx, val, *o...) +} + +// PodVolumeAvailable is an instrument used to record metric values conforming to +// the "k8s.pod.volume.available" semantic conventions. It represents the pod +// volume storage space available. +type PodVolumeAvailable struct { + metric.Int64UpDownCounter +} + +// NewPodVolumeAvailable returns a new PodVolumeAvailable instrument. +func NewPodVolumeAvailable( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (PodVolumeAvailable, error) { + // Check if the meter is nil. + if m == nil { + return PodVolumeAvailable{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.pod.volume.available", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Pod volume storage space available."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return PodVolumeAvailable{noop.Int64UpDownCounter{}}, err + } + return PodVolumeAvailable{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m PodVolumeAvailable) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (PodVolumeAvailable) Name() string { + return "k8s.pod.volume.available" +} + +// Unit returns the semantic convention unit of the instrument +func (PodVolumeAvailable) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (PodVolumeAvailable) Description() string { + return "Pod volume storage space available." +} + +// Add adds incr to the existing count for attrs. +// +// The volumeName is the the name of the K8s volume. 
+// +// All additional attrs passed are included in the recorded value. +// +// This metric is derived from the +// [VolumeStats.AvailableBytes] field +// of the [PodStats] of the +// Kubelet's stats API. +// +// [VolumeStats.AvailableBytes]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#VolumeStats +// [PodStats]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats +func (m PodVolumeAvailable) Add( + ctx context.Context, + incr int64, + volumeName string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("k8s.volume.name", volumeName), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is derived from the +// [VolumeStats.AvailableBytes] field +// of the [PodStats] of the +// Kubelet's stats API. +// +// [VolumeStats.AvailableBytes]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#VolumeStats +// [PodStats]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats +func (m PodVolumeAvailable) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrVolumeType returns an optional attribute for the "k8s.volume.type" +// semantic convention. It represents the type of the K8s volume. 
+func (PodVolumeAvailable) AttrVolumeType(val VolumeTypeAttr) attribute.KeyValue {
+	return attribute.String("k8s.volume.type", string(val))
+}
+
+// PodVolumeCapacity is an instrument used to record metric values conforming to
+// the "k8s.pod.volume.capacity" semantic conventions. It represents the pod
+// volume total capacity.
+type PodVolumeCapacity struct {
+	metric.Int64UpDownCounter
+}
+
+// NewPodVolumeCapacity returns a new PodVolumeCapacity instrument.
+func NewPodVolumeCapacity(
+	m metric.Meter,
+	opt ...metric.Int64UpDownCounterOption,
+) (PodVolumeCapacity, error) {
+	// Check if the meter is nil.
+	if m == nil {
+		return PodVolumeCapacity{noop.Int64UpDownCounter{}}, nil
+	}
+
+	i, err := m.Int64UpDownCounter(
+		"k8s.pod.volume.capacity",
+		append([]metric.Int64UpDownCounterOption{
+			metric.WithDescription("Pod volume total capacity."),
+			metric.WithUnit("By"),
+		}, opt...)...,
+	)
+	if err != nil {
+		return PodVolumeCapacity{noop.Int64UpDownCounter{}}, err
+	}
+	return PodVolumeCapacity{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m PodVolumeCapacity) Inst() metric.Int64UpDownCounter {
+	return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (PodVolumeCapacity) Name() string {
+	return "k8s.pod.volume.capacity"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (PodVolumeCapacity) Unit() string {
+	return "By"
+}
+
+// Description returns the semantic convention description of the instrument
+func (PodVolumeCapacity) Description() string {
+	return "Pod volume total capacity."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// The volumeName is the name of the K8s volume.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// This metric is derived from the
+// [VolumeStats.CapacityBytes] field
+// of the [PodStats] of the
+// Kubelet's stats API.
+// +// [VolumeStats.CapacityBytes]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#VolumeStats +// [PodStats]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats +func (m PodVolumeCapacity) Add( + ctx context.Context, + incr int64, + volumeName string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("k8s.volume.name", volumeName), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is derived from the +// [VolumeStats.CapacityBytes] field +// of the [PodStats] of the +// Kubelet's stats API. +// +// [VolumeStats.CapacityBytes]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#VolumeStats +// [PodStats]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats +func (m PodVolumeCapacity) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrVolumeType returns an optional attribute for the "k8s.volume.type" +// semantic convention. It represents the type of the K8s volume. +func (PodVolumeCapacity) AttrVolumeType(val VolumeTypeAttr) attribute.KeyValue { + return attribute.String("k8s.volume.type", string(val)) +} + +// PodVolumeInodeCount is an instrument used to record metric values conforming +// to the "k8s.pod.volume.inode.count" semantic conventions. It represents the +// total inodes in the filesystem of the Pod's volume. 
+type PodVolumeInodeCount struct {
+	metric.Int64UpDownCounter
+}
+
+// NewPodVolumeInodeCount returns a new PodVolumeInodeCount instrument.
+func NewPodVolumeInodeCount(
+	m metric.Meter,
+	opt ...metric.Int64UpDownCounterOption,
+) (PodVolumeInodeCount, error) {
+	// Check if the meter is nil.
+	if m == nil {
+		return PodVolumeInodeCount{noop.Int64UpDownCounter{}}, nil
+	}
+
+	i, err := m.Int64UpDownCounter(
+		"k8s.pod.volume.inode.count",
+		append([]metric.Int64UpDownCounterOption{
+			metric.WithDescription("The total inodes in the filesystem of the Pod's volume."),
+			metric.WithUnit("{inode}"),
+		}, opt...)...,
+	)
+	if err != nil {
+		return PodVolumeInodeCount{noop.Int64UpDownCounter{}}, err
+	}
+	return PodVolumeInodeCount{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m PodVolumeInodeCount) Inst() metric.Int64UpDownCounter {
+	return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (PodVolumeInodeCount) Name() string {
+	return "k8s.pod.volume.inode.count"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (PodVolumeInodeCount) Unit() string {
+	return "{inode}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (PodVolumeInodeCount) Description() string {
+	return "The total inodes in the filesystem of the Pod's volume."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// The volumeName is the name of the K8s volume.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// This metric is derived from the
+// [VolumeStats.Inodes] field
+// of the [PodStats] of the
+// Kubelet's stats API.
+// +// [VolumeStats.Inodes]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#VolumeStats +// [PodStats]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats +func (m PodVolumeInodeCount) Add( + ctx context.Context, + incr int64, + volumeName string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("k8s.volume.name", volumeName), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is derived from the +// [VolumeStats.Inodes] field +// of the [PodStats] of the +// Kubelet's stats API. +// +// [VolumeStats.Inodes]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#VolumeStats +// [PodStats]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats +func (m PodVolumeInodeCount) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrVolumeType returns an optional attribute for the "k8s.volume.type" +// semantic convention. It represents the type of the K8s volume. +func (PodVolumeInodeCount) AttrVolumeType(val VolumeTypeAttr) attribute.KeyValue { + return attribute.String("k8s.volume.type", string(val)) +} + +// PodVolumeInodeFree is an instrument used to record metric values conforming to +// the "k8s.pod.volume.inode.free" semantic conventions. It represents the free +// inodes in the filesystem of the Pod's volume. 
+type PodVolumeInodeFree struct {
+	metric.Int64UpDownCounter
+}
+
+// NewPodVolumeInodeFree returns a new PodVolumeInodeFree instrument.
+func NewPodVolumeInodeFree(
+	m metric.Meter,
+	opt ...metric.Int64UpDownCounterOption,
+) (PodVolumeInodeFree, error) {
+	// Check if the meter is nil.
+	if m == nil {
+		return PodVolumeInodeFree{noop.Int64UpDownCounter{}}, nil
+	}
+
+	i, err := m.Int64UpDownCounter(
+		"k8s.pod.volume.inode.free",
+		append([]metric.Int64UpDownCounterOption{
+			metric.WithDescription("The free inodes in the filesystem of the Pod's volume."),
+			metric.WithUnit("{inode}"),
+		}, opt...)...,
+	)
+	if err != nil {
+		return PodVolumeInodeFree{noop.Int64UpDownCounter{}}, err
+	}
+	return PodVolumeInodeFree{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m PodVolumeInodeFree) Inst() metric.Int64UpDownCounter {
+	return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (PodVolumeInodeFree) Name() string {
+	return "k8s.pod.volume.inode.free"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (PodVolumeInodeFree) Unit() string {
+	return "{inode}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (PodVolumeInodeFree) Description() string {
+	return "The free inodes in the filesystem of the Pod's volume."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// The volumeName is the name of the K8s volume.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// This metric is derived from the
+// [VolumeStats.InodesFree] field
+// of the [PodStats] of the
+// Kubelet's stats API.
+// +// [VolumeStats.InodesFree]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#VolumeStats +// [PodStats]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats +func (m PodVolumeInodeFree) Add( + ctx context.Context, + incr int64, + volumeName string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("k8s.volume.name", volumeName), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is derived from the +// [VolumeStats.InodesFree] field +// of the [PodStats] of the +// Kubelet's stats API. +// +// [VolumeStats.InodesFree]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#VolumeStats +// [PodStats]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats +func (m PodVolumeInodeFree) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrVolumeType returns an optional attribute for the "k8s.volume.type" +// semantic convention. It represents the type of the K8s volume. +func (PodVolumeInodeFree) AttrVolumeType(val VolumeTypeAttr) attribute.KeyValue { + return attribute.String("k8s.volume.type", string(val)) +} + +// PodVolumeInodeUsed is an instrument used to record metric values conforming to +// the "k8s.pod.volume.inode.used" semantic conventions. It represents the inodes +// used by the filesystem of the Pod's volume. 
+type PodVolumeInodeUsed struct {
+	metric.Int64UpDownCounter
+}
+
+// NewPodVolumeInodeUsed returns a new PodVolumeInodeUsed instrument.
+func NewPodVolumeInodeUsed(
+	m metric.Meter,
+	opt ...metric.Int64UpDownCounterOption,
+) (PodVolumeInodeUsed, error) {
+	// Check if the meter is nil.
+	if m == nil {
+		return PodVolumeInodeUsed{noop.Int64UpDownCounter{}}, nil
+	}
+
+	i, err := m.Int64UpDownCounter(
+		"k8s.pod.volume.inode.used",
+		append([]metric.Int64UpDownCounterOption{
+			metric.WithDescription("The inodes used by the filesystem of the Pod's volume."),
+			metric.WithUnit("{inode}"),
+		}, opt...)...,
+	)
+	if err != nil {
+		return PodVolumeInodeUsed{noop.Int64UpDownCounter{}}, err
+	}
+	return PodVolumeInodeUsed{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m PodVolumeInodeUsed) Inst() metric.Int64UpDownCounter {
+	return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (PodVolumeInodeUsed) Name() string {
+	return "k8s.pod.volume.inode.used"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (PodVolumeInodeUsed) Unit() string {
+	return "{inode}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (PodVolumeInodeUsed) Description() string {
+	return "The inodes used by the filesystem of the Pod's volume."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// The volumeName is the name of the K8s volume.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// This metric is derived from the
+// [VolumeStats.InodesUsed] field
+// of the [PodStats] of the
+// Kubelet's stats API.
+//
+// This may not be equal to `inodes - free` because filesystem may share inodes
+// with other filesystems.
+// +// [VolumeStats.InodesUsed]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#VolumeStats +// [PodStats]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats +func (m PodVolumeInodeUsed) Add( + ctx context.Context, + incr int64, + volumeName string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("k8s.volume.name", volumeName), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is derived from the +// [VolumeStats.InodesUsed] field +// of the [PodStats] of the +// Kubelet's stats API. +// +// This may not be equal to `inodes - free` because filesystem may share inodes +// with other filesystems. +// +// [VolumeStats.InodesUsed]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#VolumeStats +// [PodStats]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats +func (m PodVolumeInodeUsed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrVolumeType returns an optional attribute for the "k8s.volume.type" +// semantic convention. It represents the type of the K8s volume. +func (PodVolumeInodeUsed) AttrVolumeType(val VolumeTypeAttr) attribute.KeyValue { + return attribute.String("k8s.volume.type", string(val)) +} + +// PodVolumeUsage is an instrument used to record metric values conforming to the +// "k8s.pod.volume.usage" semantic conventions. 
It represents the pod volume
+// usage.
+type PodVolumeUsage struct {
+	metric.Int64UpDownCounter
+}
+
+// NewPodVolumeUsage returns a new PodVolumeUsage instrument.
+func NewPodVolumeUsage(
+	m metric.Meter,
+	opt ...metric.Int64UpDownCounterOption,
+) (PodVolumeUsage, error) {
+	// Check if the meter is nil.
+	if m == nil {
+		return PodVolumeUsage{noop.Int64UpDownCounter{}}, nil
+	}
+
+	i, err := m.Int64UpDownCounter(
+		"k8s.pod.volume.usage",
+		append([]metric.Int64UpDownCounterOption{
+			metric.WithDescription("Pod volume usage."),
+			metric.WithUnit("By"),
+		}, opt...)...,
+	)
+	if err != nil {
+		return PodVolumeUsage{noop.Int64UpDownCounter{}}, err
+	}
+	return PodVolumeUsage{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m PodVolumeUsage) Inst() metric.Int64UpDownCounter {
+	return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (PodVolumeUsage) Name() string {
+	return "k8s.pod.volume.usage"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (PodVolumeUsage) Unit() string {
+	return "By"
+}
+
+// Description returns the semantic convention description of the instrument
+func (PodVolumeUsage) Description() string {
+	return "Pod volume usage."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// The volumeName is the name of the K8s volume.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// This may not equal capacity - available.
+//
+// This metric is derived from the
+// [VolumeStats.UsedBytes] field
+// of the [PodStats] of the
+// Kubelet's stats API.
+// +// [VolumeStats.UsedBytes]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#VolumeStats +// [PodStats]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats +func (m PodVolumeUsage) Add( + ctx context.Context, + incr int64, + volumeName string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("k8s.volume.name", volumeName), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This may not equal capacity - available. +// +// This metric is derived from the +// [VolumeStats.UsedBytes] field +// of the [PodStats] of the +// Kubelet's stats API. +// +// [VolumeStats.UsedBytes]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#VolumeStats +// [PodStats]: https://pkg.go.dev/k8s.io/kubelet@v0.33.0/pkg/apis/stats/v1alpha1#PodStats +func (m PodVolumeUsage) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrVolumeType returns an optional attribute for the "k8s.volume.type" +// semantic convention. It represents the type of the K8s volume. +func (PodVolumeUsage) AttrVolumeType(val VolumeTypeAttr) attribute.KeyValue { + return attribute.String("k8s.volume.type", string(val)) +} + +// ReplicaSetAvailablePods is an instrument used to record metric values +// conforming to the "k8s.replicaset.available_pods" semantic conventions. 
It +// represents the total number of available replica pods (ready for at least +// minReadySeconds) targeted by this replicaset. +type ReplicaSetAvailablePods struct { + metric.Int64UpDownCounter +} + +// NewReplicaSetAvailablePods returns a new ReplicaSetAvailablePods instrument. +func NewReplicaSetAvailablePods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ReplicaSetAvailablePods, error) { + // Check if the meter is nil. + if m == nil { + return ReplicaSetAvailablePods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.replicaset.available_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Total number of available replica pods (ready for at least minReadySeconds) targeted by this replicaset."), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return ReplicaSetAvailablePods{noop.Int64UpDownCounter{}}, err + } + return ReplicaSetAvailablePods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ReplicaSetAvailablePods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ReplicaSetAvailablePods) Name() string { + return "k8s.replicaset.available_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (ReplicaSetAvailablePods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (ReplicaSetAvailablePods) Description() string { + return "Total number of available replica pods (ready for at least minReadySeconds) targeted by this replicaset." +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `availableReplicas` field of the +// [K8s ReplicaSetStatus]. 
+// +// [K8s ReplicaSetStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicasetstatus-v1-apps +func (m ReplicaSetAvailablePods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `availableReplicas` field of the +// [K8s ReplicaSetStatus]. +// +// [K8s ReplicaSetStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicasetstatus-v1-apps +func (m ReplicaSetAvailablePods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ReplicaSetDesiredPods is an instrument used to record metric values conforming +// to the "k8s.replicaset.desired_pods" semantic conventions. It represents the +// number of desired replica pods in this replicaset. +type ReplicaSetDesiredPods struct { + metric.Int64UpDownCounter +} + +// NewReplicaSetDesiredPods returns a new ReplicaSetDesiredPods instrument. +func NewReplicaSetDesiredPods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ReplicaSetDesiredPods, error) { + // Check if the meter is nil. 
+ if m == nil { + return ReplicaSetDesiredPods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.replicaset.desired_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of desired replica pods in this replicaset."), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return ReplicaSetDesiredPods{noop.Int64UpDownCounter{}}, err + } + return ReplicaSetDesiredPods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ReplicaSetDesiredPods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ReplicaSetDesiredPods) Name() string { + return "k8s.replicaset.desired_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (ReplicaSetDesiredPods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (ReplicaSetDesiredPods) Description() string { + return "Number of desired replica pods in this replicaset." +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `replicas` field of the +// [K8s ReplicaSetSpec]. +// +// [K8s ReplicaSetSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicasetspec-v1-apps +func (m ReplicaSetDesiredPods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `replicas` field of the +// [K8s ReplicaSetSpec]. 
+// +// [K8s ReplicaSetSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicasetspec-v1-apps +func (m ReplicaSetDesiredPods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ReplicationControllerAvailablePods is an instrument used to record metric +// values conforming to the "k8s.replicationcontroller.available_pods" semantic +// conventions. It represents the total number of available replica pods (ready +// for at least minReadySeconds) targeted by this replication controller. +type ReplicationControllerAvailablePods struct { + metric.Int64UpDownCounter +} + +// NewReplicationControllerAvailablePods returns a new +// ReplicationControllerAvailablePods instrument. +func NewReplicationControllerAvailablePods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ReplicationControllerAvailablePods, error) { + // Check if the meter is nil. + if m == nil { + return ReplicationControllerAvailablePods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.replicationcontroller.available_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Total number of available replica pods (ready for at least minReadySeconds) targeted by this replication controller."), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return ReplicationControllerAvailablePods{noop.Int64UpDownCounter{}}, err + } + return ReplicationControllerAvailablePods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ReplicationControllerAvailablePods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (ReplicationControllerAvailablePods) Name() string { + return "k8s.replicationcontroller.available_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (ReplicationControllerAvailablePods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (ReplicationControllerAvailablePods) Description() string { + return "Total number of available replica pods (ready for at least minReadySeconds) targeted by this replication controller." +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `availableReplicas` field of the +// [K8s ReplicationControllerStatus] +// +// [K8s ReplicationControllerStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicationcontrollerstatus-v1-core +func (m ReplicationControllerAvailablePods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `availableReplicas` field of the +// [K8s ReplicationControllerStatus] +// +// [K8s ReplicationControllerStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicationcontrollerstatus-v1-core +func (m ReplicationControllerAvailablePods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) 
+} + +// ReplicationControllerDesiredPods is an instrument used to record metric values +// conforming to the "k8s.replicationcontroller.desired_pods" semantic +// conventions. It represents the number of desired replica pods in this +// replication controller. +type ReplicationControllerDesiredPods struct { + metric.Int64UpDownCounter +} + +// NewReplicationControllerDesiredPods returns a new +// ReplicationControllerDesiredPods instrument. +func NewReplicationControllerDesiredPods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ReplicationControllerDesiredPods, error) { + // Check if the meter is nil. + if m == nil { + return ReplicationControllerDesiredPods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.replicationcontroller.desired_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of desired replica pods in this replication controller."), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return ReplicationControllerDesiredPods{noop.Int64UpDownCounter{}}, err + } + return ReplicationControllerDesiredPods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ReplicationControllerDesiredPods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ReplicationControllerDesiredPods) Name() string { + return "k8s.replicationcontroller.desired_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (ReplicationControllerDesiredPods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (ReplicationControllerDesiredPods) Description() string { + return "Number of desired replica pods in this replication controller." +} + +// Add adds incr to the existing count for attrs. 
+// +// This metric aligns with the `replicas` field of the +// [K8s ReplicationControllerSpec] +// +// [K8s ReplicationControllerSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicationcontrollerspec-v1-core +func (m ReplicationControllerDesiredPods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `replicas` field of the +// [K8s ReplicationControllerSpec] +// +// [K8s ReplicationControllerSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#replicationcontrollerspec-v1-core +func (m ReplicationControllerDesiredPods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ResourceQuotaCPULimitHard is an instrument used to record metric values +// conforming to the "k8s.resourcequota.cpu.limit.hard" semantic conventions. It +// represents the CPU limits in a specific namespace. +// The value represents the configured quota limit of the resource in the +// namespace. +type ResourceQuotaCPULimitHard struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaCPULimitHard returns a new ResourceQuotaCPULimitHard +// instrument. +func NewResourceQuotaCPULimitHard( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaCPULimitHard, error) { + // Check if the meter is nil. 
+ if m == nil { + return ResourceQuotaCPULimitHard{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.cpu.limit.hard", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The CPU limits in a specific namespace. The value represents the configured quota limit of the resource in the namespace."), + metric.WithUnit("{cpu}"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaCPULimitHard{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaCPULimitHard{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ResourceQuotaCPULimitHard) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ResourceQuotaCPULimitHard) Name() string { + return "k8s.resourcequota.cpu.limit.hard" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaCPULimitHard) Unit() string { + return "{cpu}" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaCPULimitHard) Description() string { + return "The CPU limits in a specific namespace. The value represents the configured quota limit of the resource in the namespace." +} + +// Add adds incr to the existing count for attrs. +// +// This metric is retrieved from the `hard` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaCPULimitHard) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+// +// This metric is retrieved from the `hard` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaCPULimitHard) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ResourceQuotaCPULimitUsed is an instrument used to record metric values +// conforming to the "k8s.resourcequota.cpu.limit.used" semantic conventions. It +// represents the CPU limits in a specific namespace. +// The value represents the current observed total usage of the resource in the +// namespace. +type ResourceQuotaCPULimitUsed struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaCPULimitUsed returns a new ResourceQuotaCPULimitUsed +// instrument. +func NewResourceQuotaCPULimitUsed( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaCPULimitUsed, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaCPULimitUsed{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.cpu.limit.used", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The CPU limits in a specific namespace. The value represents the current observed total usage of the resource in the namespace."), + metric.WithUnit("{cpu}"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaCPULimitUsed{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaCPULimitUsed{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m ResourceQuotaCPULimitUsed) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ResourceQuotaCPULimitUsed) Name() string { + return "k8s.resourcequota.cpu.limit.used" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaCPULimitUsed) Unit() string { + return "{cpu}" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaCPULimitUsed) Description() string { + return "The CPU limits in a specific namespace. The value represents the current observed total usage of the resource in the namespace." +} + +// Add adds incr to the existing count for attrs. +// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaCPULimitUsed) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaCPULimitUsed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) 
+} + +// ResourceQuotaCPURequestHard is an instrument used to record metric values +// conforming to the "k8s.resourcequota.cpu.request.hard" semantic conventions. +// It represents the CPU requests in a specific namespace. +// The value represents the configured quota limit of the resource in the +// namespace. +type ResourceQuotaCPURequestHard struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaCPURequestHard returns a new ResourceQuotaCPURequestHard +// instrument. +func NewResourceQuotaCPURequestHard( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaCPURequestHard, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaCPURequestHard{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.cpu.request.hard", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The CPU requests in a specific namespace. The value represents the configured quota limit of the resource in the namespace."), + metric.WithUnit("{cpu}"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaCPURequestHard{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaCPURequestHard{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ResourceQuotaCPURequestHard) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ResourceQuotaCPURequestHard) Name() string { + return "k8s.resourcequota.cpu.request.hard" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaCPURequestHard) Unit() string { + return "{cpu}" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaCPURequestHard) Description() string { + return "The CPU requests in a specific namespace. The value represents the configured quota limit of the resource in the namespace." +} + +// Add adds incr to the existing count for attrs. 
+// +// This metric is retrieved from the `hard` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaCPURequestHard) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is retrieved from the `hard` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaCPURequestHard) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ResourceQuotaCPURequestUsed is an instrument used to record metric values +// conforming to the "k8s.resourcequota.cpu.request.used" semantic conventions. +// It represents the CPU requests in a specific namespace. +// The value represents the current observed total usage of the resource in the +// namespace. +type ResourceQuotaCPURequestUsed struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaCPURequestUsed returns a new ResourceQuotaCPURequestUsed +// instrument. +func NewResourceQuotaCPURequestUsed( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaCPURequestUsed, error) { + // Check if the meter is nil. 
+ if m == nil { + return ResourceQuotaCPURequestUsed{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.cpu.request.used", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The CPU requests in a specific namespace. The value represents the current observed total usage of the resource in the namespace."), + metric.WithUnit("{cpu}"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaCPURequestUsed{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaCPURequestUsed{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ResourceQuotaCPURequestUsed) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ResourceQuotaCPURequestUsed) Name() string { + return "k8s.resourcequota.cpu.request.used" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaCPURequestUsed) Unit() string { + return "{cpu}" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaCPURequestUsed) Description() string { + return "The CPU requests in a specific namespace. The value represents the current observed total usage of the resource in the namespace." +} + +// Add adds incr to the existing count for attrs. +// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaCPURequestUsed) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) 
+} + +// AddSet adds incr to the existing count for set. +// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaCPURequestUsed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ResourceQuotaEphemeralStorageLimitHard is an instrument used to record metric +// values conforming to the "k8s.resourcequota.ephemeral_storage.limit.hard" +// semantic conventions. It represents the sum of local ephemeral storage limits +// in the namespace. +// The value represents the configured quota limit of the resource in the +// namespace. +type ResourceQuotaEphemeralStorageLimitHard struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaEphemeralStorageLimitHard returns a new +// ResourceQuotaEphemeralStorageLimitHard instrument. +func NewResourceQuotaEphemeralStorageLimitHard( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaEphemeralStorageLimitHard, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaEphemeralStorageLimitHard{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.ephemeral_storage.limit.hard", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The sum of local ephemeral storage limits in the namespace. 
The value represents the configured quota limit of the resource in the namespace."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaEphemeralStorageLimitHard{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaEphemeralStorageLimitHard{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ResourceQuotaEphemeralStorageLimitHard) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ResourceQuotaEphemeralStorageLimitHard) Name() string { + return "k8s.resourcequota.ephemeral_storage.limit.hard" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaEphemeralStorageLimitHard) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaEphemeralStorageLimitHard) Description() string { + return "The sum of local ephemeral storage limits in the namespace. The value represents the configured quota limit of the resource in the namespace." +} + +// Add adds incr to the existing count for attrs. +// +// This metric is retrieved from the `hard` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaEphemeralStorageLimitHard) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is retrieved from the `hard` field of the +// [K8s ResourceQuotaStatus]. 
+// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaEphemeralStorageLimitHard) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ResourceQuotaEphemeralStorageLimitUsed is an instrument used to record metric +// values conforming to the "k8s.resourcequota.ephemeral_storage.limit.used" +// semantic conventions. It represents the sum of local ephemeral storage limits +// in the namespace. +// The value represents the current observed total usage of the resource in the +// namespace. +type ResourceQuotaEphemeralStorageLimitUsed struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaEphemeralStorageLimitUsed returns a new +// ResourceQuotaEphemeralStorageLimitUsed instrument. +func NewResourceQuotaEphemeralStorageLimitUsed( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaEphemeralStorageLimitUsed, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaEphemeralStorageLimitUsed{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.ephemeral_storage.limit.used", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The sum of local ephemeral storage limits in the namespace. The value represents the current observed total usage of the resource in the namespace."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaEphemeralStorageLimitUsed{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaEphemeralStorageLimitUsed{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m ResourceQuotaEphemeralStorageLimitUsed) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ResourceQuotaEphemeralStorageLimitUsed) Name() string { + return "k8s.resourcequota.ephemeral_storage.limit.used" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaEphemeralStorageLimitUsed) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaEphemeralStorageLimitUsed) Description() string { + return "The sum of local ephemeral storage limits in the namespace. The value represents the current observed total usage of the resource in the namespace." +} + +// Add adds incr to the existing count for attrs. +// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaEphemeralStorageLimitUsed) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. 
+// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaEphemeralStorageLimitUsed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ResourceQuotaEphemeralStorageRequestHard is an instrument used to record +// metric values conforming to the +// "k8s.resourcequota.ephemeral_storage.request.hard" semantic conventions. It +// represents the sum of local ephemeral storage requests in the namespace. +// The value represents the configured quota limit of the resource in the +// namespace. +type ResourceQuotaEphemeralStorageRequestHard struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaEphemeralStorageRequestHard returns a new +// ResourceQuotaEphemeralStorageRequestHard instrument. +func NewResourceQuotaEphemeralStorageRequestHard( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaEphemeralStorageRequestHard, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaEphemeralStorageRequestHard{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.ephemeral_storage.request.hard", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The sum of local ephemeral storage requests in the namespace. The value represents the configured quota limit of the resource in the namespace."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaEphemeralStorageRequestHard{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaEphemeralStorageRequestHard{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m ResourceQuotaEphemeralStorageRequestHard) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ResourceQuotaEphemeralStorageRequestHard) Name() string { + return "k8s.resourcequota.ephemeral_storage.request.hard" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaEphemeralStorageRequestHard) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaEphemeralStorageRequestHard) Description() string { + return "The sum of local ephemeral storage requests in the namespace. The value represents the configured quota limit of the resource in the namespace." +} + +// Add adds incr to the existing count for attrs. +// +// This metric is retrieved from the `hard` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaEphemeralStorageRequestHard) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is retrieved from the `hard` field of the +// [K8s ResourceQuotaStatus]. 
+// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaEphemeralStorageRequestHard) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ResourceQuotaEphemeralStorageRequestUsed is an instrument used to record +// metric values conforming to the +// "k8s.resourcequota.ephemeral_storage.request.used" semantic conventions. It +// represents the sum of local ephemeral storage requests in the namespace. +// The value represents the current observed total usage of the resource in the +// namespace. +type ResourceQuotaEphemeralStorageRequestUsed struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaEphemeralStorageRequestUsed returns a new +// ResourceQuotaEphemeralStorageRequestUsed instrument. +func NewResourceQuotaEphemeralStorageRequestUsed( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaEphemeralStorageRequestUsed, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaEphemeralStorageRequestUsed{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.ephemeral_storage.request.used", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The sum of local ephemeral storage requests in the namespace. The value represents the current observed total usage of the resource in the namespace."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaEphemeralStorageRequestUsed{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaEphemeralStorageRequestUsed{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m ResourceQuotaEphemeralStorageRequestUsed) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ResourceQuotaEphemeralStorageRequestUsed) Name() string { + return "k8s.resourcequota.ephemeral_storage.request.used" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaEphemeralStorageRequestUsed) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaEphemeralStorageRequestUsed) Description() string { + return "The sum of local ephemeral storage requests in the namespace. The value represents the current observed total usage of the resource in the namespace." +} + +// Add adds incr to the existing count for attrs. +// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaEphemeralStorageRequestUsed) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. 
+// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaEphemeralStorageRequestUsed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ResourceQuotaHugepageCountRequestHard is an instrument used to record metric +// values conforming to the "k8s.resourcequota.hugepage_count.request.hard" +// semantic conventions. It represents the huge page requests in a specific +// namespace. +// The value represents the configured quota limit of the resource in the +// namespace. +type ResourceQuotaHugepageCountRequestHard struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaHugepageCountRequestHard returns a new +// ResourceQuotaHugepageCountRequestHard instrument. +func NewResourceQuotaHugepageCountRequestHard( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaHugepageCountRequestHard, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaHugepageCountRequestHard{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.hugepage_count.request.hard", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The huge page requests in a specific namespace. The value represents the configured quota limit of the resource in the namespace."), + metric.WithUnit("{hugepage}"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaHugepageCountRequestHard{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaHugepageCountRequestHard{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m ResourceQuotaHugepageCountRequestHard) Inst() metric.Int64UpDownCounter {
+	return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ResourceQuotaHugepageCountRequestHard) Name() string {
+	return "k8s.resourcequota.hugepage_count.request.hard"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ResourceQuotaHugepageCountRequestHard) Unit() string {
+	return "{hugepage}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ResourceQuotaHugepageCountRequestHard) Description() string {
+	return "The huge page requests in a specific namespace. The value represents the configured quota limit of the resource in the namespace."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// The hugepageSize is the size (identifier) of the K8s huge page.
+//
+// This metric is retrieved from the `hard` field of the
+// [K8s ResourceQuotaStatus].
+//
+// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core
+func (m ResourceQuotaHugepageCountRequestHard) Add(
+	ctx context.Context,
+	incr int64,
+	hugepageSize string,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64UpDownCounter.Add(ctx, incr, metric.WithAttributes(attribute.String("k8s.hugepage.size", hugepageSize))) // required attribute must always be recorded
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("k8s.hugepage.size", hugepageSize),
+			)...,
+		),
+	)
+
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+//
+// This metric is retrieved from the `hard` field of the
+// [K8s ResourceQuotaStatus].
+// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaHugepageCountRequestHard) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ResourceQuotaHugepageCountRequestUsed is an instrument used to record metric +// values conforming to the "k8s.resourcequota.hugepage_count.request.used" +// semantic conventions. It represents the huge page requests in a specific +// namespace. +// The value represents the current observed total usage of the resource in the +// namespace. +type ResourceQuotaHugepageCountRequestUsed struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaHugepageCountRequestUsed returns a new +// ResourceQuotaHugepageCountRequestUsed instrument. +func NewResourceQuotaHugepageCountRequestUsed( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaHugepageCountRequestUsed, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaHugepageCountRequestUsed{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.hugepage_count.request.used", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The huge page requests in a specific namespace. The value represents the current observed total usage of the resource in the namespace."), + metric.WithUnit("{hugepage}"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaHugepageCountRequestUsed{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaHugepageCountRequestUsed{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m ResourceQuotaHugepageCountRequestUsed) Inst() metric.Int64UpDownCounter {
+	return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ResourceQuotaHugepageCountRequestUsed) Name() string {
+	return "k8s.resourcequota.hugepage_count.request.used"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ResourceQuotaHugepageCountRequestUsed) Unit() string {
+	return "{hugepage}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ResourceQuotaHugepageCountRequestUsed) Description() string {
+	return "The huge page requests in a specific namespace. The value represents the current observed total usage of the resource in the namespace."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// The hugepageSize is the size (identifier) of the K8s huge page.
+//
+// This metric is retrieved from the `used` field of the
+// [K8s ResourceQuotaStatus].
+//
+// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core
+func (m ResourceQuotaHugepageCountRequestUsed) Add(
+	ctx context.Context,
+	incr int64,
+	hugepageSize string,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64UpDownCounter.Add(ctx, incr, metric.WithAttributes(attribute.String("k8s.hugepage.size", hugepageSize))) // required attribute must always be recorded
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("k8s.hugepage.size", hugepageSize),
+			)...,
+		),
+	)
+
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+//
+// This metric is retrieved from the `used` field of the
+// [K8s ResourceQuotaStatus].
+// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaHugepageCountRequestUsed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ResourceQuotaMemoryLimitHard is an instrument used to record metric values +// conforming to the "k8s.resourcequota.memory.limit.hard" semantic conventions. +// It represents the memory limits in a specific namespace. +// The value represents the configured quota limit of the resource in the +// namespace. +type ResourceQuotaMemoryLimitHard struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaMemoryLimitHard returns a new ResourceQuotaMemoryLimitHard +// instrument. +func NewResourceQuotaMemoryLimitHard( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaMemoryLimitHard, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaMemoryLimitHard{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.memory.limit.hard", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The memory limits in a specific namespace. The value represents the configured quota limit of the resource in the namespace."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaMemoryLimitHard{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaMemoryLimitHard{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ResourceQuotaMemoryLimitHard) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (ResourceQuotaMemoryLimitHard) Name() string { + return "k8s.resourcequota.memory.limit.hard" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaMemoryLimitHard) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaMemoryLimitHard) Description() string { + return "The memory limits in a specific namespace. The value represents the configured quota limit of the resource in the namespace." +} + +// Add adds incr to the existing count for attrs. +// +// This metric is retrieved from the `hard` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaMemoryLimitHard) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is retrieved from the `hard` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaMemoryLimitHard) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) 
+} + +// ResourceQuotaMemoryLimitUsed is an instrument used to record metric values +// conforming to the "k8s.resourcequota.memory.limit.used" semantic conventions. +// It represents the memory limits in a specific namespace. +// The value represents the current observed total usage of the resource in the +// namespace. +type ResourceQuotaMemoryLimitUsed struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaMemoryLimitUsed returns a new ResourceQuotaMemoryLimitUsed +// instrument. +func NewResourceQuotaMemoryLimitUsed( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaMemoryLimitUsed, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaMemoryLimitUsed{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.memory.limit.used", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The memory limits in a specific namespace. The value represents the current observed total usage of the resource in the namespace."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaMemoryLimitUsed{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaMemoryLimitUsed{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ResourceQuotaMemoryLimitUsed) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ResourceQuotaMemoryLimitUsed) Name() string { + return "k8s.resourcequota.memory.limit.used" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaMemoryLimitUsed) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaMemoryLimitUsed) Description() string { + return "The memory limits in a specific namespace. The value represents the current observed total usage of the resource in the namespace." 
+} + +// Add adds incr to the existing count for attrs. +// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaMemoryLimitUsed) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaMemoryLimitUsed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ResourceQuotaMemoryRequestHard is an instrument used to record metric values +// conforming to the "k8s.resourcequota.memory.request.hard" semantic +// conventions. It represents the memory requests in a specific namespace. +// The value represents the configured quota limit of the resource in the +// namespace. +type ResourceQuotaMemoryRequestHard struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaMemoryRequestHard returns a new ResourceQuotaMemoryRequestHard +// instrument. 
+func NewResourceQuotaMemoryRequestHard( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaMemoryRequestHard, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaMemoryRequestHard{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.memory.request.hard", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The memory requests in a specific namespace. The value represents the configured quota limit of the resource in the namespace."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaMemoryRequestHard{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaMemoryRequestHard{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ResourceQuotaMemoryRequestHard) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ResourceQuotaMemoryRequestHard) Name() string { + return "k8s.resourcequota.memory.request.hard" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaMemoryRequestHard) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaMemoryRequestHard) Description() string { + return "The memory requests in a specific namespace. The value represents the configured quota limit of the resource in the namespace." +} + +// Add adds incr to the existing count for attrs. +// +// This metric is retrieved from the `hard` field of the +// [K8s ResourceQuotaStatus]. 
+// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaMemoryRequestHard) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is retrieved from the `hard` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaMemoryRequestHard) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ResourceQuotaMemoryRequestUsed is an instrument used to record metric values +// conforming to the "k8s.resourcequota.memory.request.used" semantic +// conventions. It represents the memory requests in a specific namespace. +// The value represents the current observed total usage of the resource in the +// namespace. +type ResourceQuotaMemoryRequestUsed struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaMemoryRequestUsed returns a new ResourceQuotaMemoryRequestUsed +// instrument. +func NewResourceQuotaMemoryRequestUsed( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaMemoryRequestUsed, error) { + // Check if the meter is nil. 
+ if m == nil { + return ResourceQuotaMemoryRequestUsed{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.memory.request.used", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The memory requests in a specific namespace. The value represents the current observed total usage of the resource in the namespace."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaMemoryRequestUsed{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaMemoryRequestUsed{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ResourceQuotaMemoryRequestUsed) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ResourceQuotaMemoryRequestUsed) Name() string { + return "k8s.resourcequota.memory.request.used" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaMemoryRequestUsed) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaMemoryRequestUsed) Description() string { + return "The memory requests in a specific namespace. The value represents the current observed total usage of the resource in the namespace." +} + +// Add adds incr to the existing count for attrs. +// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaMemoryRequestUsed) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) 
+} + +// AddSet adds incr to the existing count for set. +// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaMemoryRequestUsed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ResourceQuotaObjectCountHard is an instrument used to record metric values +// conforming to the "k8s.resourcequota.object_count.hard" semantic conventions. +// It represents the object count limits in a specific namespace. +// The value represents the configured quota limit of the resource in the +// namespace. +type ResourceQuotaObjectCountHard struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaObjectCountHard returns a new ResourceQuotaObjectCountHard +// instrument. +func NewResourceQuotaObjectCountHard( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaObjectCountHard, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaObjectCountHard{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.object_count.hard", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The object count limits in a specific namespace. The value represents the configured quota limit of the resource in the namespace."), + metric.WithUnit("{object}"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaObjectCountHard{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaObjectCountHard{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m ResourceQuotaObjectCountHard) Inst() metric.Int64UpDownCounter {
+	return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ResourceQuotaObjectCountHard) Name() string {
+	return "k8s.resourcequota.object_count.hard"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ResourceQuotaObjectCountHard) Unit() string {
+	return "{object}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ResourceQuotaObjectCountHard) Description() string {
+	return "The object count limits in a specific namespace. The value represents the configured quota limit of the resource in the namespace."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// The resourcequotaResourceName is the name of the K8s resource a resource
+// quota defines.
+//
+// This metric is retrieved from the `hard` field of the
+// [K8s ResourceQuotaStatus].
+//
+// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core
+func (m ResourceQuotaObjectCountHard) Add(
+	ctx context.Context,
+	incr int64,
+	resourcequotaResourceName string,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64UpDownCounter.Add(ctx, incr, metric.WithAttributes(attribute.String("k8s.resourcequota.resource_name", resourcequotaResourceName))) // required attribute must always be recorded
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("k8s.resourcequota.resource_name", resourcequotaResourceName),
+			)...,
+		),
+	)
+
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+//
+// This metric is retrieved from the `hard` field of the
+// [K8s ResourceQuotaStatus].
+// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaObjectCountHard) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ResourceQuotaObjectCountUsed is an instrument used to record metric values +// conforming to the "k8s.resourcequota.object_count.used" semantic conventions. +// It represents the object count limits in a specific namespace. +// The value represents the current observed total usage of the resource in the +// namespace. +type ResourceQuotaObjectCountUsed struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaObjectCountUsed returns a new ResourceQuotaObjectCountUsed +// instrument. +func NewResourceQuotaObjectCountUsed( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaObjectCountUsed, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaObjectCountUsed{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.object_count.used", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The object count limits in a specific namespace. The value represents the current observed total usage of the resource in the namespace."), + metric.WithUnit("{object}"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaObjectCountUsed{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaObjectCountUsed{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ResourceQuotaObjectCountUsed) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (ResourceQuotaObjectCountUsed) Name() string { + return "k8s.resourcequota.object_count.used" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaObjectCountUsed) Unit() string { + return "{object}" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaObjectCountUsed) Description() string { + return "The object count limits in a specific namespace. The value represents the current observed total usage of the resource in the namespace." +} + +// Add adds incr to the existing count for attrs. +// +// The resourcequotaResourceName is the name of the K8s resource a resource +// quota defines. +// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaObjectCountUsed) Add( + ctx context.Context, + incr int64, + resourcequotaResourceName string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("k8s.resourcequota.resource_name", resourcequotaResourceName), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. 
+// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaObjectCountUsed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// ResourceQuotaPersistentvolumeclaimCountHard is an instrument used to record +// metric values conforming to the +// "k8s.resourcequota.persistentvolumeclaim_count.hard" semantic conventions. It +// represents the total number of PersistentVolumeClaims that can exist in the +// namespace. +// The value represents the configured quota limit of the resource in the +// namespace. +type ResourceQuotaPersistentvolumeclaimCountHard struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaPersistentvolumeclaimCountHard returns a new +// ResourceQuotaPersistentvolumeclaimCountHard instrument. +func NewResourceQuotaPersistentvolumeclaimCountHard( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaPersistentvolumeclaimCountHard, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaPersistentvolumeclaimCountHard{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.persistentvolumeclaim_count.hard", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The total number of PersistentVolumeClaims that can exist in the namespace. 
The value represents the configured quota limit of the resource in the namespace."), + metric.WithUnit("{persistentvolumeclaim}"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaPersistentvolumeclaimCountHard{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaPersistentvolumeclaimCountHard{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ResourceQuotaPersistentvolumeclaimCountHard) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ResourceQuotaPersistentvolumeclaimCountHard) Name() string { + return "k8s.resourcequota.persistentvolumeclaim_count.hard" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaPersistentvolumeclaimCountHard) Unit() string { + return "{persistentvolumeclaim}" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaPersistentvolumeclaimCountHard) Description() string { + return "The total number of PersistentVolumeClaims that can exist in the namespace. The value represents the configured quota limit of the resource in the namespace." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// This metric is retrieved from the `hard` field of the +// [K8s ResourceQuotaStatus]. +// +// The `k8s.storageclass.name` should be required when a resource quota is +// defined for a specific +// storage class. 
+// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaPersistentvolumeclaimCountHard) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is retrieved from the `hard` field of the +// [K8s ResourceQuotaStatus]. +// +// The `k8s.storageclass.name` should be required when a resource quota is +// defined for a specific +// storage class. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaPersistentvolumeclaimCountHard) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrStorageclassName returns an optional attribute for the +// "k8s.storageclass.name" semantic convention. It represents the name of K8s +// [StorageClass] object. 
+// +// [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io +func (ResourceQuotaPersistentvolumeclaimCountHard) AttrStorageclassName(val string) attribute.KeyValue { + return attribute.String("k8s.storageclass.name", val) +} + +// ResourceQuotaPersistentvolumeclaimCountUsed is an instrument used to record +// metric values conforming to the +// "k8s.resourcequota.persistentvolumeclaim_count.used" semantic conventions. It +// represents the total number of PersistentVolumeClaims that can exist in the +// namespace. +// The value represents the current observed total usage of the resource in the +// namespace. +type ResourceQuotaPersistentvolumeclaimCountUsed struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaPersistentvolumeclaimCountUsed returns a new +// ResourceQuotaPersistentvolumeclaimCountUsed instrument. +func NewResourceQuotaPersistentvolumeclaimCountUsed( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaPersistentvolumeclaimCountUsed, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaPersistentvolumeclaimCountUsed{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.persistentvolumeclaim_count.used", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The total number of PersistentVolumeClaims that can exist in the namespace. The value represents the current observed total usage of the resource in the namespace."), + metric.WithUnit("{persistentvolumeclaim}"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaPersistentvolumeclaimCountUsed{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaPersistentvolumeclaimCountUsed{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m ResourceQuotaPersistentvolumeclaimCountUsed) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ResourceQuotaPersistentvolumeclaimCountUsed) Name() string { + return "k8s.resourcequota.persistentvolumeclaim_count.used" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaPersistentvolumeclaimCountUsed) Unit() string { + return "{persistentvolumeclaim}" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaPersistentvolumeclaimCountUsed) Description() string { + return "The total number of PersistentVolumeClaims that can exist in the namespace. The value represents the current observed total usage of the resource in the namespace." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. +// +// The `k8s.storageclass.name` should be required when a resource quota is +// defined for a specific +// storage class. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaPersistentvolumeclaimCountUsed) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. 
+// +// The `k8s.storageclass.name` should be required when a resource quota is +// defined for a specific +// storage class. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaPersistentvolumeclaimCountUsed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrStorageclassName returns an optional attribute for the +// "k8s.storageclass.name" semantic convention. It represents the name of K8s +// [StorageClass] object. +// +// [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io +func (ResourceQuotaPersistentvolumeclaimCountUsed) AttrStorageclassName(val string) attribute.KeyValue { + return attribute.String("k8s.storageclass.name", val) +} + +// ResourceQuotaStorageRequestHard is an instrument used to record metric values +// conforming to the "k8s.resourcequota.storage.request.hard" semantic +// conventions. It represents the storage requests in a specific namespace. +// The value represents the configured quota limit of the resource in the +// namespace. +type ResourceQuotaStorageRequestHard struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaStorageRequestHard returns a new +// ResourceQuotaStorageRequestHard instrument. +func NewResourceQuotaStorageRequestHard( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaStorageRequestHard, error) { + // Check if the meter is nil. 
+ if m == nil { + return ResourceQuotaStorageRequestHard{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.storage.request.hard", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The storage requests in a specific namespace. The value represents the configured quota limit of the resource in the namespace."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaStorageRequestHard{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaStorageRequestHard{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ResourceQuotaStorageRequestHard) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ResourceQuotaStorageRequestHard) Name() string { + return "k8s.resourcequota.storage.request.hard" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaStorageRequestHard) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaStorageRequestHard) Description() string { + return "The storage requests in a specific namespace. The value represents the configured quota limit of the resource in the namespace." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// This metric is retrieved from the `hard` field of the +// [K8s ResourceQuotaStatus]. +// +// The `k8s.storageclass.name` should be required when a resource quota is +// defined for a specific +// storage class. 
+// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaStorageRequestHard) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is retrieved from the `hard` field of the +// [K8s ResourceQuotaStatus]. +// +// The `k8s.storageclass.name` should be required when a resource quota is +// defined for a specific +// storage class. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaStorageRequestHard) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrStorageclassName returns an optional attribute for the +// "k8s.storageclass.name" semantic convention. It represents the name of K8s +// [StorageClass] object. +// +// [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io +func (ResourceQuotaStorageRequestHard) AttrStorageclassName(val string) attribute.KeyValue { + return attribute.String("k8s.storageclass.name", val) +} + +// ResourceQuotaStorageRequestUsed is an instrument used to record metric values +// conforming to the "k8s.resourcequota.storage.request.used" semantic +// conventions. 
It represents the storage requests in a specific namespace. +// The value represents the current observed total usage of the resource in the +// namespace. +type ResourceQuotaStorageRequestUsed struct { + metric.Int64UpDownCounter +} + +// NewResourceQuotaStorageRequestUsed returns a new +// ResourceQuotaStorageRequestUsed instrument. +func NewResourceQuotaStorageRequestUsed( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ResourceQuotaStorageRequestUsed, error) { + // Check if the meter is nil. + if m == nil { + return ResourceQuotaStorageRequestUsed{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.resourcequota.storage.request.used", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The storage requests in a specific namespace. The value represents the current observed total usage of the resource in the namespace."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ResourceQuotaStorageRequestUsed{noop.Int64UpDownCounter{}}, err + } + return ResourceQuotaStorageRequestUsed{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ResourceQuotaStorageRequestUsed) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ResourceQuotaStorageRequestUsed) Name() string { + return "k8s.resourcequota.storage.request.used" +} + +// Unit returns the semantic convention unit of the instrument +func (ResourceQuotaStorageRequestUsed) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ResourceQuotaStorageRequestUsed) Description() string { + return "The storage requests in a specific namespace. The value represents the current observed total usage of the resource in the namespace." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. 
+// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. +// +// The `k8s.storageclass.name` should be required when a resource quota is +// defined for a specific +// storage class. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaStorageRequestUsed) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric is retrieved from the `used` field of the +// [K8s ResourceQuotaStatus]. +// +// The `k8s.storageclass.name` should be required when a resource quota is +// defined for a specific +// storage class. +// +// [K8s ResourceQuotaStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcequotastatus-v1-core +func (m ResourceQuotaStorageRequestUsed) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrStorageclassName returns an optional attribute for the +// "k8s.storageclass.name" semantic convention. It represents the name of K8s +// [StorageClass] object. 
+// +// [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io +func (ResourceQuotaStorageRequestUsed) AttrStorageclassName(val string) attribute.KeyValue { + return attribute.String("k8s.storageclass.name", val) +} + +// StatefulSetCurrentPods is an instrument used to record metric values +// conforming to the "k8s.statefulset.current_pods" semantic conventions. It +// represents the number of replica pods created by the statefulset controller +// from the statefulset version indicated by currentRevision. +type StatefulSetCurrentPods struct { + metric.Int64UpDownCounter +} + +// NewStatefulSetCurrentPods returns a new StatefulSetCurrentPods instrument. +func NewStatefulSetCurrentPods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (StatefulSetCurrentPods, error) { + // Check if the meter is nil. + if m == nil { + return StatefulSetCurrentPods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.statefulset.current_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of replica pods created by the statefulset controller from the statefulset version indicated by currentRevision."), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return StatefulSetCurrentPods{noop.Int64UpDownCounter{}}, err + } + return StatefulSetCurrentPods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m StatefulSetCurrentPods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (StatefulSetCurrentPods) Name() string { + return "k8s.statefulset.current_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (StatefulSetCurrentPods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (StatefulSetCurrentPods) Description() string { + return "The number of replica pods created by the statefulset controller from the statefulset version indicated by currentRevision." +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `currentReplicas` field of the +// [K8s StatefulSetStatus]. +// +// [K8s StatefulSetStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps +func (m StatefulSetCurrentPods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `currentReplicas` field of the +// [K8s StatefulSetStatus]. +// +// [K8s StatefulSetStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps +func (m StatefulSetCurrentPods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// StatefulSetDesiredPods is an instrument used to record metric values +// conforming to the "k8s.statefulset.desired_pods" semantic conventions. 
It +// represents the number of desired replica pods in this statefulset. +type StatefulSetDesiredPods struct { + metric.Int64UpDownCounter +} + +// NewStatefulSetDesiredPods returns a new StatefulSetDesiredPods instrument. +func NewStatefulSetDesiredPods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (StatefulSetDesiredPods, error) { + // Check if the meter is nil. + if m == nil { + return StatefulSetDesiredPods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.statefulset.desired_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of desired replica pods in this statefulset."), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return StatefulSetDesiredPods{noop.Int64UpDownCounter{}}, err + } + return StatefulSetDesiredPods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m StatefulSetDesiredPods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (StatefulSetDesiredPods) Name() string { + return "k8s.statefulset.desired_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (StatefulSetDesiredPods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (StatefulSetDesiredPods) Description() string { + return "Number of desired replica pods in this statefulset." +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `replicas` field of the +// [K8s StatefulSetSpec]. 
+// +// [K8s StatefulSetSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetspec-v1-apps +func (m StatefulSetDesiredPods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `replicas` field of the +// [K8s StatefulSetSpec]. +// +// [K8s StatefulSetSpec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetspec-v1-apps +func (m StatefulSetDesiredPods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// StatefulSetReadyPods is an instrument used to record metric values conforming +// to the "k8s.statefulset.ready_pods" semantic conventions. It represents the +// number of replica pods created for this statefulset with a Ready Condition. +type StatefulSetReadyPods struct { + metric.Int64UpDownCounter +} + +// NewStatefulSetReadyPods returns a new StatefulSetReadyPods instrument. +func NewStatefulSetReadyPods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (StatefulSetReadyPods, error) { + // Check if the meter is nil. 
+ if m == nil { + return StatefulSetReadyPods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.statefulset.ready_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of replica pods created for this statefulset with a Ready Condition."), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return StatefulSetReadyPods{noop.Int64UpDownCounter{}}, err + } + return StatefulSetReadyPods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m StatefulSetReadyPods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (StatefulSetReadyPods) Name() string { + return "k8s.statefulset.ready_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (StatefulSetReadyPods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (StatefulSetReadyPods) Description() string { + return "The number of replica pods created for this statefulset with a Ready Condition." +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `readyReplicas` field of the +// [K8s StatefulSetStatus]. +// +// [K8s StatefulSetStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps +func (m StatefulSetReadyPods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `readyReplicas` field of the +// [K8s StatefulSetStatus]. 
+// +// [K8s StatefulSetStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps +func (m StatefulSetReadyPods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// StatefulSetUpdatedPods is an instrument used to record metric values +// conforming to the "k8s.statefulset.updated_pods" semantic conventions. It +// represents the number of replica pods created by the statefulset controller +// from the statefulset version indicated by updateRevision. +type StatefulSetUpdatedPods struct { + metric.Int64UpDownCounter +} + +// NewStatefulSetUpdatedPods returns a new StatefulSetUpdatedPods instrument. +func NewStatefulSetUpdatedPods( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (StatefulSetUpdatedPods, error) { + // Check if the meter is nil. + if m == nil { + return StatefulSetUpdatedPods{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "k8s.statefulset.updated_pods", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of replica pods created by the statefulset controller from the statefulset version indicated by updateRevision."), + metric.WithUnit("{pod}"), + }, opt...)..., + ) + if err != nil { + return StatefulSetUpdatedPods{noop.Int64UpDownCounter{}}, err + } + return StatefulSetUpdatedPods{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m StatefulSetUpdatedPods) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (StatefulSetUpdatedPods) Name() string { + return "k8s.statefulset.updated_pods" +} + +// Unit returns the semantic convention unit of the instrument +func (StatefulSetUpdatedPods) Unit() string { + return "{pod}" +} + +// Description returns the semantic convention description of the instrument +func (StatefulSetUpdatedPods) Description() string { + return "Number of replica pods created by the statefulset controller from the statefulset version indicated by updateRevision." +} + +// Add adds incr to the existing count for attrs. +// +// This metric aligns with the `updatedReplicas` field of the +// [K8s StatefulSetStatus]. +// +// [K8s StatefulSetStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps +func (m StatefulSetUpdatedPods) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric aligns with the `updatedReplicas` field of the +// [K8s StatefulSetStatus]. +// +// [K8s StatefulSetStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#statefulsetstatus-v1-apps +func (m StatefulSetUpdatedPods) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) 
+} \ No newline at end of file diff --git a/semconv/v1.37.0/messagingconv/metric.go b/semconv/v1.37.0/messagingconv/metric.go new file mode 100644 index 00000000000..0887eabf530 --- /dev/null +++ b/semconv/v1.37.0/messagingconv/metric.go @@ -0,0 +1,765 @@ +// Code generated from semantic convention specification. DO NOT EDIT. + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package messagingconv provides types and functionality for OpenTelemetry semantic +// conventions in the "messaging" namespace. +package messagingconv + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" +) + +var ( + addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }} + recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }} +) + +// ErrorTypeAttr is an attribute conforming to the error.type semantic +// conventions. It represents the describes a class of error the operation ended +// with. +type ErrorTypeAttr string + +var ( + // ErrorTypeOther is a fallback error value to be used when the instrumentation + // doesn't define a custom value. + ErrorTypeOther ErrorTypeAttr = "_OTHER" +) + +// OperationTypeAttr is an attribute conforming to the messaging.operation.type +// semantic conventions. It represents a string identifying the type of the +// messaging operation. +type OperationTypeAttr string + +var ( + // OperationTypeCreate is a message is created. "Create" spans always refer to a + // single message and are used to provide a unique creation context for messages + // in batch sending scenarios. + OperationTypeCreate OperationTypeAttr = "create" + // OperationTypeSend is the one or more messages are provided for sending to an + // intermediary. If a single message is sent, the context of the "Send" span can + // be used as the creation context and no "Create" span needs to be created. 
+ OperationTypeSend OperationTypeAttr = "send" + // OperationTypeReceive is the one or more messages are requested by a consumer. + // This operation refers to pull-based scenarios, where consumers explicitly + // call methods of messaging SDKs to receive messages. + OperationTypeReceive OperationTypeAttr = "receive" + // OperationTypeProcess is the one or more messages are processed by a consumer. + OperationTypeProcess OperationTypeAttr = "process" + // OperationTypeSettle is the one or more messages are settled. + OperationTypeSettle OperationTypeAttr = "settle" +) + +// SystemAttr is an attribute conforming to the messaging.system semantic +// conventions. It represents the messaging system as identified by the client +// instrumentation. +type SystemAttr string + +var ( + // SystemActiveMQ is the apache ActiveMQ. + SystemActiveMQ SystemAttr = "activemq" + // SystemAWSSNS is the amazon Simple Notification Service (SNS). + SystemAWSSNS SystemAttr = "aws.sns" + // SystemAWSSQS is the amazon Simple Queue Service (SQS). + SystemAWSSQS SystemAttr = "aws_sqs" + // SystemEventGrid is the azure Event Grid. + SystemEventGrid SystemAttr = "eventgrid" + // SystemEventHubs is the azure Event Hubs. + SystemEventHubs SystemAttr = "eventhubs" + // SystemServiceBus is the azure Service Bus. + SystemServiceBus SystemAttr = "servicebus" + // SystemGCPPubSub is the google Cloud Pub/Sub. + SystemGCPPubSub SystemAttr = "gcp_pubsub" + // SystemJMS is the java Message Service. + SystemJMS SystemAttr = "jms" + // SystemKafka is the apache Kafka. + SystemKafka SystemAttr = "kafka" + // SystemRabbitMQ is the rabbitMQ. + SystemRabbitMQ SystemAttr = "rabbitmq" + // SystemRocketMQ is the apache RocketMQ. + SystemRocketMQ SystemAttr = "rocketmq" + // SystemPulsar is the apache Pulsar. + SystemPulsar SystemAttr = "pulsar" +) + +// ClientConsumedMessages is an instrument used to record metric values +// conforming to the "messaging.client.consumed.messages" semantic conventions. 
+// It represents the number of messages that were delivered to the application. +type ClientConsumedMessages struct { + metric.Int64Counter +} + +// NewClientConsumedMessages returns a new ClientConsumedMessages instrument. +func NewClientConsumedMessages( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (ClientConsumedMessages, error) { + // Check if the meter is nil. + if m == nil { + return ClientConsumedMessages{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "messaging.client.consumed.messages", + append([]metric.Int64CounterOption{ + metric.WithDescription("Number of messages that were delivered to the application."), + metric.WithUnit("{message}"), + }, opt...)..., + ) + if err != nil { + return ClientConsumedMessages{noop.Int64Counter{}}, err + } + return ClientConsumedMessages{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientConsumedMessages) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (ClientConsumedMessages) Name() string { + return "messaging.client.consumed.messages" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientConsumedMessages) Unit() string { + return "{message}" +} + +// Description returns the semantic convention description of the instrument +func (ClientConsumedMessages) Description() string { + return "Number of messages that were delivered to the application." +} + +// Add adds incr to the existing count for attrs. +// +// The operationName is the the system-specific name of the messaging operation. +// +// The system is the the messaging system as identified by the client +// instrumentation. +// +// All additional attrs passed are included in the recorded value. +// +// Records the number of messages pulled from the broker or number of messages +// dispatched to the application in push-based scenarios. +// The metric SHOULD be reported once per message delivery. 
For example, if +// receiving and processing operations are both instrumented for a single message +// delivery, this counter is incremented when the message is received and not +// reported when it is processed. +func (m ClientConsumedMessages) Add( + ctx context.Context, + incr int64, + operationName string, + system SystemAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("messaging.operation.name", operationName), + attribute.String("messaging.system", string(system)), + )..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// Records the number of messages pulled from the broker or number of messages +// dispatched to the application in push-based scenarios. +// The metric SHOULD be reported once per message delivery. For example, if +// receiving and processing operations are both instrumented for a single message +// delivery, this counter is incremented when the message is received and not +// reported when it is processed. +func (m ClientConsumedMessages) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. 
+func (ClientConsumedMessages) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrConsumerGroupName returns an optional attribute for the +// "messaging.consumer.group.name" semantic convention. It represents the name of +// the consumer group with which a consumer is associated. +func (ClientConsumedMessages) AttrConsumerGroupName(val string) attribute.KeyValue { + return attribute.String("messaging.consumer.group.name", val) +} + +// AttrDestinationName returns an optional attribute for the +// "messaging.destination.name" semantic convention. It represents the message +// destination name. +func (ClientConsumedMessages) AttrDestinationName(val string) attribute.KeyValue { + return attribute.String("messaging.destination.name", val) +} + +// AttrDestinationSubscriptionName returns an optional attribute for the +// "messaging.destination.subscription.name" semantic convention. It represents +// the name of the destination subscription from which a message is consumed. +func (ClientConsumedMessages) AttrDestinationSubscriptionName(val string) attribute.KeyValue { + return attribute.String("messaging.destination.subscription.name", val) +} + +// AttrDestinationTemplate returns an optional attribute for the +// "messaging.destination.template" semantic convention. It represents the low +// cardinality representation of the messaging destination name. +func (ClientConsumedMessages) AttrDestinationTemplate(val string) attribute.KeyValue { + return attribute.String("messaging.destination.template", val) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. 
+func (ClientConsumedMessages) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrDestinationPartitionID returns an optional attribute for the +// "messaging.destination.partition.id" semantic convention. It represents the +// identifier of the partition messages are sent to or received from, unique +// within the `messaging.destination.name`. +func (ClientConsumedMessages) AttrDestinationPartitionID(val string) attribute.KeyValue { + return attribute.String("messaging.destination.partition.id", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (ClientConsumedMessages) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// ClientOperationDuration is an instrument used to record metric values +// conforming to the "messaging.client.operation.duration" semantic conventions. +// It represents the duration of messaging operation initiated by a producer or +// consumer client. +type ClientOperationDuration struct { + metric.Float64Histogram +} + +// NewClientOperationDuration returns a new ClientOperationDuration instrument. +func NewClientOperationDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ClientOperationDuration, error) { + // Check if the meter is nil. + if m == nil { + return ClientOperationDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "messaging.client.operation.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("Duration of messaging operation initiated by a producer or consumer client."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ClientOperationDuration{noop.Float64Histogram{}}, err + } + return ClientOperationDuration{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m ClientOperationDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientOperationDuration) Name() string { + return "messaging.client.operation.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientOperationDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (ClientOperationDuration) Description() string { + return "Duration of messaging operation initiated by a producer or consumer client." +} + +// Record records val to the current distribution for attrs. +// +// The operationName is the the system-specific name of the messaging operation. +// +// The system is the the messaging system as identified by the client +// instrumentation. +// +// All additional attrs passed are included in the recorded value. +// +// This metric SHOULD NOT be used to report processing duration - processing +// duration is reported in `messaging.process.duration` metric. +func (m ClientOperationDuration) Record( + ctx context.Context, + val float64, + operationName string, + system SystemAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("messaging.operation.name", operationName), + attribute.String("messaging.system", string(system)), + )..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// This metric SHOULD NOT be used to report processing duration - processing +// duration is reported in `messaging.process.duration` metric. 
+func (m ClientOperationDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		// BUG FIX: without this return, an empty set fell through and the
+		// value was recorded a second time below (double-counted sample).
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents the describes a class of error the operation ended
+// with.
+func (ClientOperationDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+	return attribute.String("error.type", string(val))
+}
+
+// AttrConsumerGroupName returns an optional attribute for the
+// "messaging.consumer.group.name" semantic convention. It represents the name of
+// the consumer group with which a consumer is associated.
+func (ClientOperationDuration) AttrConsumerGroupName(val string) attribute.KeyValue {
+	return attribute.String("messaging.consumer.group.name", val)
+}
+
+// AttrDestinationName returns an optional attribute for the
+// "messaging.destination.name" semantic convention. It represents the message
+// destination name.
+func (ClientOperationDuration) AttrDestinationName(val string) attribute.KeyValue {
+	return attribute.String("messaging.destination.name", val)
+}
+
+// AttrDestinationSubscriptionName returns an optional attribute for the
+// "messaging.destination.subscription.name" semantic convention. It represents
+// the name of the destination subscription from which a message is consumed.
+func (ClientOperationDuration) AttrDestinationSubscriptionName(val string) attribute.KeyValue {
+	return attribute.String("messaging.destination.subscription.name", val)
+}
+
+// AttrDestinationTemplate returns an optional attribute for the
+// "messaging.destination.template" semantic convention. It represents the low
+// cardinality representation of the messaging destination name.
+func (ClientOperationDuration) AttrDestinationTemplate(val string) attribute.KeyValue { + return attribute.String("messaging.destination.template", val) +} + +// AttrOperationType returns an optional attribute for the +// "messaging.operation.type" semantic convention. It represents a string +// identifying the type of the messaging operation. +func (ClientOperationDuration) AttrOperationType(val OperationTypeAttr) attribute.KeyValue { + return attribute.String("messaging.operation.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (ClientOperationDuration) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrDestinationPartitionID returns an optional attribute for the +// "messaging.destination.partition.id" semantic convention. It represents the +// identifier of the partition messages are sent to or received from, unique +// within the `messaging.destination.name`. +func (ClientOperationDuration) AttrDestinationPartitionID(val string) attribute.KeyValue { + return attribute.String("messaging.destination.partition.id", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (ClientOperationDuration) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// ClientSentMessages is an instrument used to record metric values conforming to +// the "messaging.client.sent.messages" semantic conventions. It represents the +// number of messages producer attempted to send to the broker. +type ClientSentMessages struct { + metric.Int64Counter +} + +// NewClientSentMessages returns a new ClientSentMessages instrument. 
+func NewClientSentMessages( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (ClientSentMessages, error) { + // Check if the meter is nil. + if m == nil { + return ClientSentMessages{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "messaging.client.sent.messages", + append([]metric.Int64CounterOption{ + metric.WithDescription("Number of messages producer attempted to send to the broker."), + metric.WithUnit("{message}"), + }, opt...)..., + ) + if err != nil { + return ClientSentMessages{noop.Int64Counter{}}, err + } + return ClientSentMessages{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientSentMessages) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (ClientSentMessages) Name() string { + return "messaging.client.sent.messages" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientSentMessages) Unit() string { + return "{message}" +} + +// Description returns the semantic convention description of the instrument +func (ClientSentMessages) Description() string { + return "Number of messages producer attempted to send to the broker." +} + +// Add adds incr to the existing count for attrs. +// +// The operationName is the the system-specific name of the messaging operation. +// +// The system is the the messaging system as identified by the client +// instrumentation. +// +// All additional attrs passed are included in the recorded value. +// +// This metric MUST NOT count messages that were created but haven't yet been +// sent. 
+func (m ClientSentMessages) Add( + ctx context.Context, + incr int64, + operationName string, + system SystemAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("messaging.operation.name", operationName), + attribute.String("messaging.system", string(system)), + )..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This metric MUST NOT count messages that were created but haven't yet been +// sent. +func (m ClientSentMessages) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (ClientSentMessages) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrDestinationName returns an optional attribute for the +// "messaging.destination.name" semantic convention. It represents the message +// destination name. +func (ClientSentMessages) AttrDestinationName(val string) attribute.KeyValue { + return attribute.String("messaging.destination.name", val) +} + +// AttrDestinationTemplate returns an optional attribute for the +// "messaging.destination.template" semantic convention. It represents the low +// cardinality representation of the messaging destination name. 
+func (ClientSentMessages) AttrDestinationTemplate(val string) attribute.KeyValue { + return attribute.String("messaging.destination.template", val) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (ClientSentMessages) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrDestinationPartitionID returns an optional attribute for the +// "messaging.destination.partition.id" semantic convention. It represents the +// identifier of the partition messages are sent to or received from, unique +// within the `messaging.destination.name`. +func (ClientSentMessages) AttrDestinationPartitionID(val string) attribute.KeyValue { + return attribute.String("messaging.destination.partition.id", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (ClientSentMessages) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// ProcessDuration is an instrument used to record metric values conforming to +// the "messaging.process.duration" semantic conventions. It represents the +// duration of processing operation. +type ProcessDuration struct { + metric.Float64Histogram +} + +// NewProcessDuration returns a new ProcessDuration instrument. +func NewProcessDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ProcessDuration, error) { + // Check if the meter is nil. 
+ if m == nil { + return ProcessDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "messaging.process.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("Duration of processing operation."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ProcessDuration{noop.Float64Histogram{}}, err + } + return ProcessDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ProcessDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ProcessDuration) Name() string { + return "messaging.process.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (ProcessDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (ProcessDuration) Description() string { + return "Duration of processing operation." +} + +// Record records val to the current distribution for attrs. +// +// The operationName is the the system-specific name of the messaging operation. +// +// The system is the the messaging system as identified by the client +// instrumentation. +// +// All additional attrs passed are included in the recorded value. +// +// This metric MUST be reported for operations with `messaging.operation.type` +// that matches `process`. 
+func (m ProcessDuration) Record(
+	ctx context.Context,
+	val float64,
+	operationName string,
+	system SystemAttr,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("messaging.operation.name", operationName),
+				attribute.String("messaging.system", string(system)),
+			)...,
+		),
+	)
+
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// This metric MUST be reported for operations with `messaging.operation.type`
+// that matches `process`.
+func (m ProcessDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		// BUG FIX: without this return, an empty set fell through and the
+		// value was recorded a second time below (double-counted sample).
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents the describes a class of error the operation ended
+// with.
+func (ProcessDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+	return attribute.String("error.type", string(val))
+}
+
+// AttrConsumerGroupName returns an optional attribute for the
+// "messaging.consumer.group.name" semantic convention. It represents the name of
+// the consumer group with which a consumer is associated.
+func (ProcessDuration) AttrConsumerGroupName(val string) attribute.KeyValue {
+	return attribute.String("messaging.consumer.group.name", val)
+}
+
+// AttrDestinationName returns an optional attribute for the
+// "messaging.destination.name" semantic convention. It represents the message
+// destination name.
+func (ProcessDuration) AttrDestinationName(val string) attribute.KeyValue { + return attribute.String("messaging.destination.name", val) +} + +// AttrDestinationSubscriptionName returns an optional attribute for the +// "messaging.destination.subscription.name" semantic convention. It represents +// the name of the destination subscription from which a message is consumed. +func (ProcessDuration) AttrDestinationSubscriptionName(val string) attribute.KeyValue { + return attribute.String("messaging.destination.subscription.name", val) +} + +// AttrDestinationTemplate returns an optional attribute for the +// "messaging.destination.template" semantic convention. It represents the low +// cardinality representation of the messaging destination name. +func (ProcessDuration) AttrDestinationTemplate(val string) attribute.KeyValue { + return attribute.String("messaging.destination.template", val) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (ProcessDuration) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrDestinationPartitionID returns an optional attribute for the +// "messaging.destination.partition.id" semantic convention. It represents the +// identifier of the partition messages are sent to or received from, unique +// within the `messaging.destination.name`. +func (ProcessDuration) AttrDestinationPartitionID(val string) attribute.KeyValue { + return attribute.String("messaging.destination.partition.id", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. 
+func (ProcessDuration) AttrServerPort(val int) attribute.KeyValue {
+	return attribute.Int("server.port", val)
+}
\ No newline at end of file
diff --git a/semconv/v1.37.0/otelconv/metric.go b/semconv/v1.37.0/otelconv/metric.go
new file mode 100644
index 00000000000..a78eafd1fa3
--- /dev/null
+++ b/semconv/v1.37.0/otelconv/metric.go
@@ -0,0 +1,2126 @@
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package otelconv provides types and functionality for OpenTelemetry semantic
+// conventions in the "otel" namespace.
+package otelconv
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/noop"
+)
+
+// Pools of attribute-option slices reused across Add/Record calls to avoid a
+// heap allocation per measurement.
+var (
+	addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }}
+	recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }}
+)
+
+// ErrorTypeAttr is an attribute conforming to the error.type semantic
+// conventions. It represents the describes a class of error the operation ended
+// with.
+type ErrorTypeAttr string
+
+var (
+	// ErrorTypeOther is a fallback error value to be used when the instrumentation
+	// doesn't define a custom value.
+	ErrorTypeOther ErrorTypeAttr = "_OTHER"
+)
+
+// ComponentTypeAttr is an attribute conforming to the otel.component.type
+// semantic conventions. It represents a name identifying the type of the
+// OpenTelemetry component.
+type ComponentTypeAttr string
+
+var (
+	// ComponentTypeBatchingSpanProcessor is the builtin SDK batching span
+	// processor.
+	ComponentTypeBatchingSpanProcessor ComponentTypeAttr = "batching_span_processor"
+	// ComponentTypeSimpleSpanProcessor is the builtin SDK simple span processor.
+	ComponentTypeSimpleSpanProcessor ComponentTypeAttr = "simple_span_processor"
+	// ComponentTypeBatchingLogProcessor is the builtin SDK batching log record
+	// processor.
+ ComponentTypeBatchingLogProcessor ComponentTypeAttr = "batching_log_processor" + // ComponentTypeSimpleLogProcessor is the builtin SDK simple log record + // processor. + ComponentTypeSimpleLogProcessor ComponentTypeAttr = "simple_log_processor" + // ComponentTypeOtlpGRPCSpanExporter is the OTLP span exporter over gRPC with + // protobuf serialization. + ComponentTypeOtlpGRPCSpanExporter ComponentTypeAttr = "otlp_grpc_span_exporter" + // ComponentTypeOtlpHTTPSpanExporter is the OTLP span exporter over HTTP with + // protobuf serialization. + ComponentTypeOtlpHTTPSpanExporter ComponentTypeAttr = "otlp_http_span_exporter" + // ComponentTypeOtlpHTTPJSONSpanExporter is the OTLP span exporter over HTTP + // with JSON serialization. + ComponentTypeOtlpHTTPJSONSpanExporter ComponentTypeAttr = "otlp_http_json_span_exporter" + // ComponentTypeZipkinHTTPSpanExporter is the zipkin span exporter over HTTP. + ComponentTypeZipkinHTTPSpanExporter ComponentTypeAttr = "zipkin_http_span_exporter" + // ComponentTypeOtlpGRPCLogExporter is the OTLP log record exporter over gRPC + // with protobuf serialization. + ComponentTypeOtlpGRPCLogExporter ComponentTypeAttr = "otlp_grpc_log_exporter" + // ComponentTypeOtlpHTTPLogExporter is the OTLP log record exporter over HTTP + // with protobuf serialization. + ComponentTypeOtlpHTTPLogExporter ComponentTypeAttr = "otlp_http_log_exporter" + // ComponentTypeOtlpHTTPJSONLogExporter is the OTLP log record exporter over + // HTTP with JSON serialization. + ComponentTypeOtlpHTTPJSONLogExporter ComponentTypeAttr = "otlp_http_json_log_exporter" + // ComponentTypePeriodicMetricReader is the builtin SDK periodically exporting + // metric reader. + ComponentTypePeriodicMetricReader ComponentTypeAttr = "periodic_metric_reader" + // ComponentTypeOtlpGRPCMetricExporter is the OTLP metric exporter over gRPC + // with protobuf serialization. 
+ ComponentTypeOtlpGRPCMetricExporter ComponentTypeAttr = "otlp_grpc_metric_exporter" + // ComponentTypeOtlpHTTPMetricExporter is the OTLP metric exporter over HTTP + // with protobuf serialization. + ComponentTypeOtlpHTTPMetricExporter ComponentTypeAttr = "otlp_http_metric_exporter" + // ComponentTypeOtlpHTTPJSONMetricExporter is the OTLP metric exporter over HTTP + // with JSON serialization. + ComponentTypeOtlpHTTPJSONMetricExporter ComponentTypeAttr = "otlp_http_json_metric_exporter" + // ComponentTypePrometheusHTTPTextMetricExporter is the prometheus metric + // exporter over HTTP with the default text-based format. + ComponentTypePrometheusHTTPTextMetricExporter ComponentTypeAttr = "prometheus_http_text_metric_exporter" +) + +// SpanParentOriginAttr is an attribute conforming to the otel.span.parent.origin +// semantic conventions. It represents the determines whether the span has a +// parent span, and if so, [whether it is a remote parent]. +// +// [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote +type SpanParentOriginAttr string + +var ( + // SpanParentOriginNone is the span does not have a parent, it is a root span. + SpanParentOriginNone SpanParentOriginAttr = "none" + // SpanParentOriginLocal is the span has a parent and the parent's span context + // [isRemote()] is false. + // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + SpanParentOriginLocal SpanParentOriginAttr = "local" + // SpanParentOriginRemote is the span has a parent and the parent's span context + // [isRemote()] is true. + // + // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote + SpanParentOriginRemote SpanParentOriginAttr = "remote" +) + +// SpanSamplingResultAttr is an attribute conforming to the +// otel.span.sampling_result semantic conventions. It represents the result value +// of the sampler for this span. 
+type SpanSamplingResultAttr string + +var ( + // SpanSamplingResultDrop is the span is not sampled and not recording. + SpanSamplingResultDrop SpanSamplingResultAttr = "DROP" + // SpanSamplingResultRecordOnly is the span is not sampled, but recording. + SpanSamplingResultRecordOnly SpanSamplingResultAttr = "RECORD_ONLY" + // SpanSamplingResultRecordAndSample is the span is sampled and recording. + SpanSamplingResultRecordAndSample SpanSamplingResultAttr = "RECORD_AND_SAMPLE" +) + +// RPCGRPCStatusCodeAttr is an attribute conforming to the rpc.grpc.status_code +// semantic conventions. It represents the gRPC status code of the last gRPC +// requests performed in scope of this export call. +type RPCGRPCStatusCodeAttr int64 + +var ( + // RPCGRPCStatusCodeOk is the OK. + RPCGRPCStatusCodeOk RPCGRPCStatusCodeAttr = 0 + // RPCGRPCStatusCodeCancelled is the CANCELLED. + RPCGRPCStatusCodeCancelled RPCGRPCStatusCodeAttr = 1 + // RPCGRPCStatusCodeUnknown is the UNKNOWN. + RPCGRPCStatusCodeUnknown RPCGRPCStatusCodeAttr = 2 + // RPCGRPCStatusCodeInvalidArgument is the INVALID_ARGUMENT. + RPCGRPCStatusCodeInvalidArgument RPCGRPCStatusCodeAttr = 3 + // RPCGRPCStatusCodeDeadlineExceeded is the DEADLINE_EXCEEDED. + RPCGRPCStatusCodeDeadlineExceeded RPCGRPCStatusCodeAttr = 4 + // RPCGRPCStatusCodeNotFound is the NOT_FOUND. + RPCGRPCStatusCodeNotFound RPCGRPCStatusCodeAttr = 5 + // RPCGRPCStatusCodeAlreadyExists is the ALREADY_EXISTS. + RPCGRPCStatusCodeAlreadyExists RPCGRPCStatusCodeAttr = 6 + // RPCGRPCStatusCodePermissionDenied is the PERMISSION_DENIED. + RPCGRPCStatusCodePermissionDenied RPCGRPCStatusCodeAttr = 7 + // RPCGRPCStatusCodeResourceExhausted is the RESOURCE_EXHAUSTED. + RPCGRPCStatusCodeResourceExhausted RPCGRPCStatusCodeAttr = 8 + // RPCGRPCStatusCodeFailedPrecondition is the FAILED_PRECONDITION. + RPCGRPCStatusCodeFailedPrecondition RPCGRPCStatusCodeAttr = 9 + // RPCGRPCStatusCodeAborted is the ABORTED. 
+ RPCGRPCStatusCodeAborted RPCGRPCStatusCodeAttr = 10 + // RPCGRPCStatusCodeOutOfRange is the OUT_OF_RANGE. + RPCGRPCStatusCodeOutOfRange RPCGRPCStatusCodeAttr = 11 + // RPCGRPCStatusCodeUnimplemented is the UNIMPLEMENTED. + RPCGRPCStatusCodeUnimplemented RPCGRPCStatusCodeAttr = 12 + // RPCGRPCStatusCodeInternal is the INTERNAL. + RPCGRPCStatusCodeInternal RPCGRPCStatusCodeAttr = 13 + // RPCGRPCStatusCodeUnavailable is the UNAVAILABLE. + RPCGRPCStatusCodeUnavailable RPCGRPCStatusCodeAttr = 14 + // RPCGRPCStatusCodeDataLoss is the DATA_LOSS. + RPCGRPCStatusCodeDataLoss RPCGRPCStatusCodeAttr = 15 + // RPCGRPCStatusCodeUnauthenticated is the UNAUTHENTICATED. + RPCGRPCStatusCodeUnauthenticated RPCGRPCStatusCodeAttr = 16 +) + +// SDKExporterLogExported is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.log.exported" semantic conventions. It +// represents the number of log records for which the export has finished, either +// successful or failed. +type SDKExporterLogExported struct { + metric.Int64Counter +} + +// NewSDKExporterLogExported returns a new SDKExporterLogExported instrument. +func NewSDKExporterLogExported( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKExporterLogExported, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterLogExported{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "otel.sdk.exporter.log.exported", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of log records for which the export has finished, either successful or failed."), + metric.WithUnit("{log_record}"), + }, opt...)..., + ) + if err != nil { + return SDKExporterLogExported{noop.Int64Counter{}}, err + } + return SDKExporterLogExported{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterLogExported) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKExporterLogExported) Name() string { + return "otel.sdk.exporter.log.exported" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterLogExported) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterLogExported) Description() string { + return "The number of log records for which the export has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_log_records`), rejected log records MUST count as failed and only +// non-rejected log records count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterLogExported) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_log_records`), rejected log records MUST count as failed and only +// non-rejected log records count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. 
+func (m SDKExporterLogExported) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKExporterLogExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterLogExported) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterLogExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterLogExported) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. 
+func (SDKExporterLogExported) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterLogInflight is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.log.inflight" semantic conventions. It +// represents the number of log records which were passed to the exporter, but +// that have not been exported yet (neither successful, nor failed). +type SDKExporterLogInflight struct { + metric.Int64UpDownCounter +} + +// NewSDKExporterLogInflight returns a new SDKExporterLogInflight instrument. +func NewSDKExporterLogInflight( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKExporterLogInflight, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.exporter.log.inflight", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), + metric.WithUnit("{log_record}"), + }, opt...)..., + ) + if err != nil { + return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, err + } + return SDKExporterLogInflight{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterLogInflight) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKExporterLogInflight) Name() string { + return "otel.sdk.exporter.log.inflight" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterLogInflight) Unit() string { + return "{log_record}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterLogInflight) Description() string { + return "The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterLogInflight) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterLogInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. 
+func (SDKExporterLogInflight) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterLogInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterLogInflight) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterLogInflight) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterMetricDataPointExported is an instrument used to record metric +// values conforming to the "otel.sdk.exporter.metric_data_point.exported" +// semantic conventions. It represents the number of metric data points for which +// the export has finished, either successful or failed. +type SDKExporterMetricDataPointExported struct { + metric.Int64Counter +} + +// NewSDKExporterMetricDataPointExported returns a new +// SDKExporterMetricDataPointExported instrument. +func NewSDKExporterMetricDataPointExported( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKExporterMetricDataPointExported, error) { + // Check if the meter is nil. 
+ if m == nil { + return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "otel.sdk.exporter.metric_data_point.exported", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of metric data points for which the export has finished, either successful or failed."), + metric.WithUnit("{data_point}"), + }, opt...)..., + ) + if err != nil { + return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, err + } + return SDKExporterMetricDataPointExported{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterMetricDataPointExported) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterMetricDataPointExported) Name() string { + return "otel.sdk.exporter.metric_data_point.exported" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterMetricDataPointExported) Unit() string { + return "{data_point}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterMetricDataPointExported) Description() string { + return "The number of metric data points for which the export has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_data_points`), rejected data points MUST count as failed and only +// non-rejected data points count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. 
+func (m SDKExporterMetricDataPointExported) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with +// `rejected_data_points`), rejected data points MUST count as failed and only +// non-rejected data points count as success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterMetricDataPointExported) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKExporterMetricDataPointExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. 
+func (SDKExporterMetricDataPointExported) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterMetricDataPointExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterMetricDataPointExported) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterMetricDataPointExported) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterMetricDataPointInflight is an instrument used to record metric +// values conforming to the "otel.sdk.exporter.metric_data_point.inflight" +// semantic conventions. It represents the number of metric data points which +// were passed to the exporter, but that have not been exported yet (neither +// successful, nor failed). +type SDKExporterMetricDataPointInflight struct { + metric.Int64UpDownCounter +} + +// NewSDKExporterMetricDataPointInflight returns a new +// SDKExporterMetricDataPointInflight instrument. +func NewSDKExporterMetricDataPointInflight( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (SDKExporterMetricDataPointInflight, error) { + // Check if the meter is nil. 
+ if m == nil { + return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "otel.sdk.exporter.metric_data_point.inflight", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."), + metric.WithUnit("{data_point}"), + }, opt...)..., + ) + if err != nil { + return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, err + } + return SDKExporterMetricDataPointInflight{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterMetricDataPointInflight) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterMetricDataPointInflight) Name() string { + return "otel.sdk.exporter.metric_data_point.inflight" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterMetricDataPointInflight) Unit() string { + return "{data_point}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterMetricDataPointInflight) Description() string { + return "The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. 
+func (m SDKExporterMetricDataPointInflight) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +func (m SDKExporterMetricDataPointInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKExporterMetricDataPointInflight) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKExporterMetricDataPointInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. 
+func (SDKExporterMetricDataPointInflight) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterMetricDataPointInflight) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterOperationDuration is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.operation.duration" semantic conventions. +// It represents the duration of exporting a batch of telemetry records. +type SDKExporterOperationDuration struct { + metric.Float64Histogram +} + +// NewSDKExporterOperationDuration returns a new SDKExporterOperationDuration +// instrument. +func NewSDKExporterOperationDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (SDKExporterOperationDuration, error) { + // Check if the meter is nil. + if m == nil { + return SDKExporterOperationDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "otel.sdk.exporter.operation.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("The duration of exporting a batch of telemetry records."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return SDKExporterOperationDuration{noop.Float64Histogram{}}, err + } + return SDKExporterOperationDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterOperationDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. 
+func (SDKExporterOperationDuration) Name() string { + return "otel.sdk.exporter.operation.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterOperationDuration) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterOperationDuration) Description() string { + return "The duration of exporting a batch of telemetry records." +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// This metric defines successful operations using the full success definitions +// for [http] +// and [grpc]. Anything else is defined as an unsuccessful operation. For +// successful +// operations, `error.type` MUST NOT be set. For unsuccessful export operations, +// `error.type` MUST contain a relevant failure cause. +// +// [http]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success-1 +// [grpc]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success +func (m SDKExporterOperationDuration) Record( + ctx context.Context, + val float64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// This metric defines successful operations using the full success definitions +// for [http] +// and [grpc]. Anything else is defined as an unsuccessful operation. For +// successful +// operations, `error.type` MUST NOT be set. For unsuccessful export operations, +// `error.type` MUST contain a relevant failure cause. 
+//
+// [http]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success-1
+// [grpc]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success
+func (m SDKExporterOperationDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		// Return here so the value is not recorded a second time below;
+		// matches the fast-path behavior of the Add/AddSet/Record siblings.
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents the describes a class of error the operation ended
+// with.
+func (SDKExporterOperationDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+	return attribute.String("error.type", string(val))
+}
+
+// AttrHTTPResponseStatusCode returns an optional attribute for the
+// "http.response.status_code" semantic convention. It represents the HTTP status
+// code of the last HTTP request performed in scope of this export call.
+func (SDKExporterOperationDuration) AttrHTTPResponseStatusCode(val int) attribute.KeyValue {
+	return attribute.Int("http.response.status_code", val)
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKExporterOperationDuration) AttrComponentName(val string) attribute.KeyValue {
+	return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
+func (SDKExporterOperationDuration) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// AttrRPCGRPCStatusCode returns an optional attribute for the +// "rpc.grpc.status_code" semantic convention. It represents the gRPC status code +// of the last gRPC requests performed in scope of this export call. +func (SDKExporterOperationDuration) AttrRPCGRPCStatusCode(val RPCGRPCStatusCodeAttr) attribute.KeyValue { + return attribute.Int64("rpc.grpc.status_code", int64(val)) +} + +// AttrServerAddress returns an optional attribute for the "server.address" +// semantic convention. It represents the server domain name if available without +// reverse DNS lookup; otherwise, IP address or Unix domain socket name. +func (SDKExporterOperationDuration) AttrServerAddress(val string) attribute.KeyValue { + return attribute.String("server.address", val) +} + +// AttrServerPort returns an optional attribute for the "server.port" semantic +// convention. It represents the server port number. +func (SDKExporterOperationDuration) AttrServerPort(val int) attribute.KeyValue { + return attribute.Int("server.port", val) +} + +// SDKExporterSpanExported is an instrument used to record metric values +// conforming to the "otel.sdk.exporter.span.exported" semantic conventions. It +// represents the number of spans for which the export has finished, either +// successful or failed. +type SDKExporterSpanExported struct { + metric.Int64Counter +} + +// NewSDKExporterSpanExported returns a new SDKExporterSpanExported instrument. +func NewSDKExporterSpanExported( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (SDKExporterSpanExported, error) { + // Check if the meter is nil. 
+ if m == nil { + return SDKExporterSpanExported{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "otel.sdk.exporter.span.exported", + append([]metric.Int64CounterOption{ + metric.WithDescription("The number of spans for which the export has finished, either successful or failed."), + metric.WithUnit("{span}"), + }, opt...)..., + ) + if err != nil { + return SDKExporterSpanExported{noop.Int64Counter{}}, err + } + return SDKExporterSpanExported{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m SDKExporterSpanExported) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (SDKExporterSpanExported) Name() string { + return "otel.sdk.exporter.span.exported" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKExporterSpanExported) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKExporterSpanExported) Description() string { + return "The number of spans for which the export has finished, either successful or failed." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with `rejected_spans` +// ), rejected spans MUST count as failed and only non-rejected spans count as +// success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. 
+func (m SDKExporterSpanExported) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// For successful exports, `error.type` MUST NOT be set. For failed exports, +// `error.type` MUST contain the failure cause. +// For exporters with partial success semantics (e.g. OTLP with `rejected_spans` +// ), rejected spans MUST count as failed and only non-rejected spans count as +// success. +// If no rejection reason is available, `rejected` SHOULD be used as value for +// `error.type`. +func (m SDKExporterSpanExported) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKExporterSpanExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. 
func (SDKExporterSpanExported) AttrComponentName(val string) attribute.KeyValue {
	return attribute.String("otel.component.name", val)
}

// AttrComponentType returns an optional attribute for the "otel.component.type"
// semantic convention. It represents a name identifying the type of the
// OpenTelemetry component.
func (SDKExporterSpanExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
	return attribute.String("otel.component.type", string(val))
}

// AttrServerAddress returns an optional attribute for the "server.address"
// semantic convention. It represents the server domain name if available without
// reverse DNS lookup; otherwise, IP address or Unix domain socket name.
func (SDKExporterSpanExported) AttrServerAddress(val string) attribute.KeyValue {
	return attribute.String("server.address", val)
}

// AttrServerPort returns an optional attribute for the "server.port" semantic
// convention. It represents the server port number.
func (SDKExporterSpanExported) AttrServerPort(val int) attribute.KeyValue {
	return attribute.Int("server.port", val)
}

// SDKExporterSpanInflight is an instrument used to record metric values
// conforming to the "otel.sdk.exporter.span.inflight" semantic conventions. It
// represents the number of spans which were passed to the exporter, but that
// have not been exported yet (neither successful, nor failed).
type SDKExporterSpanInflight struct {
	metric.Int64UpDownCounter
}

// NewSDKExporterSpanInflight returns a new SDKExporterSpanInflight instrument.
func NewSDKExporterSpanInflight(
	m metric.Meter,
	opt ...metric.Int64UpDownCounterOption,
) (SDKExporterSpanInflight, error) {
	// Check if the meter is nil. A nil meter yields a no-op instrument so
	// callers never need a nil check before recording.
	if m == nil {
		return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, nil
	}

	i, err := m.Int64UpDownCounter(
		"otel.sdk.exporter.span.inflight",
		append([]metric.Int64UpDownCounterOption{
			metric.WithDescription("The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."),
			metric.WithUnit("{span}"),
		}, opt...)...,
	)
	if err != nil {
		return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, err
	}
	return SDKExporterSpanInflight{i}, nil
}

// Inst returns the underlying metric instrument.
func (m SDKExporterSpanInflight) Inst() metric.Int64UpDownCounter {
	return m.Int64UpDownCounter
}

// Name returns the semantic convention name of the instrument.
func (SDKExporterSpanInflight) Name() string {
	return "otel.sdk.exporter.span.inflight"
}

// Unit returns the semantic convention unit of the instrument.
func (SDKExporterSpanInflight) Unit() string {
	return "{span}"
}

// Description returns the semantic convention description of the instrument.
func (SDKExporterSpanInflight) Description() string {
	return "The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."
}

// Add adds incr to the existing count for attrs.
//
// All additional attrs passed are included in the recorded value.
//
// For successful exports, `error.type` MUST NOT be set. For failed exports,
// `error.type` MUST contain the failure cause.
func (m SDKExporterSpanInflight) Add(
	ctx context.Context,
	incr int64,
	attrs ...attribute.KeyValue,
) {
	if len(attrs) == 0 {
		m.Int64UpDownCounter.Add(ctx, incr)
		return
	}

	// Reuse a pooled options slice to avoid a per-call allocation.
	o := addOptPool.Get().(*[]metric.AddOption)
	defer func() {
		*o = (*o)[:0]
		addOptPool.Put(o)
	}()

	*o = append(
		*o,
		metric.WithAttributes(
			attrs...,
		),
	)

	m.Int64UpDownCounter.Add(ctx, incr, *o...)
}

// AddSet adds incr to the existing count for set.
//
// For successful exports, `error.type` MUST NOT be set. For failed exports,
// `error.type` MUST contain the failure cause.
func (m SDKExporterSpanInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) {
	if set.Len() == 0 {
		m.Int64UpDownCounter.Add(ctx, incr)
		return
	}

	// Reuse a pooled options slice to avoid a per-call allocation.
	o := addOptPool.Get().(*[]metric.AddOption)
	defer func() {
		*o = (*o)[:0]
		addOptPool.Put(o)
	}()

	*o = append(*o, metric.WithAttributeSet(set))
	m.Int64UpDownCounter.Add(ctx, incr, *o...)
}

// AttrComponentName returns an optional attribute for the "otel.component.name"
// semantic convention. It represents a name uniquely identifying the instance of
// the OpenTelemetry component within its containing SDK instance.
func (SDKExporterSpanInflight) AttrComponentName(val string) attribute.KeyValue {
	return attribute.String("otel.component.name", val)
}

// AttrComponentType returns an optional attribute for the "otel.component.type"
// semantic convention. It represents a name identifying the type of the
// OpenTelemetry component.
func (SDKExporterSpanInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
	return attribute.String("otel.component.type", string(val))
}

// AttrServerAddress returns an optional attribute for the "server.address"
// semantic convention. It represents the server domain name if available without
// reverse DNS lookup; otherwise, IP address or Unix domain socket name.
func (SDKExporterSpanInflight) AttrServerAddress(val string) attribute.KeyValue {
	return attribute.String("server.address", val)
}

// AttrServerPort returns an optional attribute for the "server.port" semantic
// convention. It represents the server port number.
func (SDKExporterSpanInflight) AttrServerPort(val int) attribute.KeyValue {
	return attribute.Int("server.port", val)
}

// SDKLogCreated is an instrument used to record metric values conforming to the
// "otel.sdk.log.created" semantic conventions. It represents the number of logs
// submitted to enabled SDK Loggers.
type SDKLogCreated struct {
	metric.Int64Counter
}

// NewSDKLogCreated returns a new SDKLogCreated instrument.
func NewSDKLogCreated(
	m metric.Meter,
	opt ...metric.Int64CounterOption,
) (SDKLogCreated, error) {
	// Check if the meter is nil. A nil meter yields a no-op instrument so
	// callers never need a nil check before recording.
	if m == nil {
		return SDKLogCreated{noop.Int64Counter{}}, nil
	}

	i, err := m.Int64Counter(
		"otel.sdk.log.created",
		append([]metric.Int64CounterOption{
			metric.WithDescription("The number of logs submitted to enabled SDK Loggers."),
			metric.WithUnit("{log_record}"),
		}, opt...)...,
	)
	if err != nil {
		return SDKLogCreated{noop.Int64Counter{}}, err
	}
	return SDKLogCreated{i}, nil
}

// Inst returns the underlying metric instrument.
func (m SDKLogCreated) Inst() metric.Int64Counter {
	return m.Int64Counter
}

// Name returns the semantic convention name of the instrument.
func (SDKLogCreated) Name() string {
	return "otel.sdk.log.created"
}

// Unit returns the semantic convention unit of the instrument.
func (SDKLogCreated) Unit() string {
	return "{log_record}"
}

// Description returns the semantic convention description of the instrument.
func (SDKLogCreated) Description() string {
	return "The number of logs submitted to enabled SDK Loggers."
}

// Add adds incr to the existing count for attrs.
func (m SDKLogCreated) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) {
	if len(attrs) == 0 {
		m.Int64Counter.Add(ctx, incr)
		return
	}

	// Reuse a pooled options slice to avoid a per-call allocation.
	o := addOptPool.Get().(*[]metric.AddOption)
	defer func() {
		*o = (*o)[:0]
		addOptPool.Put(o)
	}()

	*o = append(*o, metric.WithAttributes(attrs...))
	m.Int64Counter.Add(ctx, incr, *o...)
}

// AddSet adds incr to the existing count for set.
func (m SDKLogCreated) AddSet(ctx context.Context, incr int64, set attribute.Set) {
	if set.Len() == 0 {
		m.Int64Counter.Add(ctx, incr)
		return
	}

	// Reuse a pooled options slice to avoid a per-call allocation.
	o := addOptPool.Get().(*[]metric.AddOption)
	defer func() {
		*o = (*o)[:0]
		addOptPool.Put(o)
	}()

	*o = append(*o, metric.WithAttributeSet(set))
	m.Int64Counter.Add(ctx, incr, *o...)
}

// SDKMetricReaderCollectionDuration is an instrument used to record metric
// values conforming to the "otel.sdk.metric_reader.collection.duration" semantic
// conventions. It represents the duration of the collect operation of the metric
// reader.
type SDKMetricReaderCollectionDuration struct {
	metric.Float64Histogram
}

// NewSDKMetricReaderCollectionDuration returns a new
// SDKMetricReaderCollectionDuration instrument.
func NewSDKMetricReaderCollectionDuration(
	m metric.Meter,
	opt ...metric.Float64HistogramOption,
) (SDKMetricReaderCollectionDuration, error) {
	// Check if the meter is nil. A nil meter yields a no-op instrument so
	// callers never need a nil check before recording.
	if m == nil {
		return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, nil
	}

	i, err := m.Float64Histogram(
		"otel.sdk.metric_reader.collection.duration",
		append([]metric.Float64HistogramOption{
			metric.WithDescription("The duration of the collect operation of the metric reader."),
			metric.WithUnit("s"),
		}, opt...)...,
	)
	if err != nil {
		return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, err
	}
	return SDKMetricReaderCollectionDuration{i}, nil
}

// Inst returns the underlying metric instrument.
func (m SDKMetricReaderCollectionDuration) Inst() metric.Float64Histogram {
	return m.Float64Histogram
}

// Name returns the semantic convention name of the instrument.
func (SDKMetricReaderCollectionDuration) Name() string {
	return "otel.sdk.metric_reader.collection.duration"
}

// Unit returns the semantic convention unit of the instrument.
func (SDKMetricReaderCollectionDuration) Unit() string {
	return "s"
}

// Description returns the semantic convention description of the instrument.
func (SDKMetricReaderCollectionDuration) Description() string {
	return "The duration of the collect operation of the metric reader."
}

// Record records val to the current distribution for attrs.
//
// All additional attrs passed are included in the recorded value.
//
// For successful collections, `error.type` MUST NOT be set. For failed
// collections, `error.type` SHOULD contain the failure cause.
// It can happen that metrics collection is successful for some MetricProducers,
// while others fail. In that case `error.type` SHOULD be set to any of the
// failure causes.
func (m SDKMetricReaderCollectionDuration) Record(
	ctx context.Context,
	val float64,
	attrs ...attribute.KeyValue,
) {
	if len(attrs) == 0 {
		m.Float64Histogram.Record(ctx, val)
		return
	}

	// Reuse a pooled options slice to avoid a per-call allocation.
	o := recOptPool.Get().(*[]metric.RecordOption)
	defer func() {
		*o = (*o)[:0]
		recOptPool.Put(o)
	}()

	*o = append(
		*o,
		metric.WithAttributes(
			attrs...,
		),
	)

	m.Float64Histogram.Record(ctx, val, *o...)
}

// RecordSet records val to the current distribution for set.
//
// For successful collections, `error.type` MUST NOT be set. For failed
// collections, `error.type` SHOULD contain the failure cause.
// It can happen that metrics collection is successful for some MetricProducers,
// while others fail. In that case `error.type` SHOULD be set to any of the
// failure causes.
+func (m SDKMetricReaderCollectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// AttrErrorType returns an optional attribute for the "error.type" semantic +// convention. It represents the describes a class of error the operation ended +// with. +func (SDKMetricReaderCollectionDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue { + return attribute.String("error.type", string(val)) +} + +// AttrComponentName returns an optional attribute for the "otel.component.name" +// semantic convention. It represents a name uniquely identifying the instance of +// the OpenTelemetry component within its containing SDK instance. +func (SDKMetricReaderCollectionDuration) AttrComponentName(val string) attribute.KeyValue { + return attribute.String("otel.component.name", val) +} + +// AttrComponentType returns an optional attribute for the "otel.component.type" +// semantic convention. It represents a name identifying the type of the +// OpenTelemetry component. +func (SDKMetricReaderCollectionDuration) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue { + return attribute.String("otel.component.type", string(val)) +} + +// SDKProcessorLogProcessed is an instrument used to record metric values +// conforming to the "otel.sdk.processor.log.processed" semantic conventions. It +// represents the number of log records for which the processing has finished, +// either successful or failed. +type SDKProcessorLogProcessed struct { + metric.Int64Counter +} + +// NewSDKProcessorLogProcessed returns a new SDKProcessorLogProcessed instrument. 
func NewSDKProcessorLogProcessed(
	m metric.Meter,
	opt ...metric.Int64CounterOption,
) (SDKProcessorLogProcessed, error) {
	// Check if the meter is nil. A nil meter yields a no-op instrument so
	// callers never need a nil check before recording.
	if m == nil {
		return SDKProcessorLogProcessed{noop.Int64Counter{}}, nil
	}

	i, err := m.Int64Counter(
		"otel.sdk.processor.log.processed",
		append([]metric.Int64CounterOption{
			metric.WithDescription("The number of log records for which the processing has finished, either successful or failed."),
			metric.WithUnit("{log_record}"),
		}, opt...)...,
	)
	if err != nil {
		return SDKProcessorLogProcessed{noop.Int64Counter{}}, err
	}
	return SDKProcessorLogProcessed{i}, nil
}

// Inst returns the underlying metric instrument.
func (m SDKProcessorLogProcessed) Inst() metric.Int64Counter {
	return m.Int64Counter
}

// Name returns the semantic convention name of the instrument.
func (SDKProcessorLogProcessed) Name() string {
	return "otel.sdk.processor.log.processed"
}

// Unit returns the semantic convention unit of the instrument.
func (SDKProcessorLogProcessed) Unit() string {
	return "{log_record}"
}

// Description returns the semantic convention description of the instrument.
func (SDKProcessorLogProcessed) Description() string {
	return "The number of log records for which the processing has finished, either successful or failed."
}

// Add adds incr to the existing count for attrs.
//
// All additional attrs passed are included in the recorded value.
//
// For successful processing, `error.type` MUST NOT be set. For failed
// processing, `error.type` MUST contain the failure cause.
// For the SDK Simple and Batching Log Record Processor a log record is
// considered to be processed already when it has been submitted to the exporter,
// not when the corresponding export call has finished.
func (m SDKProcessorLogProcessed) Add(
	ctx context.Context,
	incr int64,
	attrs ...attribute.KeyValue,
) {
	if len(attrs) == 0 {
		m.Int64Counter.Add(ctx, incr)
		return
	}

	// Reuse a pooled options slice to avoid a per-call allocation.
	o := addOptPool.Get().(*[]metric.AddOption)
	defer func() {
		*o = (*o)[:0]
		addOptPool.Put(o)
	}()

	*o = append(
		*o,
		metric.WithAttributes(
			attrs...,
		),
	)

	m.Int64Counter.Add(ctx, incr, *o...)
}

// AddSet adds incr to the existing count for set.
//
// For successful processing, `error.type` MUST NOT be set. For failed
// processing, `error.type` MUST contain the failure cause.
// For the SDK Simple and Batching Log Record Processor a log record is
// considered to be processed already when it has been submitted to the exporter,
// not when the corresponding export call has finished.
func (m SDKProcessorLogProcessed) AddSet(ctx context.Context, incr int64, set attribute.Set) {
	if set.Len() == 0 {
		m.Int64Counter.Add(ctx, incr)
		return
	}

	// Reuse a pooled options slice to avoid a per-call allocation.
	o := addOptPool.Get().(*[]metric.AddOption)
	defer func() {
		*o = (*o)[:0]
		addOptPool.Put(o)
	}()

	*o = append(*o, metric.WithAttributeSet(set))
	m.Int64Counter.Add(ctx, incr, *o...)
}

// AttrErrorType returns an optional attribute for the "error.type" semantic
// convention. It represents a low-cardinality description of the failure reason.
// SDK Batching Log Record Processors MUST use `queue_full` for log records
// dropped due to a full queue.
func (SDKProcessorLogProcessed) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
	return attribute.String("error.type", string(val))
}

// AttrComponentName returns an optional attribute for the "otel.component.name"
// semantic convention. It represents a name uniquely identifying the instance of
// the OpenTelemetry component within its containing SDK instance.
func (SDKProcessorLogProcessed) AttrComponentName(val string) attribute.KeyValue {
	return attribute.String("otel.component.name", val)
}

// AttrComponentType returns an optional attribute for the "otel.component.type"
// semantic convention. It represents a name identifying the type of the
// OpenTelemetry component.
func (SDKProcessorLogProcessed) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
	return attribute.String("otel.component.type", string(val))
}

// SDKProcessorLogQueueCapacity is an instrument used to record metric values
// conforming to the "otel.sdk.processor.log.queue.capacity" semantic
// conventions. It represents the maximum number of log records the queue of a
// given instance of an SDK Log Record processor can hold.
type SDKProcessorLogQueueCapacity struct {
	metric.Int64ObservableUpDownCounter
}

// NewSDKProcessorLogQueueCapacity returns a new SDKProcessorLogQueueCapacity
// instrument.
func NewSDKProcessorLogQueueCapacity(
	m metric.Meter,
	opt ...metric.Int64ObservableUpDownCounterOption,
) (SDKProcessorLogQueueCapacity, error) {
	// Check if the meter is nil. A nil meter yields a no-op instrument so
	// callers never need a nil check before recording.
	if m == nil {
		return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil
	}

	i, err := m.Int64ObservableUpDownCounter(
		"otel.sdk.processor.log.queue.capacity",
		append([]metric.Int64ObservableUpDownCounterOption{
			metric.WithDescription("The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold."),
			metric.WithUnit("{log_record}"),
		}, opt...)...,
	)
	if err != nil {
		return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err
	}
	return SDKProcessorLogQueueCapacity{i}, nil
}

// Inst returns the underlying metric instrument.
func (m SDKProcessorLogQueueCapacity) Inst() metric.Int64ObservableUpDownCounter {
	return m.Int64ObservableUpDownCounter
}

// Name returns the semantic convention name of the instrument.
func (SDKProcessorLogQueueCapacity) Name() string {
	return "otel.sdk.processor.log.queue.capacity"
}

// Unit returns the semantic convention unit of the instrument.
func (SDKProcessorLogQueueCapacity) Unit() string {
	return "{log_record}"
}

// Description returns the semantic convention description of the instrument.
func (SDKProcessorLogQueueCapacity) Description() string {
	return "The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold."
}

// AttrComponentName returns an optional attribute for the "otel.component.name"
// semantic convention. It represents a name uniquely identifying the instance of
// the OpenTelemetry component within its containing SDK instance.
func (SDKProcessorLogQueueCapacity) AttrComponentName(val string) attribute.KeyValue {
	return attribute.String("otel.component.name", val)
}

// AttrComponentType returns an optional attribute for the "otel.component.type"
// semantic convention. It represents a name identifying the type of the
// OpenTelemetry component.
func (SDKProcessorLogQueueCapacity) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
	return attribute.String("otel.component.type", string(val))
}

// SDKProcessorLogQueueSize is an instrument used to record metric values
// conforming to the "otel.sdk.processor.log.queue.size" semantic conventions. It
// represents the number of log records in the queue of a given instance of an
// SDK log processor.
type SDKProcessorLogQueueSize struct {
	metric.Int64ObservableUpDownCounter
}

// NewSDKProcessorLogQueueSize returns a new SDKProcessorLogQueueSize instrument.
func NewSDKProcessorLogQueueSize(
	m metric.Meter,
	opt ...metric.Int64ObservableUpDownCounterOption,
) (SDKProcessorLogQueueSize, error) {
	// Check if the meter is nil. A nil meter yields a no-op instrument so
	// callers never need a nil check before recording.
	if m == nil {
		return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, nil
	}

	i, err := m.Int64ObservableUpDownCounter(
		"otel.sdk.processor.log.queue.size",
		append([]metric.Int64ObservableUpDownCounterOption{
			metric.WithDescription("The number of log records in the queue of a given instance of an SDK log processor."),
			metric.WithUnit("{log_record}"),
		}, opt...)...,
	)
	if err != nil {
		return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, err
	}
	return SDKProcessorLogQueueSize{i}, nil
}

// Inst returns the underlying metric instrument.
func (m SDKProcessorLogQueueSize) Inst() metric.Int64ObservableUpDownCounter {
	return m.Int64ObservableUpDownCounter
}

// Name returns the semantic convention name of the instrument.
func (SDKProcessorLogQueueSize) Name() string {
	return "otel.sdk.processor.log.queue.size"
}

// Unit returns the semantic convention unit of the instrument.
func (SDKProcessorLogQueueSize) Unit() string {
	return "{log_record}"
}

// Description returns the semantic convention description of the instrument.
func (SDKProcessorLogQueueSize) Description() string {
	return "The number of log records in the queue of a given instance of an SDK log processor."
}

// AttrComponentName returns an optional attribute for the "otel.component.name"
// semantic convention. It represents a name uniquely identifying the instance of
// the OpenTelemetry component within its containing SDK instance.
func (SDKProcessorLogQueueSize) AttrComponentName(val string) attribute.KeyValue {
	return attribute.String("otel.component.name", val)
}

// AttrComponentType returns an optional attribute for the "otel.component.type"
// semantic convention. It represents a name identifying the type of the
// OpenTelemetry component.
func (SDKProcessorLogQueueSize) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
	return attribute.String("otel.component.type", string(val))
}

// SDKProcessorSpanProcessed is an instrument used to record metric values
// conforming to the "otel.sdk.processor.span.processed" semantic conventions. It
// represents the number of spans for which the processing has finished, either
// successful or failed.
type SDKProcessorSpanProcessed struct {
	metric.Int64Counter
}

// NewSDKProcessorSpanProcessed returns a new SDKProcessorSpanProcessed
// instrument.
func NewSDKProcessorSpanProcessed(
	m metric.Meter,
	opt ...metric.Int64CounterOption,
) (SDKProcessorSpanProcessed, error) {
	// Check if the meter is nil. A nil meter yields a no-op instrument so
	// callers never need a nil check before recording.
	if m == nil {
		return SDKProcessorSpanProcessed{noop.Int64Counter{}}, nil
	}

	i, err := m.Int64Counter(
		"otel.sdk.processor.span.processed",
		append([]metric.Int64CounterOption{
			metric.WithDescription("The number of spans for which the processing has finished, either successful or failed."),
			metric.WithUnit("{span}"),
		}, opt...)...,
	)
	if err != nil {
		return SDKProcessorSpanProcessed{noop.Int64Counter{}}, err
	}
	return SDKProcessorSpanProcessed{i}, nil
}

// Inst returns the underlying metric instrument.
func (m SDKProcessorSpanProcessed) Inst() metric.Int64Counter {
	return m.Int64Counter
}

// Name returns the semantic convention name of the instrument.
func (SDKProcessorSpanProcessed) Name() string {
	return "otel.sdk.processor.span.processed"
}

// Unit returns the semantic convention unit of the instrument.
func (SDKProcessorSpanProcessed) Unit() string {
	return "{span}"
}

// Description returns the semantic convention description of the instrument.
func (SDKProcessorSpanProcessed) Description() string {
	return "The number of spans for which the processing has finished, either successful or failed."
}

// Add adds incr to the existing count for attrs.
//
// All additional attrs passed are included in the recorded value.
//
// For successful processing, `error.type` MUST NOT be set. For failed
// processing, `error.type` MUST contain the failure cause.
// For the SDK Simple and Batching Span Processor a span is considered to be
// processed already when it has been submitted to the exporter, not when the
// corresponding export call has finished.
func (m SDKProcessorSpanProcessed) Add(
	ctx context.Context,
	incr int64,
	attrs ...attribute.KeyValue,
) {
	if len(attrs) == 0 {
		m.Int64Counter.Add(ctx, incr)
		return
	}

	// Reuse a pooled options slice to avoid a per-call allocation.
	o := addOptPool.Get().(*[]metric.AddOption)
	defer func() {
		*o = (*o)[:0]
		addOptPool.Put(o)
	}()

	*o = append(
		*o,
		metric.WithAttributes(
			attrs...,
		),
	)

	m.Int64Counter.Add(ctx, incr, *o...)
}

// AddSet adds incr to the existing count for set.
//
// For successful processing, `error.type` MUST NOT be set. For failed
// processing, `error.type` MUST contain the failure cause.
// For the SDK Simple and Batching Span Processor a span is considered to be
// processed already when it has been submitted to the exporter, not when the
// corresponding export call has finished.
func (m SDKProcessorSpanProcessed) AddSet(ctx context.Context, incr int64, set attribute.Set) {
	if set.Len() == 0 {
		m.Int64Counter.Add(ctx, incr)
		return
	}

	// Reuse a pooled options slice to avoid a per-call allocation.
	o := addOptPool.Get().(*[]metric.AddOption)
	defer func() {
		*o = (*o)[:0]
		addOptPool.Put(o)
	}()

	*o = append(*o, metric.WithAttributeSet(set))
	m.Int64Counter.Add(ctx, incr, *o...)
}

// AttrErrorType returns an optional attribute for the "error.type" semantic
// convention. It represents a low-cardinality description of the failure reason.
// SDK Batching Span Processors MUST use `queue_full` for spans dropped due to a
// full queue.
func (SDKProcessorSpanProcessed) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
	return attribute.String("error.type", string(val))
}

// AttrComponentName returns an optional attribute for the "otel.component.name"
// semantic convention. It represents a name uniquely identifying the instance of
// the OpenTelemetry component within its containing SDK instance.
func (SDKProcessorSpanProcessed) AttrComponentName(val string) attribute.KeyValue {
	return attribute.String("otel.component.name", val)
}

// AttrComponentType returns an optional attribute for the "otel.component.type"
// semantic convention. It represents a name identifying the type of the
// OpenTelemetry component.
func (SDKProcessorSpanProcessed) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
	return attribute.String("otel.component.type", string(val))
}

// SDKProcessorSpanQueueCapacity is an instrument used to record metric values
// conforming to the "otel.sdk.processor.span.queue.capacity" semantic
// conventions. It represents the maximum number of spans the queue of a given
// instance of an SDK span processor can hold.
type SDKProcessorSpanQueueCapacity struct {
	metric.Int64ObservableUpDownCounter
}

// NewSDKProcessorSpanQueueCapacity returns a new SDKProcessorSpanQueueCapacity
// instrument.
func NewSDKProcessorSpanQueueCapacity(
	m metric.Meter,
	opt ...metric.Int64ObservableUpDownCounterOption,
) (SDKProcessorSpanQueueCapacity, error) {
	// Check if the meter is nil. A nil meter yields a no-op instrument so
	// callers never need a nil check before recording.
	if m == nil {
		return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil
	}

	i, err := m.Int64ObservableUpDownCounter(
		"otel.sdk.processor.span.queue.capacity",
		append([]metric.Int64ObservableUpDownCounterOption{
			metric.WithDescription("The maximum number of spans the queue of a given instance of an SDK span processor can hold."),
			metric.WithUnit("{span}"),
		}, opt...)...,
	)
	if err != nil {
		return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err
	}
	return SDKProcessorSpanQueueCapacity{i}, nil
}

// Inst returns the underlying metric instrument.
func (m SDKProcessorSpanQueueCapacity) Inst() metric.Int64ObservableUpDownCounter {
	return m.Int64ObservableUpDownCounter
}

// Name returns the semantic convention name of the instrument.
func (SDKProcessorSpanQueueCapacity) Name() string {
	return "otel.sdk.processor.span.queue.capacity"
}

// Unit returns the semantic convention unit of the instrument.
func (SDKProcessorSpanQueueCapacity) Unit() string {
	return "{span}"
}

// Description returns the semantic convention description of the instrument.
func (SDKProcessorSpanQueueCapacity) Description() string {
	return "The maximum number of spans the queue of a given instance of an SDK span processor can hold."
}

// AttrComponentName returns an optional attribute for the "otel.component.name"
// semantic convention. It represents a name uniquely identifying the instance of
// the OpenTelemetry component within its containing SDK instance.
func (SDKProcessorSpanQueueCapacity) AttrComponentName(val string) attribute.KeyValue {
	return attribute.String("otel.component.name", val)
}

// AttrComponentType returns an optional attribute for the "otel.component.type"
// semantic convention. It represents a name identifying the type of the
// OpenTelemetry component.
func (SDKProcessorSpanQueueCapacity) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
	return attribute.String("otel.component.type", string(val))
}

// SDKProcessorSpanQueueSize is an instrument used to record metric values
// conforming to the "otel.sdk.processor.span.queue.size" semantic conventions.
// It represents the number of spans in the queue of a given instance of an SDK
// span processor.
type SDKProcessorSpanQueueSize struct {
	metric.Int64ObservableUpDownCounter
}

// NewSDKProcessorSpanQueueSize returns a new SDKProcessorSpanQueueSize
// instrument.
func NewSDKProcessorSpanQueueSize(
	m metric.Meter,
	opt ...metric.Int64ObservableUpDownCounterOption,
) (SDKProcessorSpanQueueSize, error) {
	// Check if the meter is nil. A nil meter yields a no-op instrument so
	// callers never need a nil check before recording.
	if m == nil {
		return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, nil
	}

	i, err := m.Int64ObservableUpDownCounter(
		"otel.sdk.processor.span.queue.size",
		append([]metric.Int64ObservableUpDownCounterOption{
			metric.WithDescription("The number of spans in the queue of a given instance of an SDK span processor."),
			metric.WithUnit("{span}"),
		}, opt...)...,
	)
	if err != nil {
		return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, err
	}
	return SDKProcessorSpanQueueSize{i}, nil
}

// Inst returns the underlying metric instrument.
func (m SDKProcessorSpanQueueSize) Inst() metric.Int64ObservableUpDownCounter {
	return m.Int64ObservableUpDownCounter
}

// Name returns the semantic convention name of the instrument.
func (SDKProcessorSpanQueueSize) Name() string {
	return "otel.sdk.processor.span.queue.size"
}

// Unit returns the semantic convention unit of the instrument.
func (SDKProcessorSpanQueueSize) Unit() string {
	return "{span}"
}

// Description returns the semantic convention description of the instrument.
func (SDKProcessorSpanQueueSize) Description() string {
	return "The number of spans in the queue of a given instance of an SDK span processor."
}

// AttrComponentName returns an optional attribute for the "otel.component.name"
// semantic convention. It represents a name uniquely identifying the instance of
// the OpenTelemetry component within its containing SDK instance.
func (SDKProcessorSpanQueueSize) AttrComponentName(val string) attribute.KeyValue {
	return attribute.String("otel.component.name", val)
}

// AttrComponentType returns an optional attribute for the "otel.component.type"
// semantic convention. It represents a name identifying the type of the
// OpenTelemetry component.
func (SDKProcessorSpanQueueSize) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
	return attribute.String("otel.component.type", string(val))
}

// SDKSpanLive is an instrument used to record metric values conforming to the
// "otel.sdk.span.live" semantic conventions. It represents the number of created
// spans with `recording=true` for which the end operation has not been called
// yet.
type SDKSpanLive struct {
	metric.Int64UpDownCounter
}

// NewSDKSpanLive returns a new SDKSpanLive instrument.
func NewSDKSpanLive(
	m metric.Meter,
	opt ...metric.Int64UpDownCounterOption,
) (SDKSpanLive, error) {
	// Check if the meter is nil. A nil meter yields a no-op instrument so
	// callers never need a nil check before recording.
	if m == nil {
		return SDKSpanLive{noop.Int64UpDownCounter{}}, nil
	}

	i, err := m.Int64UpDownCounter(
		"otel.sdk.span.live",
		append([]metric.Int64UpDownCounterOption{
			metric.WithDescription("The number of created spans with `recording=true` for which the end operation has not been called yet."),
			metric.WithUnit("{span}"),
		}, opt...)...,
	)
	if err != nil {
		return SDKSpanLive{noop.Int64UpDownCounter{}}, err
	}
	return SDKSpanLive{i}, nil
}

// Inst returns the underlying metric instrument.
func (m SDKSpanLive) Inst() metric.Int64UpDownCounter {
	return m.Int64UpDownCounter
}

// Name returns the semantic convention name of the instrument.
func (SDKSpanLive) Name() string {
	return "otel.sdk.span.live"
}

// Unit returns the semantic convention unit of the instrument.
func (SDKSpanLive) Unit() string {
	return "{span}"
}

// Description returns the semantic convention description of the instrument.
func (SDKSpanLive) Description() string {
	return "The number of created spans with `recording=true` for which the end operation has not been called yet."
}

// Add adds incr to the existing count for attrs.
//
// All additional attrs passed are included in the recorded value.
func (m SDKSpanLive) Add(
	ctx context.Context,
	incr int64,
	attrs ...attribute.KeyValue,
) {
	if len(attrs) == 0 {
		m.Int64UpDownCounter.Add(ctx, incr)
		return
	}

	// Reuse a pooled options slice to avoid a per-call allocation.
	o := addOptPool.Get().(*[]metric.AddOption)
	defer func() {
		*o = (*o)[:0]
		addOptPool.Put(o)
	}()

	*o = append(
		*o,
		metric.WithAttributes(
			attrs...,
		),
	)

	m.Int64UpDownCounter.Add(ctx, incr, *o...)
}

// AddSet adds incr to the existing count for set.
func (m SDKSpanLive) AddSet(ctx context.Context, incr int64, set attribute.Set) {
	if set.Len() == 0 {
		m.Int64UpDownCounter.Add(ctx, incr)
		return
	}

	// Reuse a pooled options slice to avoid a per-call allocation.
	o := addOptPool.Get().(*[]metric.AddOption)
	defer func() {
		*o = (*o)[:0]
		addOptPool.Put(o)
	}()

	*o = append(*o, metric.WithAttributeSet(set))
	m.Int64UpDownCounter.Add(ctx, incr, *o...)
}

// AttrSpanSamplingResult returns an optional attribute for the
// "otel.span.sampling_result" semantic convention. It represents the result
// value of the sampler for this span.
func (SDKSpanLive) AttrSpanSamplingResult(val SpanSamplingResultAttr) attribute.KeyValue {
	return attribute.String("otel.span.sampling_result", string(val))
}

// SDKSpanStarted is an instrument used to record metric values conforming to the
// "otel.sdk.span.started" semantic conventions. It represents the number of
// created spans.
type SDKSpanStarted struct {
	metric.Int64Counter
}

// NewSDKSpanStarted returns a new SDKSpanStarted instrument.
func NewSDKSpanStarted(
	m metric.Meter,
	opt ...metric.Int64CounterOption,
) (SDKSpanStarted, error) {
	// Check if the meter is nil. A nil meter yields a no-op instrument so
	// callers never need a nil check before recording.
	if m == nil {
		return SDKSpanStarted{noop.Int64Counter{}}, nil
	}

	i, err := m.Int64Counter(
		"otel.sdk.span.started",
		append([]metric.Int64CounterOption{
			metric.WithDescription("The number of created spans."),
			metric.WithUnit("{span}"),
		}, opt...)...,
	)
	if err != nil {
		return SDKSpanStarted{noop.Int64Counter{}}, err
	}
	return SDKSpanStarted{i}, nil
}

// Inst returns the underlying metric instrument.
func (m SDKSpanStarted) Inst() metric.Int64Counter {
	return m.Int64Counter
}

// Name returns the semantic convention name of the instrument.
+func (SDKSpanStarted) Name() string { + return "otel.sdk.span.started" +} + +// Unit returns the semantic convention unit of the instrument +func (SDKSpanStarted) Unit() string { + return "{span}" +} + +// Description returns the semantic convention description of the instrument +func (SDKSpanStarted) Description() string { + return "The number of created spans." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// Implementations MUST record this metric for all spans, even for non-recording +// ones. +func (m SDKSpanStarted) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// Implementations MUST record this metric for all spans, even for non-recording +// ones. +func (m SDKSpanStarted) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrSpanParentOrigin returns an optional attribute for the +// "otel.span.parent.origin" semantic convention. It represents the determines +// whether the span has a parent span, and if so, [whether it is a remote parent] +// . 
+//
+// [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote
+func (SDKSpanStarted) AttrSpanParentOrigin(val SpanParentOriginAttr) attribute.KeyValue {
+	return attribute.String("otel.span.parent.origin", string(val))
+}
+
+// AttrSpanSamplingResult returns an optional attribute for the
+// "otel.span.sampling_result" semantic convention. It represents the result
+// value of the sampler for this span.
+func (SDKSpanStarted) AttrSpanSamplingResult(val SpanSamplingResultAttr) attribute.KeyValue {
+	return attribute.String("otel.span.sampling_result", string(val))
+}
\ No newline at end of file
diff --git a/semconv/v1.37.0/processconv/metric.go b/semconv/v1.37.0/processconv/metric.go
new file mode 100644
index 00000000000..64921e0410d
--- /dev/null
+++ b/semconv/v1.37.0/processconv/metric.go
@@ -0,0 +1,1101 @@
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package processconv provides types and functionality for OpenTelemetry semantic
+// conventions in the "process" namespace.
+package processconv
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/noop"
+)
+
+var (
+	addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }}
+	recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }}
+)
+
+// CPUModeAttr is an attribute conforming to the cpu.mode semantic conventions.
+// It represents a process SHOULD be characterized *either* by data points with
+// no `mode` labels, *or only* data points with `mode` labels.
+type CPUModeAttr string
+
+var (
+	// CPUModeUser is the user.
+	CPUModeUser CPUModeAttr = "user"
+	// CPUModeSystem is the system.
+	CPUModeSystem CPUModeAttr = "system"
+	// CPUModeNice is the nice.
+	CPUModeNice CPUModeAttr = "nice"
+	// CPUModeIdle is the idle.
+ CPUModeIdle CPUModeAttr = "idle" + // CPUModeIOWait is the IO Wait. + CPUModeIOWait CPUModeAttr = "iowait" + // CPUModeInterrupt is the interrupt. + CPUModeInterrupt CPUModeAttr = "interrupt" + // CPUModeSteal is the steal. + CPUModeSteal CPUModeAttr = "steal" + // CPUModeKernel is the kernel. + CPUModeKernel CPUModeAttr = "kernel" +) + +// DiskIODirectionAttr is an attribute conforming to the disk.io.direction +// semantic conventions. It represents the disk IO operation direction. +type DiskIODirectionAttr string + +var ( + // DiskIODirectionRead is the standardized value "read" of DiskIODirectionAttr. + DiskIODirectionRead DiskIODirectionAttr = "read" + // DiskIODirectionWrite is the standardized value "write" of + // DiskIODirectionAttr. + DiskIODirectionWrite DiskIODirectionAttr = "write" +) + +// NetworkIODirectionAttr is an attribute conforming to the network.io.direction +// semantic conventions. It represents the network IO operation direction. +type NetworkIODirectionAttr string + +var ( + // NetworkIODirectionTransmit is the standardized value "transmit" of + // NetworkIODirectionAttr. + NetworkIODirectionTransmit NetworkIODirectionAttr = "transmit" + // NetworkIODirectionReceive is the standardized value "receive" of + // NetworkIODirectionAttr. + NetworkIODirectionReceive NetworkIODirectionAttr = "receive" +) + +// ContextSwitchTypeAttr is an attribute conforming to the +// process.context_switch_type semantic conventions. It represents the specifies +// whether the context switches for this data point were voluntary or +// involuntary. +type ContextSwitchTypeAttr string + +var ( + // ContextSwitchTypeVoluntary is the standardized value "voluntary" of + // ContextSwitchTypeAttr. + ContextSwitchTypeVoluntary ContextSwitchTypeAttr = "voluntary" + // ContextSwitchTypeInvoluntary is the standardized value "involuntary" of + // ContextSwitchTypeAttr. 
+ ContextSwitchTypeInvoluntary ContextSwitchTypeAttr = "involuntary" +) + +// PagingFaultTypeAttr is an attribute conforming to the +// process.paging.fault_type semantic conventions. It represents the type of page +// fault for this data point. Type `major` is for major/hard page faults, and +// `minor` is for minor/soft page faults. +type PagingFaultTypeAttr string + +var ( + // PagingFaultTypeMajor is the standardized value "major" of + // PagingFaultTypeAttr. + PagingFaultTypeMajor PagingFaultTypeAttr = "major" + // PagingFaultTypeMinor is the standardized value "minor" of + // PagingFaultTypeAttr. + PagingFaultTypeMinor PagingFaultTypeAttr = "minor" +) + +// ContextSwitches is an instrument used to record metric values conforming to +// the "process.context_switches" semantic conventions. It represents the number +// of times the process has been context switched. +type ContextSwitches struct { + metric.Int64Counter +} + +// NewContextSwitches returns a new ContextSwitches instrument. +func NewContextSwitches( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (ContextSwitches, error) { + // Check if the meter is nil. + if m == nil { + return ContextSwitches{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "process.context_switches", + append([]metric.Int64CounterOption{ + metric.WithDescription("Number of times the process has been context switched."), + metric.WithUnit("{context_switch}"), + }, opt...)..., + ) + if err != nil { + return ContextSwitches{noop.Int64Counter{}}, err + } + return ContextSwitches{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ContextSwitches) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. 
+func (ContextSwitches) Name() string { + return "process.context_switches" +} + +// Unit returns the semantic convention unit of the instrument +func (ContextSwitches) Unit() string { + return "{context_switch}" +} + +// Description returns the semantic convention description of the instrument +func (ContextSwitches) Description() string { + return "Number of times the process has been context switched." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m ContextSwitches) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m ContextSwitches) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrContextSwitchType returns an optional attribute for the +// "process.context_switch_type" semantic convention. It represents the specifies +// whether the context switches for this data point were voluntary or +// involuntary. +func (ContextSwitches) AttrContextSwitchType(val ContextSwitchTypeAttr) attribute.KeyValue { + return attribute.String("process.context_switch_type", string(val)) +} + +// CPUTime is an instrument used to record metric values conforming to the +// "process.cpu.time" semantic conventions. It represents the total CPU seconds +// broken down by different states. 
+type CPUTime struct { + metric.Float64ObservableCounter +} + +// NewCPUTime returns a new CPUTime instrument. +func NewCPUTime( + m metric.Meter, + opt ...metric.Float64ObservableCounterOption, +) (CPUTime, error) { + // Check if the meter is nil. + if m == nil { + return CPUTime{noop.Float64ObservableCounter{}}, nil + } + + i, err := m.Float64ObservableCounter( + "process.cpu.time", + append([]metric.Float64ObservableCounterOption{ + metric.WithDescription("Total CPU seconds broken down by different states."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return CPUTime{noop.Float64ObservableCounter{}}, err + } + return CPUTime{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m CPUTime) Inst() metric.Float64ObservableCounter { + return m.Float64ObservableCounter +} + +// Name returns the semantic convention name of the instrument. +func (CPUTime) Name() string { + return "process.cpu.time" +} + +// Unit returns the semantic convention unit of the instrument +func (CPUTime) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (CPUTime) Description() string { + return "Total CPU seconds broken down by different states." +} + +// AttrCPUMode returns an optional attribute for the "cpu.mode" semantic +// convention. It represents a process SHOULD be characterized *either* by data +// points with no `mode` labels, *or only* data points with `mode` labels. +func (CPUTime) AttrCPUMode(val CPUModeAttr) attribute.KeyValue { + return attribute.String("cpu.mode", string(val)) +} + +// CPUUtilization is an instrument used to record metric values conforming to the +// "process.cpu.utilization" semantic conventions. It represents the difference +// in process.cpu.time since the last measurement, divided by the elapsed time +// and number of CPUs available to the process. 
+type CPUUtilization struct { + metric.Int64Gauge +} + +// NewCPUUtilization returns a new CPUUtilization instrument. +func NewCPUUtilization( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (CPUUtilization, error) { + // Check if the meter is nil. + if m == nil { + return CPUUtilization{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "process.cpu.utilization", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process."), + metric.WithUnit("1"), + }, opt...)..., + ) + if err != nil { + return CPUUtilization{noop.Int64Gauge{}}, err + } + return CPUUtilization{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m CPUUtilization) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (CPUUtilization) Name() string { + return "process.cpu.utilization" +} + +// Unit returns the semantic convention unit of the instrument +func (CPUUtilization) Unit() string { + return "1" +} + +// Description returns the semantic convention description of the instrument +func (CPUUtilization) Description() string { + return "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process." +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m CPUUtilization) Record( + ctx context.Context, + val int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+func (m CPUUtilization) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// AttrCPUMode returns an optional attribute for the "cpu.mode" semantic +// convention. It represents a process SHOULD be characterized *either* by data +// points with no `mode` labels, *or only* data points with `mode` labels. +func (CPUUtilization) AttrCPUMode(val CPUModeAttr) attribute.KeyValue { + return attribute.String("cpu.mode", string(val)) +} + +// DiskIO is an instrument used to record metric values conforming to the +// "process.disk.io" semantic conventions. It represents the disk bytes +// transferred. +type DiskIO struct { + metric.Int64Counter +} + +// NewDiskIO returns a new DiskIO instrument. +func NewDiskIO( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (DiskIO, error) { + // Check if the meter is nil. + if m == nil { + return DiskIO{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "process.disk.io", + append([]metric.Int64CounterOption{ + metric.WithDescription("Disk bytes transferred."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return DiskIO{noop.Int64Counter{}}, err + } + return DiskIO{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m DiskIO) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (DiskIO) Name() string { + return "process.disk.io" +} + +// Unit returns the semantic convention unit of the instrument +func (DiskIO) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (DiskIO) Description() string { + return "Disk bytes transferred." 
+} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m DiskIO) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m DiskIO) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrDiskIODirection returns an optional attribute for the "disk.io.direction" +// semantic convention. It represents the disk IO operation direction. +func (DiskIO) AttrDiskIODirection(val DiskIODirectionAttr) attribute.KeyValue { + return attribute.String("disk.io.direction", string(val)) +} + +// MemoryUsage is an instrument used to record metric values conforming to the +// "process.memory.usage" semantic conventions. It represents the amount of +// physical memory in use. +type MemoryUsage struct { + metric.Int64UpDownCounter +} + +// NewMemoryUsage returns a new MemoryUsage instrument. +func NewMemoryUsage( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (MemoryUsage, error) { + // Check if the meter is nil. 
+ if m == nil { + return MemoryUsage{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "process.memory.usage", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The amount of physical memory in use."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return MemoryUsage{noop.Int64UpDownCounter{}}, err + } + return MemoryUsage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m MemoryUsage) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (MemoryUsage) Name() string { + return "process.memory.usage" +} + +// Unit returns the semantic convention unit of the instrument +func (MemoryUsage) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (MemoryUsage) Description() string { + return "The amount of physical memory in use." +} + +// Add adds incr to the existing count for attrs. +func (m MemoryUsage) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m MemoryUsage) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// MemoryVirtual is an instrument used to record metric values conforming to the +// "process.memory.virtual" semantic conventions. 
It represents the amount of +// committed virtual memory. +type MemoryVirtual struct { + metric.Int64UpDownCounter +} + +// NewMemoryVirtual returns a new MemoryVirtual instrument. +func NewMemoryVirtual( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (MemoryVirtual, error) { + // Check if the meter is nil. + if m == nil { + return MemoryVirtual{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "process.memory.virtual", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The amount of committed virtual memory."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return MemoryVirtual{noop.Int64UpDownCounter{}}, err + } + return MemoryVirtual{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m MemoryVirtual) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (MemoryVirtual) Name() string { + return "process.memory.virtual" +} + +// Unit returns the semantic convention unit of the instrument +func (MemoryVirtual) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (MemoryVirtual) Description() string { + return "The amount of committed virtual memory." +} + +// Add adds incr to the existing count for attrs. +func (m MemoryVirtual) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+func (m MemoryVirtual) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// NetworkIO is an instrument used to record metric values conforming to the +// "process.network.io" semantic conventions. It represents the network bytes +// transferred. +type NetworkIO struct { + metric.Int64Counter +} + +// NewNetworkIO returns a new NetworkIO instrument. +func NewNetworkIO( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (NetworkIO, error) { + // Check if the meter is nil. + if m == nil { + return NetworkIO{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "process.network.io", + append([]metric.Int64CounterOption{ + metric.WithDescription("Network bytes transferred."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return NetworkIO{noop.Int64Counter{}}, err + } + return NetworkIO{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NetworkIO) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (NetworkIO) Name() string { + return "process.network.io" +} + +// Unit returns the semantic convention unit of the instrument +func (NetworkIO) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (NetworkIO) Description() string { + return "Network bytes transferred." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. 
+func (m NetworkIO) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m NetworkIO) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrNetworkIODirection returns an optional attribute for the +// "network.io.direction" semantic convention. It represents the network IO +// operation direction. +func (NetworkIO) AttrNetworkIODirection(val NetworkIODirectionAttr) attribute.KeyValue { + return attribute.String("network.io.direction", string(val)) +} + +// OpenFileDescriptorCount is an instrument used to record metric values +// conforming to the "process.open_file_descriptor.count" semantic conventions. +// It represents the number of file descriptors in use by the process. +type OpenFileDescriptorCount struct { + metric.Int64UpDownCounter +} + +// NewOpenFileDescriptorCount returns a new OpenFileDescriptorCount instrument. +func NewOpenFileDescriptorCount( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (OpenFileDescriptorCount, error) { + // Check if the meter is nil. 
+ if m == nil { + return OpenFileDescriptorCount{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "process.open_file_descriptor.count", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of file descriptors in use by the process."), + metric.WithUnit("{file_descriptor}"), + }, opt...)..., + ) + if err != nil { + return OpenFileDescriptorCount{noop.Int64UpDownCounter{}}, err + } + return OpenFileDescriptorCount{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m OpenFileDescriptorCount) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (OpenFileDescriptorCount) Name() string { + return "process.open_file_descriptor.count" +} + +// Unit returns the semantic convention unit of the instrument +func (OpenFileDescriptorCount) Unit() string { + return "{file_descriptor}" +} + +// Description returns the semantic convention description of the instrument +func (OpenFileDescriptorCount) Description() string { + return "Number of file descriptors in use by the process." +} + +// Add adds incr to the existing count for attrs. +func (m OpenFileDescriptorCount) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+func (m OpenFileDescriptorCount) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// PagingFaults is an instrument used to record metric values conforming to the +// "process.paging.faults" semantic conventions. It represents the number of page +// faults the process has made. +type PagingFaults struct { + metric.Int64Counter +} + +// NewPagingFaults returns a new PagingFaults instrument. +func NewPagingFaults( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (PagingFaults, error) { + // Check if the meter is nil. + if m == nil { + return PagingFaults{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "process.paging.faults", + append([]metric.Int64CounterOption{ + metric.WithDescription("Number of page faults the process has made."), + metric.WithUnit("{fault}"), + }, opt...)..., + ) + if err != nil { + return PagingFaults{noop.Int64Counter{}}, err + } + return PagingFaults{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m PagingFaults) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (PagingFaults) Name() string { + return "process.paging.faults" +} + +// Unit returns the semantic convention unit of the instrument +func (PagingFaults) Unit() string { + return "{fault}" +} + +// Description returns the semantic convention description of the instrument +func (PagingFaults) Description() string { + return "Number of page faults the process has made." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. 
+func (m PagingFaults) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m PagingFaults) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrPagingFaultType returns an optional attribute for the +// "process.paging.fault_type" semantic convention. It represents the type of +// page fault for this data point. Type `major` is for major/hard page faults, +// and `minor` is for minor/soft page faults. +func (PagingFaults) AttrPagingFaultType(val PagingFaultTypeAttr) attribute.KeyValue { + return attribute.String("process.paging.fault_type", string(val)) +} + +// ThreadCount is an instrument used to record metric values conforming to the +// "process.thread.count" semantic conventions. It represents the process threads +// count. +type ThreadCount struct { + metric.Int64UpDownCounter +} + +// NewThreadCount returns a new ThreadCount instrument. +func NewThreadCount( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ThreadCount, error) { + // Check if the meter is nil. 
+ if m == nil { + return ThreadCount{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "process.thread.count", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Process threads count."), + metric.WithUnit("{thread}"), + }, opt...)..., + ) + if err != nil { + return ThreadCount{noop.Int64UpDownCounter{}}, err + } + return ThreadCount{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ThreadCount) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ThreadCount) Name() string { + return "process.thread.count" +} + +// Unit returns the semantic convention unit of the instrument +func (ThreadCount) Unit() string { + return "{thread}" +} + +// Description returns the semantic convention description of the instrument +func (ThreadCount) Description() string { + return "Process threads count." +} + +// Add adds incr to the existing count for attrs. +func (m ThreadCount) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m ThreadCount) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// Uptime is an instrument used to record metric values conforming to the +// "process.uptime" semantic conventions. It represents the time the process has +// been running. 
+type Uptime struct { + metric.Float64Gauge +} + +// NewUptime returns a new Uptime instrument. +func NewUptime( + m metric.Meter, + opt ...metric.Float64GaugeOption, +) (Uptime, error) { + // Check if the meter is nil. + if m == nil { + return Uptime{noop.Float64Gauge{}}, nil + } + + i, err := m.Float64Gauge( + "process.uptime", + append([]metric.Float64GaugeOption{ + metric.WithDescription("The time the process has been running."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return Uptime{noop.Float64Gauge{}}, err + } + return Uptime{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m Uptime) Inst() metric.Float64Gauge { + return m.Float64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (Uptime) Name() string { + return "process.uptime" +} + +// Unit returns the semantic convention unit of the instrument +func (Uptime) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (Uptime) Description() string { + return "The time the process has been running." +} + +// Record records val to the current distribution for attrs. +// +// Instrumentations SHOULD use a gauge with type `double` and measure uptime in +// seconds as a floating point number with the highest precision available. +// The actual accuracy would depend on the instrumentation and operating system. +func (m Uptime) Record(ctx context.Context, val float64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Float64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Float64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+// +// Instrumentations SHOULD use a gauge with type `double` and measure uptime in +// seconds as a floating point number with the highest precision available. +// The actual accuracy would depend on the instrumentation and operating system. +func (m Uptime) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Gauge.Record(ctx, val, *o...) +} \ No newline at end of file diff --git a/semconv/v1.37.0/rpcconv/metric.go b/semconv/v1.37.0/rpcconv/metric.go new file mode 100644 index 00000000000..146b7eda62c --- /dev/null +++ b/semconv/v1.37.0/rpcconv/metric.go @@ -0,0 +1,920 @@ +// Code generated from semantic convention specification. DO NOT EDIT. + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package httpconv provides types and functionality for OpenTelemetry semantic +// conventions in the "rpc" namespace. +package rpcconv + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" +) + +var ( + addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }} + recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }} +) + +// ClientDuration is an instrument used to record metric values conforming to the +// "rpc.client.duration" semantic conventions. It represents the measures the +// duration of outbound RPC. +type ClientDuration struct { + metric.Float64Histogram +} + +// NewClientDuration returns a new ClientDuration instrument. +func NewClientDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ClientDuration, error) { + // Check if the meter is nil. 
+ if m == nil { + return ClientDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "rpc.client.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("Measures the duration of outbound RPC."), + metric.WithUnit("ms"), + }, opt...)..., + ) + if err != nil { + return ClientDuration{noop.Float64Histogram{}}, err + } + return ClientDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientDuration) Name() string { + return "rpc.client.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientDuration) Unit() string { + return "ms" +} + +// Description returns the semantic convention description of the instrument +func (ClientDuration) Description() string { + return "Measures the duration of outbound RPC." +} + +// Record records val to the current distribution for attrs. +// +// While streaming RPCs may record this metric as start-of-batch +// to end-of-batch, it's hard to interpret in practice. +// +// **Streaming**: N/A. +func (m ClientDuration) Record(ctx context.Context, val float64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// While streaming RPCs may record this metric as start-of-batch +// to end-of-batch, it's hard to interpret in practice. +// +// **Streaming**: N/A. 
+func (m ClientDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// ClientRequestSize is an instrument used to record metric values conforming to +// the "rpc.client.request.size" semantic conventions. It represents the measures +// the size of RPC request messages (uncompressed). +type ClientRequestSize struct { + metric.Int64Histogram +} + +// NewClientRequestSize returns a new ClientRequestSize instrument. +func NewClientRequestSize( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ClientRequestSize, error) { + // Check if the meter is nil. + if m == nil { + return ClientRequestSize{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "rpc.client.request.size", + append([]metric.Int64HistogramOption{ + metric.WithDescription("Measures the size of RPC request messages (uncompressed)."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ClientRequestSize{noop.Int64Histogram{}}, err + } + return ClientRequestSize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientRequestSize) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientRequestSize) Name() string { + return "rpc.client.request.size" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientRequestSize) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ClientRequestSize) Description() string { + return "Measures the size of RPC request messages (uncompressed)." +} + +// Record records val to the current distribution for attrs. 
+// +// **Streaming**: Recorded per message in a streaming batch +func (m ClientRequestSize) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// **Streaming**: Recorded per message in a streaming batch +func (m ClientRequestSize) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// ClientRequestsPerRPC is an instrument used to record metric values conforming +// to the "rpc.client.requests_per_rpc" semantic conventions. It represents the +// measures the number of messages received per RPC. +type ClientRequestsPerRPC struct { + metric.Int64Histogram +} + +// NewClientRequestsPerRPC returns a new ClientRequestsPerRPC instrument. +func NewClientRequestsPerRPC( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ClientRequestsPerRPC, error) { + // Check if the meter is nil. + if m == nil { + return ClientRequestsPerRPC{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "rpc.client.requests_per_rpc", + append([]metric.Int64HistogramOption{ + metric.WithDescription("Measures the number of messages received per RPC."), + metric.WithUnit("{count}"), + }, opt...)..., + ) + if err != nil { + return ClientRequestsPerRPC{noop.Int64Histogram{}}, err + } + return ClientRequestsPerRPC{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m ClientRequestsPerRPC) Inst() metric.Int64Histogram {
+	return m.Int64Histogram
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ClientRequestsPerRPC) Name() string {
+	return "rpc.client.requests_per_rpc"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ClientRequestsPerRPC) Unit() string {
+	return "{count}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ClientRequestsPerRPC) Description() string {
+	return "Measures the number of messages received per RPC."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// Should be 1 for all non-streaming RPCs.
+//
+// **Streaming**: This metric is required for server and client streaming RPCs
+func (m ClientRequestsPerRPC) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) {
+	if len(attrs) == 0 {
+		m.Int64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributes(attrs...))
+	m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// Should be 1 for all non-streaming RPCs.
+//
+// **Streaming**: This metric is required for server and client streaming RPCs
+func (m ClientRequestsPerRPC) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// ClientResponseSize is an instrument used to record metric values conforming to
+// the "rpc.client.response.size" semantic conventions. It represents the
+// measures the size of RPC response messages (uncompressed).
+type ClientResponseSize struct { + metric.Int64Histogram +} + +// NewClientResponseSize returns a new ClientResponseSize instrument. +func NewClientResponseSize( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ClientResponseSize, error) { + // Check if the meter is nil. + if m == nil { + return ClientResponseSize{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "rpc.client.response.size", + append([]metric.Int64HistogramOption{ + metric.WithDescription("Measures the size of RPC response messages (uncompressed)."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ClientResponseSize{noop.Int64Histogram{}}, err + } + return ClientResponseSize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientResponseSize) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ClientResponseSize) Name() string { + return "rpc.client.response.size" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientResponseSize) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ClientResponseSize) Description() string { + return "Measures the size of RPC response messages (uncompressed)." +} + +// Record records val to the current distribution for attrs. +// +// **Streaming**: Recorded per response in a streaming batch +func (m ClientResponseSize) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+// +// **Streaming**: Recorded per response in a streaming batch +func (m ClientResponseSize) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// ClientResponsesPerRPC is an instrument used to record metric values conforming +// to the "rpc.client.responses_per_rpc" semantic conventions. It represents the +// measures the number of messages sent per RPC. +type ClientResponsesPerRPC struct { + metric.Int64Histogram +} + +// NewClientResponsesPerRPC returns a new ClientResponsesPerRPC instrument. +func NewClientResponsesPerRPC( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ClientResponsesPerRPC, error) { + // Check if the meter is nil. + if m == nil { + return ClientResponsesPerRPC{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "rpc.client.responses_per_rpc", + append([]metric.Int64HistogramOption{ + metric.WithDescription("Measures the number of messages sent per RPC."), + metric.WithUnit("{count}"), + }, opt...)..., + ) + if err != nil { + return ClientResponsesPerRPC{noop.Int64Histogram{}}, err + } + return ClientResponsesPerRPC{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ClientResponsesPerRPC) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. 
+func (ClientResponsesPerRPC) Name() string { + return "rpc.client.responses_per_rpc" +} + +// Unit returns the semantic convention unit of the instrument +func (ClientResponsesPerRPC) Unit() string { + return "{count}" +} + +// Description returns the semantic convention description of the instrument +func (ClientResponsesPerRPC) Description() string { + return "Measures the number of messages sent per RPC." +} + +// Record records val to the current distribution for attrs. +// +// Should be 1 for all non-streaming RPCs. +// +// **Streaming**: This metric is required for server and client streaming RPCs +func (m ClientResponsesPerRPC) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// Should be 1 for all non-streaming RPCs. +// +// **Streaming**: This metric is required for server and client streaming RPCs +func (m ClientResponsesPerRPC) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// ServerDuration is an instrument used to record metric values conforming to the +// "rpc.server.duration" semantic conventions. It represents the measures the +// duration of inbound RPC. +type ServerDuration struct { + metric.Float64Histogram +} + +// NewServerDuration returns a new ServerDuration instrument. 
+func NewServerDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ServerDuration, error) { + // Check if the meter is nil. + if m == nil { + return ServerDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "rpc.server.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("Measures the duration of inbound RPC."), + metric.WithUnit("ms"), + }, opt...)..., + ) + if err != nil { + return ServerDuration{noop.Float64Histogram{}}, err + } + return ServerDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ServerDuration) Name() string { + return "rpc.server.duration" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerDuration) Unit() string { + return "ms" +} + +// Description returns the semantic convention description of the instrument +func (ServerDuration) Description() string { + return "Measures the duration of inbound RPC." +} + +// Record records val to the current distribution for attrs. +// +// While streaming RPCs may record this metric as start-of-batch +// to end-of-batch, it's hard to interpret in practice. +// +// **Streaming**: N/A. +func (m ServerDuration) Record(ctx context.Context, val float64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Float64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// While streaming RPCs may record this metric as start-of-batch +// to end-of-batch, it's hard to interpret in practice. +// +// **Streaming**: N/A. 
+func (m ServerDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Histogram.Record(ctx, val, *o...) +} + +// ServerRequestSize is an instrument used to record metric values conforming to +// the "rpc.server.request.size" semantic conventions. It represents the measures +// the size of RPC request messages (uncompressed). +type ServerRequestSize struct { + metric.Int64Histogram +} + +// NewServerRequestSize returns a new ServerRequestSize instrument. +func NewServerRequestSize( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ServerRequestSize, error) { + // Check if the meter is nil. + if m == nil { + return ServerRequestSize{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "rpc.server.request.size", + append([]metric.Int64HistogramOption{ + metric.WithDescription("Measures the size of RPC request messages (uncompressed)."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ServerRequestSize{noop.Int64Histogram{}}, err + } + return ServerRequestSize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerRequestSize) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ServerRequestSize) Name() string { + return "rpc.server.request.size" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerRequestSize) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ServerRequestSize) Description() string { + return "Measures the size of RPC request messages (uncompressed)." +} + +// Record records val to the current distribution for attrs. 
+// +// **Streaming**: Recorded per message in a streaming batch +func (m ServerRequestSize) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// **Streaming**: Recorded per message in a streaming batch +func (m ServerRequestSize) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// ServerRequestsPerRPC is an instrument used to record metric values conforming +// to the "rpc.server.requests_per_rpc" semantic conventions. It represents the +// measures the number of messages received per RPC. +type ServerRequestsPerRPC struct { + metric.Int64Histogram +} + +// NewServerRequestsPerRPC returns a new ServerRequestsPerRPC instrument. +func NewServerRequestsPerRPC( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ServerRequestsPerRPC, error) { + // Check if the meter is nil. + if m == nil { + return ServerRequestsPerRPC{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "rpc.server.requests_per_rpc", + append([]metric.Int64HistogramOption{ + metric.WithDescription("Measures the number of messages received per RPC."), + metric.WithUnit("{count}"), + }, opt...)..., + ) + if err != nil { + return ServerRequestsPerRPC{noop.Int64Histogram{}}, err + } + return ServerRequestsPerRPC{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m ServerRequestsPerRPC) Inst() metric.Int64Histogram {
+	return m.Int64Histogram
+}
+
+// Name returns the semantic convention name of the instrument.
+func (ServerRequestsPerRPC) Name() string {
+	return "rpc.server.requests_per_rpc"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ServerRequestsPerRPC) Unit() string {
+	return "{count}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ServerRequestsPerRPC) Description() string {
+	return "Measures the number of messages received per RPC."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// Should be 1 for all non-streaming RPCs.
+//
+// **Streaming** : This metric is required for server and client streaming RPCs
+func (m ServerRequestsPerRPC) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) {
+	if len(attrs) == 0 {
+		m.Int64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributes(attrs...))
+	m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// Should be 1 for all non-streaming RPCs.
+//
+// **Streaming** : This metric is required for server and client streaming RPCs
+func (m ServerRequestsPerRPC) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// ServerResponseSize is an instrument used to record metric values conforming to
+// the "rpc.server.response.size" semantic conventions. It represents the
+// measures the size of RPC response messages (uncompressed).
+type ServerResponseSize struct { + metric.Int64Histogram +} + +// NewServerResponseSize returns a new ServerResponseSize instrument. +func NewServerResponseSize( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ServerResponseSize, error) { + // Check if the meter is nil. + if m == nil { + return ServerResponseSize{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "rpc.server.response.size", + append([]metric.Int64HistogramOption{ + metric.WithDescription("Measures the size of RPC response messages (uncompressed)."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return ServerResponseSize{noop.Int64Histogram{}}, err + } + return ServerResponseSize{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerResponseSize) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. +func (ServerResponseSize) Name() string { + return "rpc.server.response.size" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerResponseSize) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (ServerResponseSize) Description() string { + return "Measures the size of RPC response messages (uncompressed)." +} + +// Record records val to the current distribution for attrs. +// +// **Streaming**: Recorded per response in a streaming batch +func (m ServerResponseSize) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Histogram.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+// +// **Streaming**: Recorded per response in a streaming batch +func (m ServerResponseSize) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Histogram.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Histogram.Record(ctx, val, *o...) +} + +// ServerResponsesPerRPC is an instrument used to record metric values conforming +// to the "rpc.server.responses_per_rpc" semantic conventions. It represents the +// measures the number of messages sent per RPC. +type ServerResponsesPerRPC struct { + metric.Int64Histogram +} + +// NewServerResponsesPerRPC returns a new ServerResponsesPerRPC instrument. +func NewServerResponsesPerRPC( + m metric.Meter, + opt ...metric.Int64HistogramOption, +) (ServerResponsesPerRPC, error) { + // Check if the meter is nil. + if m == nil { + return ServerResponsesPerRPC{noop.Int64Histogram{}}, nil + } + + i, err := m.Int64Histogram( + "rpc.server.responses_per_rpc", + append([]metric.Int64HistogramOption{ + metric.WithDescription("Measures the number of messages sent per RPC."), + metric.WithUnit("{count}"), + }, opt...)..., + ) + if err != nil { + return ServerResponsesPerRPC{noop.Int64Histogram{}}, err + } + return ServerResponsesPerRPC{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerResponsesPerRPC) Inst() metric.Int64Histogram { + return m.Int64Histogram +} + +// Name returns the semantic convention name of the instrument. 
+func (ServerResponsesPerRPC) Name() string {
+	return "rpc.server.responses_per_rpc"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ServerResponsesPerRPC) Unit() string {
+	return "{count}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ServerResponsesPerRPC) Description() string {
+	return "Measures the number of messages sent per RPC."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// Should be 1 for all non-streaming RPCs.
+//
+// **Streaming**: This metric is required for server and client streaming RPCs
+func (m ServerResponsesPerRPC) Record(ctx context.Context, val int64, attrs ...attribute.KeyValue) {
+	if len(attrs) == 0 {
+		m.Int64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributes(attrs...))
+	m.Int64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// Should be 1 for all non-streaming RPCs.
+//
+// **Streaming**: This metric is required for server and client streaming RPCs
+func (m ServerResponsesPerRPC) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Histogram.Record(ctx, val, *o...)
+}
\ No newline at end of file
diff --git a/semconv/v1.37.0/schema.go b/semconv/v1.37.0/schema.go
new file mode 100644
index 00000000000..f8a0b704418
--- /dev/null
+++ b/semconv/v1.37.0/schema.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/
+const SchemaURL = "https://opentelemetry.io/schemas/1.37.0"
diff --git a/semconv/v1.37.0/signalrconv/metric.go b/semconv/v1.37.0/signalrconv/metric.go
new file mode 100644
index 00000000000..57fb9528645
--- /dev/null
+++ b/semconv/v1.37.0/signalrconv/metric.go
@@ -0,0 +1,285 @@
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package signalrconv provides types and functionality for OpenTelemetry semantic
+// conventions in the "signalr" namespace.
+package signalrconv
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/noop"
+)
+
+var (
+	addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }}
+	recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }}
+)
+
+// ConnectionStatusAttr is an attribute conforming to the
+// signalr.connection.status semantic conventions. It represents the signalR HTTP
+// connection closure status.
+type ConnectionStatusAttr string
+
+var (
+	// ConnectionStatusNormalClosure is the connection was closed normally.
+	ConnectionStatusNormalClosure ConnectionStatusAttr = "normal_closure"
+	// ConnectionStatusTimeout is the connection was closed due to a timeout.
+ ConnectionStatusTimeout ConnectionStatusAttr = "timeout" + // ConnectionStatusAppShutdown is the connection was closed because the app is + // shutting down. + ConnectionStatusAppShutdown ConnectionStatusAttr = "app_shutdown" +) + +// TransportAttr is an attribute conforming to the signalr.transport semantic +// conventions. It represents the [SignalR transport type]. +// +// [SignalR transport type]: https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md +type TransportAttr string + +var ( + // TransportServerSentEvents is the serverSentEvents protocol. + TransportServerSentEvents TransportAttr = "server_sent_events" + // TransportLongPolling is the longPolling protocol. + TransportLongPolling TransportAttr = "long_polling" + // TransportWebSockets is the webSockets protocol. + TransportWebSockets TransportAttr = "web_sockets" +) + +// ServerActiveConnections is an instrument used to record metric values +// conforming to the "signalr.server.active_connections" semantic conventions. It +// represents the number of connections that are currently active on the server. +type ServerActiveConnections struct { + metric.Int64UpDownCounter +} + +// NewServerActiveConnections returns a new ServerActiveConnections instrument. +func NewServerActiveConnections( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ServerActiveConnections, error) { + // Check if the meter is nil. + if m == nil { + return ServerActiveConnections{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "signalr.server.active_connections", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Number of connections that are currently active on the server."), + metric.WithUnit("{connection}"), + }, opt...)..., + ) + if err != nil { + return ServerActiveConnections{noop.Int64UpDownCounter{}}, err + } + return ServerActiveConnections{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m ServerActiveConnections) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ServerActiveConnections) Name() string { + return "signalr.server.active_connections" +} + +// Unit returns the semantic convention unit of the instrument +func (ServerActiveConnections) Unit() string { + return "{connection}" +} + +// Description returns the semantic convention description of the instrument +func (ServerActiveConnections) Description() string { + return "Number of connections that are currently active on the server." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// Meter name: `Microsoft.AspNetCore.Http.Connections`; Added in: ASP.NET Core +// 8.0 +func (m ServerActiveConnections) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// Meter name: `Microsoft.AspNetCore.Http.Connections`; Added in: ASP.NET Core +// 8.0 +func (m ServerActiveConnections) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrConnectionStatus returns an optional attribute for the +// "signalr.connection.status" semantic convention. It represents the signalR +// HTTP connection closure status. 
+func (ServerActiveConnections) AttrConnectionStatus(val ConnectionStatusAttr) attribute.KeyValue { + return attribute.String("signalr.connection.status", string(val)) +} + +// AttrTransport returns an optional attribute for the "signalr.transport" +// semantic convention. It represents the [SignalR transport type]. +// +// [SignalR transport type]: https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md +func (ServerActiveConnections) AttrTransport(val TransportAttr) attribute.KeyValue { + return attribute.String("signalr.transport", string(val)) +} + +// ServerConnectionDuration is an instrument used to record metric values +// conforming to the "signalr.server.connection.duration" semantic conventions. +// It represents the duration of connections on the server. +type ServerConnectionDuration struct { + metric.Float64Histogram +} + +// NewServerConnectionDuration returns a new ServerConnectionDuration instrument. +func NewServerConnectionDuration( + m metric.Meter, + opt ...metric.Float64HistogramOption, +) (ServerConnectionDuration, error) { + // Check if the meter is nil. + if m == nil { + return ServerConnectionDuration{noop.Float64Histogram{}}, nil + } + + i, err := m.Float64Histogram( + "signalr.server.connection.duration", + append([]metric.Float64HistogramOption{ + metric.WithDescription("The duration of connections on the server."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ServerConnectionDuration{noop.Float64Histogram{}}, err + } + return ServerConnectionDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ServerConnectionDuration) Inst() metric.Float64Histogram { + return m.Float64Histogram +} + +// Name returns the semantic convention name of the instrument. 
+func (ServerConnectionDuration) Name() string {
+	return "signalr.server.connection.duration"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ServerConnectionDuration) Unit() string {
+	return "s"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ServerConnectionDuration) Description() string {
+	return "The duration of connections on the server."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// Meter name: `Microsoft.AspNetCore.Http.Connections`; Added in: ASP.NET Core
+// 8.0
+func (m ServerConnectionDuration) Record(
+	ctx context.Context,
+	val float64,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			attrs...,
+		),
+	)
+
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// Meter name: `Microsoft.AspNetCore.Http.Connections`; Added in: ASP.NET Core
+// 8.0
+func (m ServerConnectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		// Fast path done: without this return the value was recorded a
+		// second time below, double-counting the histogram observation.
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrConnectionStatus returns an optional attribute for the
+// "signalr.connection.status" semantic convention. It represents the signalR
+// HTTP connection closure status.
+func (ServerConnectionDuration) AttrConnectionStatus(val ConnectionStatusAttr) attribute.KeyValue {
+	return attribute.String("signalr.connection.status", string(val))
+}
+
+// AttrTransport returns an optional attribute for the "signalr.transport"
+// semantic convention. It represents the [SignalR transport type].
+//
+// [SignalR transport type]: https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md
+func (ServerConnectionDuration) AttrTransport(val TransportAttr) attribute.KeyValue {
+	return attribute.String("signalr.transport", string(val))
+}
\ No newline at end of file
diff --git a/semconv/v1.37.0/systemconv/metric.go b/semconv/v1.37.0/systemconv/metric.go
new file mode 100644
index 00000000000..6c5443e0dc5
--- /dev/null
+++ b/semconv/v1.37.0/systemconv/metric.go
@@ -0,0 +1,3556 @@
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package systemconv provides types and functionality for OpenTelemetry semantic
+// conventions in the "system" namespace.
+package systemconv
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/noop"
+)
+
+var (
+	addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }}
+	recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }}
+)
+
+// CPUModeAttr is an attribute conforming to the cpu.mode semantic conventions.
+// It represents the mode of the CPU.
+type CPUModeAttr string
+
+var (
+	// CPUModeUser is the user.
+	CPUModeUser CPUModeAttr = "user"
+	// CPUModeSystem is the system.
+	CPUModeSystem CPUModeAttr = "system"
+	// CPUModeNice is the nice.
+	CPUModeNice CPUModeAttr = "nice"
+	// CPUModeIdle is the idle.
+	CPUModeIdle CPUModeAttr = "idle"
+	// CPUModeIOWait is the IO Wait.
+	CPUModeIOWait CPUModeAttr = "iowait"
+	// CPUModeInterrupt is the interrupt.
+ CPUModeInterrupt CPUModeAttr = "interrupt" + // CPUModeSteal is the steal. + CPUModeSteal CPUModeAttr = "steal" + // CPUModeKernel is the kernel. + CPUModeKernel CPUModeAttr = "kernel" +) + +// DiskIODirectionAttr is an attribute conforming to the disk.io.direction +// semantic conventions. It represents the disk IO operation direction. +type DiskIODirectionAttr string + +var ( + // DiskIODirectionRead is the standardized value "read" of DiskIODirectionAttr. + DiskIODirectionRead DiskIODirectionAttr = "read" + // DiskIODirectionWrite is the standardized value "write" of + // DiskIODirectionAttr. + DiskIODirectionWrite DiskIODirectionAttr = "write" +) + +// LinuxMemorySlabStateAttr is an attribute conforming to the +// linux.memory.slab.state semantic conventions. It represents the Linux Slab +// memory state. +type LinuxMemorySlabStateAttr string + +var ( + // LinuxMemorySlabStateReclaimable is the standardized value "reclaimable" of + // LinuxMemorySlabStateAttr. + LinuxMemorySlabStateReclaimable LinuxMemorySlabStateAttr = "reclaimable" + // LinuxMemorySlabStateUnreclaimable is the standardized value "unreclaimable" + // of LinuxMemorySlabStateAttr. + LinuxMemorySlabStateUnreclaimable LinuxMemorySlabStateAttr = "unreclaimable" +) + +// NetworkConnectionStateAttr is an attribute conforming to the +// network.connection.state semantic conventions. It represents the state of +// network connection. +type NetworkConnectionStateAttr string + +var ( + // NetworkConnectionStateClosed is the standardized value "closed" of + // NetworkConnectionStateAttr. + NetworkConnectionStateClosed NetworkConnectionStateAttr = "closed" + // NetworkConnectionStateCloseWait is the standardized value "close_wait" of + // NetworkConnectionStateAttr. + NetworkConnectionStateCloseWait NetworkConnectionStateAttr = "close_wait" + // NetworkConnectionStateClosing is the standardized value "closing" of + // NetworkConnectionStateAttr. 
+ NetworkConnectionStateClosing NetworkConnectionStateAttr = "closing" + // NetworkConnectionStateEstablished is the standardized value "established" of + // NetworkConnectionStateAttr. + NetworkConnectionStateEstablished NetworkConnectionStateAttr = "established" + // NetworkConnectionStateFinWait1 is the standardized value "fin_wait_1" of + // NetworkConnectionStateAttr. + NetworkConnectionStateFinWait1 NetworkConnectionStateAttr = "fin_wait_1" + // NetworkConnectionStateFinWait2 is the standardized value "fin_wait_2" of + // NetworkConnectionStateAttr. + NetworkConnectionStateFinWait2 NetworkConnectionStateAttr = "fin_wait_2" + // NetworkConnectionStateLastAck is the standardized value "last_ack" of + // NetworkConnectionStateAttr. + NetworkConnectionStateLastAck NetworkConnectionStateAttr = "last_ack" + // NetworkConnectionStateListen is the standardized value "listen" of + // NetworkConnectionStateAttr. + NetworkConnectionStateListen NetworkConnectionStateAttr = "listen" + // NetworkConnectionStateSynReceived is the standardized value "syn_received" of + // NetworkConnectionStateAttr. + NetworkConnectionStateSynReceived NetworkConnectionStateAttr = "syn_received" + // NetworkConnectionStateSynSent is the standardized value "syn_sent" of + // NetworkConnectionStateAttr. + NetworkConnectionStateSynSent NetworkConnectionStateAttr = "syn_sent" + // NetworkConnectionStateTimeWait is the standardized value "time_wait" of + // NetworkConnectionStateAttr. + NetworkConnectionStateTimeWait NetworkConnectionStateAttr = "time_wait" +) + +// NetworkIODirectionAttr is an attribute conforming to the network.io.direction +// semantic conventions. It represents the network IO operation direction. +type NetworkIODirectionAttr string + +var ( + // NetworkIODirectionTransmit is the standardized value "transmit" of + // NetworkIODirectionAttr. 
+ NetworkIODirectionTransmit NetworkIODirectionAttr = "transmit" + // NetworkIODirectionReceive is the standardized value "receive" of + // NetworkIODirectionAttr. + NetworkIODirectionReceive NetworkIODirectionAttr = "receive" +) + +// NetworkTransportAttr is an attribute conforming to the network.transport +// semantic conventions. It represents the [OSI transport layer] or +// [inter-process communication method]. +// +// [OSI transport layer]: https://wikipedia.org/wiki/Transport_layer +// [inter-process communication method]: https://wikipedia.org/wiki/Inter-process_communication +type NetworkTransportAttr string + +var ( + // NetworkTransportTCP is the TCP. + NetworkTransportTCP NetworkTransportAttr = "tcp" + // NetworkTransportUDP is the UDP. + NetworkTransportUDP NetworkTransportAttr = "udp" + // NetworkTransportPipe is the named or anonymous pipe. + NetworkTransportPipe NetworkTransportAttr = "pipe" + // NetworkTransportUnix is the unix domain socket. + NetworkTransportUnix NetworkTransportAttr = "unix" + // NetworkTransportQUIC is the QUIC. + NetworkTransportQUIC NetworkTransportAttr = "quic" +) + +// FilesystemStateAttr is an attribute conforming to the system.filesystem.state +// semantic conventions. It represents the filesystem state. +type FilesystemStateAttr string + +var ( + // FilesystemStateUsed is the standardized value "used" of FilesystemStateAttr. + FilesystemStateUsed FilesystemStateAttr = "used" + // FilesystemStateFree is the standardized value "free" of FilesystemStateAttr. + FilesystemStateFree FilesystemStateAttr = "free" + // FilesystemStateReserved is the standardized value "reserved" of + // FilesystemStateAttr. + FilesystemStateReserved FilesystemStateAttr = "reserved" +) + +// FilesystemTypeAttr is an attribute conforming to the system.filesystem.type +// semantic conventions. It represents the filesystem type. +type FilesystemTypeAttr string + +var ( + // FilesystemTypeFat32 is the standardized value "fat32" of FilesystemTypeAttr. 
+ FilesystemTypeFat32 FilesystemTypeAttr = "fat32" + // FilesystemTypeExfat is the standardized value "exfat" of FilesystemTypeAttr. + FilesystemTypeExfat FilesystemTypeAttr = "exfat" + // FilesystemTypeNtfs is the standardized value "ntfs" of FilesystemTypeAttr. + FilesystemTypeNtfs FilesystemTypeAttr = "ntfs" + // FilesystemTypeRefs is the standardized value "refs" of FilesystemTypeAttr. + FilesystemTypeRefs FilesystemTypeAttr = "refs" + // FilesystemTypeHfsplus is the standardized value "hfsplus" of + // FilesystemTypeAttr. + FilesystemTypeHfsplus FilesystemTypeAttr = "hfsplus" + // FilesystemTypeExt4 is the standardized value "ext4" of FilesystemTypeAttr. + FilesystemTypeExt4 FilesystemTypeAttr = "ext4" +) + +// MemoryStateAttr is an attribute conforming to the system.memory.state semantic +// conventions. It represents the memory state. +type MemoryStateAttr string + +var ( + // MemoryStateUsed is the actual used virtual memory in bytes. + MemoryStateUsed MemoryStateAttr = "used" + // MemoryStateFree is the standardized value "free" of MemoryStateAttr. + MemoryStateFree MemoryStateAttr = "free" + // MemoryStateBuffers is the standardized value "buffers" of MemoryStateAttr. + MemoryStateBuffers MemoryStateAttr = "buffers" + // MemoryStateCached is the standardized value "cached" of MemoryStateAttr. + MemoryStateCached MemoryStateAttr = "cached" +) + +// PagingDirectionAttr is an attribute conforming to the system.paging.direction +// semantic conventions. It represents the paging access direction. +type PagingDirectionAttr string + +var ( + // PagingDirectionIn is the standardized value "in" of PagingDirectionAttr. + PagingDirectionIn PagingDirectionAttr = "in" + // PagingDirectionOut is the standardized value "out" of PagingDirectionAttr. + PagingDirectionOut PagingDirectionAttr = "out" +) + +// PagingStateAttr is an attribute conforming to the system.paging.state semantic +// conventions. It represents the memory paging state. 
+type PagingStateAttr string + +var ( + // PagingStateUsed is the standardized value "used" of PagingStateAttr. + PagingStateUsed PagingStateAttr = "used" + // PagingStateFree is the standardized value "free" of PagingStateAttr. + PagingStateFree PagingStateAttr = "free" +) + +// PagingTypeAttr is an attribute conforming to the system.paging.type semantic +// conventions. It represents the memory paging type. +type PagingTypeAttr string + +var ( + // PagingTypeMajor is the standardized value "major" of PagingTypeAttr. + PagingTypeMajor PagingTypeAttr = "major" + // PagingTypeMinor is the standardized value "minor" of PagingTypeAttr. + PagingTypeMinor PagingTypeAttr = "minor" +) + +// ProcessStatusAttr is an attribute conforming to the system.process.status +// semantic conventions. It represents the process state, e.g., +// [Linux Process State Codes]. +// +// [Linux Process State Codes]: https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES +type ProcessStatusAttr string + +var ( + // ProcessStatusRunning is the standardized value "running" of + // ProcessStatusAttr. + ProcessStatusRunning ProcessStatusAttr = "running" + // ProcessStatusSleeping is the standardized value "sleeping" of + // ProcessStatusAttr. + ProcessStatusSleeping ProcessStatusAttr = "sleeping" + // ProcessStatusStopped is the standardized value "stopped" of + // ProcessStatusAttr. + ProcessStatusStopped ProcessStatusAttr = "stopped" + // ProcessStatusDefunct is the standardized value "defunct" of + // ProcessStatusAttr. + ProcessStatusDefunct ProcessStatusAttr = "defunct" +) + +// CPUFrequency is an instrument used to record metric values conforming to the +// "system.cpu.frequency" semantic conventions. It represents the operating +// frequency of the logical CPU in Hertz. +type CPUFrequency struct { + metric.Int64Gauge +} + +// NewCPUFrequency returns a new CPUFrequency instrument. 
+func NewCPUFrequency( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (CPUFrequency, error) { + // Check if the meter is nil. + if m == nil { + return CPUFrequency{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "system.cpu.frequency", + append([]metric.Int64GaugeOption{ + metric.WithDescription("Operating frequency of the logical CPU in Hertz."), + metric.WithUnit("Hz"), + }, opt...)..., + ) + if err != nil { + return CPUFrequency{noop.Int64Gauge{}}, err + } + return CPUFrequency{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m CPUFrequency) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (CPUFrequency) Name() string { + return "system.cpu.frequency" +} + +// Unit returns the semantic convention unit of the instrument +func (CPUFrequency) Unit() string { + return "Hz" +} + +// Description returns the semantic convention description of the instrument +func (CPUFrequency) Description() string { + return "Operating frequency of the logical CPU in Hertz." +} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m CPUFrequency) Record( + ctx context.Context, + val int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. 
+func (m CPUFrequency) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// AttrCPULogicalNumber returns an optional attribute for the +// "cpu.logical_number" semantic convention. It represents the logical CPU number +// [0..n-1]. +func (CPUFrequency) AttrCPULogicalNumber(val int) attribute.KeyValue { + return attribute.Int("cpu.logical_number", val) +} + +// CPULogicalCount is an instrument used to record metric values conforming to +// the "system.cpu.logical.count" semantic conventions. It represents the reports +// the number of logical (virtual) processor cores created by the operating +// system to manage multitasking. +type CPULogicalCount struct { + metric.Int64UpDownCounter +} + +// NewCPULogicalCount returns a new CPULogicalCount instrument. +func NewCPULogicalCount( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (CPULogicalCount, error) { + // Check if the meter is nil. + if m == nil { + return CPULogicalCount{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "system.cpu.logical.count", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking."), + metric.WithUnit("{cpu}"), + }, opt...)..., + ) + if err != nil { + return CPULogicalCount{noop.Int64UpDownCounter{}}, err + } + return CPULogicalCount{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m CPULogicalCount) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (CPULogicalCount) Name() string { + return "system.cpu.logical.count" +} + +// Unit returns the semantic convention unit of the instrument +func (CPULogicalCount) Unit() string { + return "{cpu}" +} + +// Description returns the semantic convention description of the instrument +func (CPULogicalCount) Description() string { + return "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking." +} + +// Add adds incr to the existing count for attrs. +// +// Calculated by multiplying the number of sockets by the number of cores per +// socket, and then by the number of threads per core +func (m CPULogicalCount) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// Calculated by multiplying the number of sockets by the number of cores per +// socket, and then by the number of threads per core +func (m CPULogicalCount) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// CPUPhysicalCount is an instrument used to record metric values conforming to +// the "system.cpu.physical.count" semantic conventions. It represents the +// reports the number of actual physical processor cores on the hardware. +type CPUPhysicalCount struct { + metric.Int64UpDownCounter +} + +// NewCPUPhysicalCount returns a new CPUPhysicalCount instrument. 
+func NewCPUPhysicalCount( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (CPUPhysicalCount, error) { + // Check if the meter is nil. + if m == nil { + return CPUPhysicalCount{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "system.cpu.physical.count", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Reports the number of actual physical processor cores on the hardware."), + metric.WithUnit("{cpu}"), + }, opt...)..., + ) + if err != nil { + return CPUPhysicalCount{noop.Int64UpDownCounter{}}, err + } + return CPUPhysicalCount{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m CPUPhysicalCount) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (CPUPhysicalCount) Name() string { + return "system.cpu.physical.count" +} + +// Unit returns the semantic convention unit of the instrument +func (CPUPhysicalCount) Unit() string { + return "{cpu}" +} + +// Description returns the semantic convention description of the instrument +func (CPUPhysicalCount) Description() string { + return "Reports the number of actual physical processor cores on the hardware." +} + +// Add adds incr to the existing count for attrs. +// +// Calculated by multiplying the number of sockets by the number of cores per +// socket +func (m CPUPhysicalCount) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+// +// Calculated by multiplying the number of sockets by the number of cores per +// socket +func (m CPUPhysicalCount) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// CPUTime is an instrument used to record metric values conforming to the +// "system.cpu.time" semantic conventions. It represents the seconds each logical +// CPU spent on each mode. +type CPUTime struct { + metric.Float64ObservableCounter +} + +// NewCPUTime returns a new CPUTime instrument. +func NewCPUTime( + m metric.Meter, + opt ...metric.Float64ObservableCounterOption, +) (CPUTime, error) { + // Check if the meter is nil. + if m == nil { + return CPUTime{noop.Float64ObservableCounter{}}, nil + } + + i, err := m.Float64ObservableCounter( + "system.cpu.time", + append([]metric.Float64ObservableCounterOption{ + metric.WithDescription("Seconds each logical CPU spent on each mode."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return CPUTime{noop.Float64ObservableCounter{}}, err + } + return CPUTime{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m CPUTime) Inst() metric.Float64ObservableCounter { + return m.Float64ObservableCounter +} + +// Name returns the semantic convention name of the instrument. +func (CPUTime) Name() string { + return "system.cpu.time" +} + +// Unit returns the semantic convention unit of the instrument +func (CPUTime) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (CPUTime) Description() string { + return "Seconds each logical CPU spent on each mode." +} + +// AttrCPULogicalNumber returns an optional attribute for the +// "cpu.logical_number" semantic convention. 
It represents the logical CPU number +// [0..n-1]. +func (CPUTime) AttrCPULogicalNumber(val int) attribute.KeyValue { + return attribute.Int("cpu.logical_number", val) +} + +// AttrCPUMode returns an optional attribute for the "cpu.mode" semantic +// convention. It represents the mode of the CPU. +func (CPUTime) AttrCPUMode(val CPUModeAttr) attribute.KeyValue { + return attribute.String("cpu.mode", string(val)) +} + +// CPUUtilization is an instrument used to record metric values conforming to the +// "system.cpu.utilization" semantic conventions. It represents the for each +// logical CPU, the utilization is calculated as the change in cumulative CPU +// time (cpu.time) over a measurement interval, divided by the elapsed time. +type CPUUtilization struct { + metric.Int64Gauge +} + +// NewCPUUtilization returns a new CPUUtilization instrument. +func NewCPUUtilization( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (CPUUtilization, error) { + // Check if the meter is nil. + if m == nil { + return CPUUtilization{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "system.cpu.utilization", + append([]metric.Int64GaugeOption{ + metric.WithDescription("For each logical CPU, the utilization is calculated as the change in cumulative CPU time (cpu.time) over a measurement interval, divided by the elapsed time."), + metric.WithUnit("1"), + }, opt...)..., + ) + if err != nil { + return CPUUtilization{noop.Int64Gauge{}}, err + } + return CPUUtilization{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m CPUUtilization) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. 
+func (CPUUtilization) Name() string {
+	return "system.cpu.utilization"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (CPUUtilization) Unit() string {
+	return "1"
+}
+
+// Description returns the semantic convention description of the instrument
+func (CPUUtilization) Description() string {
+	return "For each logical CPU, the utilization is calculated as the change in cumulative CPU time (cpu.time) over a measurement interval, divided by the elapsed time."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// All additional attrs passed are included in the recorded value.
+func (m CPUUtilization) Record(
+	ctx context.Context,
+	val int64,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			attrs...,
+		),
+	)
+
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m CPUUtilization) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		// Fast path done: without this return the gauge value was
+		// recorded a second time below with an empty attribute set.
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// AttrCPULogicalNumber returns an optional attribute for the
+// "cpu.logical_number" semantic convention. It represents the logical CPU number
+// [0..n-1].
+func (CPUUtilization) AttrCPULogicalNumber(val int) attribute.KeyValue {
+	return attribute.Int("cpu.logical_number", val)
+}
+
+// AttrCPUMode returns an optional attribute for the "cpu.mode" semantic
+// convention. It represents the mode of the CPU.
+func (CPUUtilization) AttrCPUMode(val CPUModeAttr) attribute.KeyValue { + return attribute.String("cpu.mode", string(val)) +} + +// DiskIO is an instrument used to record metric values conforming to the +// "system.disk.io" semantic conventions. It represents the TODO. +type DiskIO struct { + metric.Int64Counter +} + +// NewDiskIO returns a new DiskIO instrument. +func NewDiskIO( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (DiskIO, error) { + // Check if the meter is nil. + if m == nil { + return DiskIO{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "system.disk.io", + append([]metric.Int64CounterOption{ + metric.WithDescription("TODO."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return DiskIO{noop.Int64Counter{}}, err + } + return DiskIO{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m DiskIO) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (DiskIO) Name() string { + return "system.disk.io" +} + +// Unit returns the semantic convention unit of the instrument +func (DiskIO) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (DiskIO) Description() string { + return "TODO." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m DiskIO) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+func (m DiskIO) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrDiskIODirection returns an optional attribute for the "disk.io.direction" +// semantic convention. It represents the disk IO operation direction. +func (DiskIO) AttrDiskIODirection(val DiskIODirectionAttr) attribute.KeyValue { + return attribute.String("disk.io.direction", string(val)) +} + +// AttrDevice returns an optional attribute for the "system.device" semantic +// convention. It represents the device identifier. +func (DiskIO) AttrDevice(val string) attribute.KeyValue { + return attribute.String("system.device", val) +} + +// DiskIOTime is an instrument used to record metric values conforming to the +// "system.disk.io_time" semantic conventions. It represents the time disk spent +// activated. +type DiskIOTime struct { + metric.Float64Counter +} + +// NewDiskIOTime returns a new DiskIOTime instrument. +func NewDiskIOTime( + m metric.Meter, + opt ...metric.Float64CounterOption, +) (DiskIOTime, error) { + // Check if the meter is nil. + if m == nil { + return DiskIOTime{noop.Float64Counter{}}, nil + } + + i, err := m.Float64Counter( + "system.disk.io_time", + append([]metric.Float64CounterOption{ + metric.WithDescription("Time disk spent activated."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return DiskIOTime{noop.Float64Counter{}}, err + } + return DiskIOTime{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m DiskIOTime) Inst() metric.Float64Counter { + return m.Float64Counter +} + +// Name returns the semantic convention name of the instrument. 
+func (DiskIOTime) Name() string { + return "system.disk.io_time" +} + +// Unit returns the semantic convention unit of the instrument +func (DiskIOTime) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (DiskIOTime) Description() string { + return "Time disk spent activated." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// The real elapsed time ("wall clock") used in the I/O path (time from +// operations running in parallel are not counted). Measured as: +// +// - Linux: Field 13 from [procfs-diskstats] +// - Windows: The complement of +// ["Disk% Idle Time"] +// performance counter: `uptime * (100 - "Disk\% Idle Time") / 100` +// +// +// [procfs-diskstats]: https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats +// ["Disk% Idle Time"]: https://learn.microsoft.com/archive/blogs/askcore/windows-performance-monitor-disk-counters-explained#windows-performance-monitor-disk-counters-explained +func (m DiskIOTime) Add( + ctx context.Context, + incr float64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Float64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// The real elapsed time ("wall clock") used in the I/O path (time from +// operations running in parallel are not counted). 
Measured as: +// +// - Linux: Field 13 from [procfs-diskstats] +// - Windows: The complement of +// ["Disk% Idle Time"] +// performance counter: `uptime * (100 - "Disk\% Idle Time") / 100` +// +// +// [procfs-diskstats]: https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats +// ["Disk% Idle Time"]: https://learn.microsoft.com/archive/blogs/askcore/windows-performance-monitor-disk-counters-explained#windows-performance-monitor-disk-counters-explained +func (m DiskIOTime) AddSet(ctx context.Context, incr float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Counter.Add(ctx, incr, *o...) +} + +// AttrDevice returns an optional attribute for the "system.device" semantic +// convention. It represents the device identifier. +func (DiskIOTime) AttrDevice(val string) attribute.KeyValue { + return attribute.String("system.device", val) +} + +// DiskLimit is an instrument used to record metric values conforming to the +// "system.disk.limit" semantic conventions. It represents the total storage +// capacity of the disk. +type DiskLimit struct { + metric.Int64UpDownCounter +} + +// NewDiskLimit returns a new DiskLimit instrument. +func NewDiskLimit( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (DiskLimit, error) { + // Check if the meter is nil. + if m == nil { + return DiskLimit{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "system.disk.limit", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The total storage capacity of the disk."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return DiskLimit{noop.Int64UpDownCounter{}}, err + } + return DiskLimit{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m DiskLimit) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (DiskLimit) Name() string { + return "system.disk.limit" +} + +// Unit returns the semantic convention unit of the instrument +func (DiskLimit) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (DiskLimit) Description() string { + return "The total storage capacity of the disk." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m DiskLimit) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m DiskLimit) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrDevice returns an optional attribute for the "system.device" semantic +// convention. It represents the device identifier. +func (DiskLimit) AttrDevice(val string) attribute.KeyValue { + return attribute.String("system.device", val) +} + +// DiskMerged is an instrument used to record metric values conforming to the +// "system.disk.merged" semantic conventions. It represents the TODO. +type DiskMerged struct { + metric.Int64Counter +} + +// NewDiskMerged returns a new DiskMerged instrument. 
+func NewDiskMerged( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (DiskMerged, error) { + // Check if the meter is nil. + if m == nil { + return DiskMerged{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "system.disk.merged", + append([]metric.Int64CounterOption{ + metric.WithDescription("TODO."), + metric.WithUnit("{operation}"), + }, opt...)..., + ) + if err != nil { + return DiskMerged{noop.Int64Counter{}}, err + } + return DiskMerged{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m DiskMerged) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (DiskMerged) Name() string { + return "system.disk.merged" +} + +// Unit returns the semantic convention unit of the instrument +func (DiskMerged) Unit() string { + return "{operation}" +} + +// Description returns the semantic convention description of the instrument +func (DiskMerged) Description() string { + return "TODO." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m DiskMerged) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m DiskMerged) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) 
+} + +// AttrDiskIODirection returns an optional attribute for the "disk.io.direction" +// semantic convention. It represents the disk IO operation direction. +func (DiskMerged) AttrDiskIODirection(val DiskIODirectionAttr) attribute.KeyValue { + return attribute.String("disk.io.direction", string(val)) +} + +// AttrDevice returns an optional attribute for the "system.device" semantic +// convention. It represents the device identifier. +func (DiskMerged) AttrDevice(val string) attribute.KeyValue { + return attribute.String("system.device", val) +} + +// DiskOperationTime is an instrument used to record metric values conforming to +// the "system.disk.operation_time" semantic conventions. It represents the sum +// of the time each operation took to complete. +type DiskOperationTime struct { + metric.Float64Counter +} + +// NewDiskOperationTime returns a new DiskOperationTime instrument. +func NewDiskOperationTime( + m metric.Meter, + opt ...metric.Float64CounterOption, +) (DiskOperationTime, error) { + // Check if the meter is nil. + if m == nil { + return DiskOperationTime{noop.Float64Counter{}}, nil + } + + i, err := m.Float64Counter( + "system.disk.operation_time", + append([]metric.Float64CounterOption{ + metric.WithDescription("Sum of the time each operation took to complete."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return DiskOperationTime{noop.Float64Counter{}}, err + } + return DiskOperationTime{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m DiskOperationTime) Inst() metric.Float64Counter { + return m.Float64Counter +} + +// Name returns the semantic convention name of the instrument. 
+func (DiskOperationTime) Name() string { + return "system.disk.operation_time" +} + +// Unit returns the semantic convention unit of the instrument +func (DiskOperationTime) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (DiskOperationTime) Description() string { + return "Sum of the time each operation took to complete." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// Because it is the sum of time each request took, parallel-issued requests each +// contribute to make the count grow. Measured as: +// +// - Linux: Fields 7 & 11 from [procfs-diskstats] +// - Windows: "Avg. Disk sec/Read" perf counter multiplied by "Disk Reads/sec" +// perf counter (similar for Writes) +// +// +// [procfs-diskstats]: https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats +func (m DiskOperationTime) Add( + ctx context.Context, + incr float64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Float64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Float64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// Because it is the sum of time each request took, parallel-issued requests each +// contribute to make the count grow. Measured as: +// +// - Linux: Fields 7 & 11 from [procfs-diskstats] +// - Windows: "Avg. 
Disk sec/Read" perf counter multiplied by "Disk Reads/sec" +// perf counter (similar for Writes) +// +// +// [procfs-diskstats]: https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats +func (m DiskOperationTime) AddSet(ctx context.Context, incr float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Counter.Add(ctx, incr, *o...) +} + +// AttrDiskIODirection returns an optional attribute for the "disk.io.direction" +// semantic convention. It represents the disk IO operation direction. +func (DiskOperationTime) AttrDiskIODirection(val DiskIODirectionAttr) attribute.KeyValue { + return attribute.String("disk.io.direction", string(val)) +} + +// AttrDevice returns an optional attribute for the "system.device" semantic +// convention. It represents the device identifier. +func (DiskOperationTime) AttrDevice(val string) attribute.KeyValue { + return attribute.String("system.device", val) +} + +// DiskOperations is an instrument used to record metric values conforming to the +// "system.disk.operations" semantic conventions. It represents the TODO. +type DiskOperations struct { + metric.Int64Counter +} + +// NewDiskOperations returns a new DiskOperations instrument. +func NewDiskOperations( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (DiskOperations, error) { + // Check if the meter is nil. + if m == nil { + return DiskOperations{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "system.disk.operations", + append([]metric.Int64CounterOption{ + metric.WithDescription("TODO."), + metric.WithUnit("{operation}"), + }, opt...)..., + ) + if err != nil { + return DiskOperations{noop.Int64Counter{}}, err + } + return DiskOperations{i}, nil +} + +// Inst returns the underlying metric instrument. 
+func (m DiskOperations) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (DiskOperations) Name() string { + return "system.disk.operations" +} + +// Unit returns the semantic convention unit of the instrument +func (DiskOperations) Unit() string { + return "{operation}" +} + +// Description returns the semantic convention description of the instrument +func (DiskOperations) Description() string { + return "TODO." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m DiskOperations) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m DiskOperations) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrDiskIODirection returns an optional attribute for the "disk.io.direction" +// semantic convention. It represents the disk IO operation direction. +func (DiskOperations) AttrDiskIODirection(val DiskIODirectionAttr) attribute.KeyValue { + return attribute.String("disk.io.direction", string(val)) +} + +// AttrDevice returns an optional attribute for the "system.device" semantic +// convention. It represents the device identifier. 
+func (DiskOperations) AttrDevice(val string) attribute.KeyValue { + return attribute.String("system.device", val) +} + +// FilesystemLimit is an instrument used to record metric values conforming to +// the "system.filesystem.limit" semantic conventions. It represents the total +// storage capacity of the filesystem. +type FilesystemLimit struct { + metric.Int64UpDownCounter +} + +// NewFilesystemLimit returns a new FilesystemLimit instrument. +func NewFilesystemLimit( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (FilesystemLimit, error) { + // Check if the meter is nil. + if m == nil { + return FilesystemLimit{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "system.filesystem.limit", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The total storage capacity of the filesystem."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return FilesystemLimit{noop.Int64UpDownCounter{}}, err + } + return FilesystemLimit{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m FilesystemLimit) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (FilesystemLimit) Name() string { + return "system.filesystem.limit" +} + +// Unit returns the semantic convention unit of the instrument +func (FilesystemLimit) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (FilesystemLimit) Description() string { + return "The total storage capacity of the filesystem." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. 
+func (m FilesystemLimit) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m FilesystemLimit) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrDevice returns an optional attribute for the "system.device" semantic +// convention. It represents the identifier for the device where the filesystem +// resides. +func (FilesystemLimit) AttrDevice(val string) attribute.KeyValue { + return attribute.String("system.device", val) +} + +// AttrFilesystemMode returns an optional attribute for the +// "system.filesystem.mode" semantic convention. It represents the filesystem +// mode. +func (FilesystemLimit) AttrFilesystemMode(val string) attribute.KeyValue { + return attribute.String("system.filesystem.mode", val) +} + +// AttrFilesystemMountpoint returns an optional attribute for the +// "system.filesystem.mountpoint" semantic convention. It represents the +// filesystem mount path. +func (FilesystemLimit) AttrFilesystemMountpoint(val string) attribute.KeyValue { + return attribute.String("system.filesystem.mountpoint", val) +} + +// AttrFilesystemType returns an optional attribute for the +// "system.filesystem.type" semantic convention. It represents the filesystem +// type. 
+func (FilesystemLimit) AttrFilesystemType(val FilesystemTypeAttr) attribute.KeyValue { + return attribute.String("system.filesystem.type", string(val)) +} + +// FilesystemUsage is an instrument used to record metric values conforming to +// the "system.filesystem.usage" semantic conventions. It represents the reports +// a filesystem's space usage across different states. +type FilesystemUsage struct { + metric.Int64UpDownCounter +} + +// NewFilesystemUsage returns a new FilesystemUsage instrument. +func NewFilesystemUsage( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (FilesystemUsage, error) { + // Check if the meter is nil. + if m == nil { + return FilesystemUsage{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "system.filesystem.usage", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Reports a filesystem's space usage across different states."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return FilesystemUsage{noop.Int64UpDownCounter{}}, err + } + return FilesystemUsage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m FilesystemUsage) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (FilesystemUsage) Name() string { + return "system.filesystem.usage" +} + +// Unit returns the semantic convention unit of the instrument +func (FilesystemUsage) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (FilesystemUsage) Description() string { + return "Reports a filesystem's space usage across different states." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. 
+// +// The sum of all `system.filesystem.usage` values over the different +// `system.filesystem.state` attributes +// SHOULD equal the total storage capacity of the filesystem, that is +// `system.filesystem.limit`. +func (m FilesystemUsage) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// The sum of all `system.filesystem.usage` values over the different +// `system.filesystem.state` attributes +// SHOULD equal the total storage capacity of the filesystem, that is +// `system.filesystem.limit`. +func (m FilesystemUsage) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrDevice returns an optional attribute for the "system.device" semantic +// convention. It represents the identifier for the device where the filesystem +// resides. +func (FilesystemUsage) AttrDevice(val string) attribute.KeyValue { + return attribute.String("system.device", val) +} + +// AttrFilesystemMode returns an optional attribute for the +// "system.filesystem.mode" semantic convention. It represents the filesystem +// mode. +func (FilesystemUsage) AttrFilesystemMode(val string) attribute.KeyValue { + return attribute.String("system.filesystem.mode", val) +} + +// AttrFilesystemMountpoint returns an optional attribute for the +// "system.filesystem.mountpoint" semantic convention. 
It represents the +// filesystem mount path. +func (FilesystemUsage) AttrFilesystemMountpoint(val string) attribute.KeyValue { + return attribute.String("system.filesystem.mountpoint", val) +} + +// AttrFilesystemState returns an optional attribute for the +// "system.filesystem.state" semantic convention. It represents the filesystem +// state. +func (FilesystemUsage) AttrFilesystemState(val FilesystemStateAttr) attribute.KeyValue { + return attribute.String("system.filesystem.state", string(val)) +} + +// AttrFilesystemType returns an optional attribute for the +// "system.filesystem.type" semantic convention. It represents the filesystem +// type. +func (FilesystemUsage) AttrFilesystemType(val FilesystemTypeAttr) attribute.KeyValue { + return attribute.String("system.filesystem.type", string(val)) +} + +// FilesystemUtilization is an instrument used to record metric values conforming +// to the "system.filesystem.utilization" semantic conventions. It represents the +// TODO. +type FilesystemUtilization struct { + metric.Int64Gauge +} + +// NewFilesystemUtilization returns a new FilesystemUtilization instrument. +func NewFilesystemUtilization( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (FilesystemUtilization, error) { + // Check if the meter is nil. + if m == nil { + return FilesystemUtilization{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "system.filesystem.utilization", + append([]metric.Int64GaugeOption{ + metric.WithDescription("TODO."), + metric.WithUnit("1"), + }, opt...)..., + ) + if err != nil { + return FilesystemUtilization{noop.Int64Gauge{}}, err + } + return FilesystemUtilization{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m FilesystemUtilization) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. 
+func (FilesystemUtilization) Name() string {
+	return "system.filesystem.utilization"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (FilesystemUtilization) Unit() string {
+	return "1"
+}
+
+// Description returns the semantic convention description of the instrument
+func (FilesystemUtilization) Description() string {
+	return "TODO."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// All additional attrs passed are included in the recorded value.
+func (m FilesystemUtilization) Record(
+	ctx context.Context,
+	val int64,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			attrs...,
+		),
+	)
+
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m FilesystemUtilization) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		// Return on the fast path: without this, an empty set would fall
+		// through and record the value a second time below.
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// AttrDevice returns an optional attribute for the "system.device" semantic
+// convention. It represents the identifier for the device where the filesystem
+// resides.
+func (FilesystemUtilization) AttrDevice(val string) attribute.KeyValue {
+	return attribute.String("system.device", val)
+}
+
+// AttrFilesystemMode returns an optional attribute for the
+// "system.filesystem.mode" semantic convention. It represents the filesystem
+// mode.
+func (FilesystemUtilization) AttrFilesystemMode(val string) attribute.KeyValue { + return attribute.String("system.filesystem.mode", val) +} + +// AttrFilesystemMountpoint returns an optional attribute for the +// "system.filesystem.mountpoint" semantic convention. It represents the +// filesystem mount path. +func (FilesystemUtilization) AttrFilesystemMountpoint(val string) attribute.KeyValue { + return attribute.String("system.filesystem.mountpoint", val) +} + +// AttrFilesystemState returns an optional attribute for the +// "system.filesystem.state" semantic convention. It represents the filesystem +// state. +func (FilesystemUtilization) AttrFilesystemState(val FilesystemStateAttr) attribute.KeyValue { + return attribute.String("system.filesystem.state", string(val)) +} + +// AttrFilesystemType returns an optional attribute for the +// "system.filesystem.type" semantic convention. It represents the filesystem +// type. +func (FilesystemUtilization) AttrFilesystemType(val FilesystemTypeAttr) attribute.KeyValue { + return attribute.String("system.filesystem.type", string(val)) +} + +// LinuxMemoryAvailable is an instrument used to record metric values conforming +// to the "system.linux.memory.available" semantic conventions. It represents an +// estimate of how much memory is available for starting new applications, +// without causing swapping. +type LinuxMemoryAvailable struct { + metric.Int64UpDownCounter +} + +// NewLinuxMemoryAvailable returns a new LinuxMemoryAvailable instrument. +func NewLinuxMemoryAvailable( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (LinuxMemoryAvailable, error) { + // Check if the meter is nil. 
+ if m == nil { + return LinuxMemoryAvailable{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "system.linux.memory.available", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("An estimate of how much memory is available for starting new applications, without causing swapping."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return LinuxMemoryAvailable{noop.Int64UpDownCounter{}}, err + } + return LinuxMemoryAvailable{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m LinuxMemoryAvailable) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (LinuxMemoryAvailable) Name() string { + return "system.linux.memory.available" +} + +// Unit returns the semantic convention unit of the instrument +func (LinuxMemoryAvailable) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (LinuxMemoryAvailable) Description() string { + return "An estimate of how much memory is available for starting new applications, without causing swapping." +} + +// Add adds incr to the existing count for attrs. +// +// This is an alternative to `system.memory.usage` metric with `state=free`. +// Linux starting from 3.14 exports "available" memory. It takes "free" memory as +// a baseline, and then factors in kernel-specific values. +// This is supposed to be more accurate than just "free" memory. +// For reference, see the calculations [here]. +// See also `MemAvailable` in [/proc/meminfo]. 
+// +// [here]: https://superuser.com/a/980821 +// [/proc/meminfo]: https://man7.org/linux/man-pages/man5/proc.5.html +func (m LinuxMemoryAvailable) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// This is an alternative to `system.memory.usage` metric with `state=free`. +// Linux starting from 3.14 exports "available" memory. It takes "free" memory as +// a baseline, and then factors in kernel-specific values. +// This is supposed to be more accurate than just "free" memory. +// For reference, see the calculations [here]. +// See also `MemAvailable` in [/proc/meminfo]. +// +// [here]: https://superuser.com/a/980821 +// [/proc/meminfo]: https://man7.org/linux/man-pages/man5/proc.5.html +func (m LinuxMemoryAvailable) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// LinuxMemorySlabUsage is an instrument used to record metric values conforming +// to the "system.linux.memory.slab.usage" semantic conventions. It represents +// the reports the memory used by the Linux kernel for managing caches of +// frequently used objects. +type LinuxMemorySlabUsage struct { + metric.Int64UpDownCounter +} + +// NewLinuxMemorySlabUsage returns a new LinuxMemorySlabUsage instrument. +func NewLinuxMemorySlabUsage( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (LinuxMemorySlabUsage, error) { + // Check if the meter is nil. 
+ if m == nil { + return LinuxMemorySlabUsage{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "system.linux.memory.slab.usage", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Reports the memory used by the Linux kernel for managing caches of frequently used objects."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return LinuxMemorySlabUsage{noop.Int64UpDownCounter{}}, err + } + return LinuxMemorySlabUsage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m LinuxMemorySlabUsage) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (LinuxMemorySlabUsage) Name() string { + return "system.linux.memory.slab.usage" +} + +// Unit returns the semantic convention unit of the instrument +func (LinuxMemorySlabUsage) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (LinuxMemorySlabUsage) Description() string { + return "Reports the memory used by the Linux kernel for managing caches of frequently used objects." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// The sum over the `reclaimable` and `unreclaimable` state values in +// `linux.memory.slab.usage` SHOULD be equal to the total slab memory available +// on the system. +// Note that the total slab memory is not constant and may vary over time. +// See also the [Slab allocator] and `Slab` in [/proc/meminfo]. 
+// +// [Slab allocator]: https://blogs.oracle.com/linux/post/understanding-linux-kernel-memory-statistics +// [/proc/meminfo]: https://man7.org/linux/man-pages/man5/proc.5.html +func (m LinuxMemorySlabUsage) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// The sum over the `reclaimable` and `unreclaimable` state values in +// `linux.memory.slab.usage` SHOULD be equal to the total slab memory available +// on the system. +// Note that the total slab memory is not constant and may vary over time. +// See also the [Slab allocator] and `Slab` in [/proc/meminfo]. +// +// [Slab allocator]: https://blogs.oracle.com/linux/post/understanding-linux-kernel-memory-statistics +// [/proc/meminfo]: https://man7.org/linux/man-pages/man5/proc.5.html +func (m LinuxMemorySlabUsage) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrLinuxMemorySlabState returns an optional attribute for the +// "linux.memory.slab.state" semantic convention. It represents the Linux Slab +// memory state. +func (LinuxMemorySlabUsage) AttrLinuxMemorySlabState(val LinuxMemorySlabStateAttr) attribute.KeyValue { + return attribute.String("linux.memory.slab.state", string(val)) +} + +// MemoryLimit is an instrument used to record metric values conforming to the +// "system.memory.limit" semantic conventions. 
It represents the total virtual +// memory available in the system. +type MemoryLimit struct { + metric.Int64UpDownCounter +} + +// NewMemoryLimit returns a new MemoryLimit instrument. +func NewMemoryLimit( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (MemoryLimit, error) { + // Check if the meter is nil. + if m == nil { + return MemoryLimit{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "system.memory.limit", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Total virtual memory available in the system."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return MemoryLimit{noop.Int64UpDownCounter{}}, err + } + return MemoryLimit{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m MemoryLimit) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (MemoryLimit) Name() string { + return "system.memory.limit" +} + +// Unit returns the semantic convention unit of the instrument +func (MemoryLimit) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (MemoryLimit) Description() string { + return "Total virtual memory available in the system." +} + +// Add adds incr to the existing count for attrs. +func (m MemoryLimit) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+func (m MemoryLimit) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// MemoryShared is an instrument used to record metric values conforming to the +// "system.memory.shared" semantic conventions. It represents the shared memory +// used (mostly by tmpfs). +type MemoryShared struct { + metric.Int64UpDownCounter +} + +// NewMemoryShared returns a new MemoryShared instrument. +func NewMemoryShared( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (MemoryShared, error) { + // Check if the meter is nil. + if m == nil { + return MemoryShared{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "system.memory.shared", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Shared memory used (mostly by tmpfs)."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return MemoryShared{noop.Int64UpDownCounter{}}, err + } + return MemoryShared{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m MemoryShared) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (MemoryShared) Name() string { + return "system.memory.shared" +} + +// Unit returns the semantic convention unit of the instrument +func (MemoryShared) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (MemoryShared) Description() string { + return "Shared memory used (mostly by tmpfs)." +} + +// Add adds incr to the existing count for attrs. 
+// +// Equivalent of `shared` from [`free` command] or +// `Shmem` from [`/proc/meminfo`]" +// +// [`free` command]: https://man7.org/linux/man-pages/man1/free.1.html +// [`/proc/meminfo`]: https://man7.org/linux/man-pages/man5/proc.5.html +func (m MemoryShared) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +// +// Equivalent of `shared` from [`free` command] or +// `Shmem` from [`/proc/meminfo`]" +// +// [`free` command]: https://man7.org/linux/man-pages/man1/free.1.html +// [`/proc/meminfo`]: https://man7.org/linux/man-pages/man5/proc.5.html +func (m MemoryShared) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// MemoryUsage is an instrument used to record metric values conforming to the +// "system.memory.usage" semantic conventions. It represents the reports memory +// in use by state. +type MemoryUsage struct { + metric.Int64ObservableUpDownCounter +} + +// NewMemoryUsage returns a new MemoryUsage instrument. +func NewMemoryUsage( + m metric.Meter, + opt ...metric.Int64ObservableUpDownCounterOption, +) (MemoryUsage, error) { + // Check if the meter is nil. 
+ if m == nil { + return MemoryUsage{noop.Int64ObservableUpDownCounter{}}, nil + } + + i, err := m.Int64ObservableUpDownCounter( + "system.memory.usage", + append([]metric.Int64ObservableUpDownCounterOption{ + metric.WithDescription("Reports memory in use by state."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return MemoryUsage{noop.Int64ObservableUpDownCounter{}}, err + } + return MemoryUsage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m MemoryUsage) Inst() metric.Int64ObservableUpDownCounter { + return m.Int64ObservableUpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (MemoryUsage) Name() string { + return "system.memory.usage" +} + +// Unit returns the semantic convention unit of the instrument +func (MemoryUsage) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (MemoryUsage) Description() string { + return "Reports memory in use by state." +} + +// AttrMemoryState returns an optional attribute for the "system.memory.state" +// semantic convention. It represents the memory state. +func (MemoryUsage) AttrMemoryState(val MemoryStateAttr) attribute.KeyValue { + return attribute.String("system.memory.state", string(val)) +} + +// MemoryUtilization is an instrument used to record metric values conforming to +// the "system.memory.utilization" semantic conventions. It represents the TODO. +type MemoryUtilization struct { + metric.Float64ObservableGauge +} + +// NewMemoryUtilization returns a new MemoryUtilization instrument. +func NewMemoryUtilization( + m metric.Meter, + opt ...metric.Float64ObservableGaugeOption, +) (MemoryUtilization, error) { + // Check if the meter is nil. 
+ if m == nil { + return MemoryUtilization{noop.Float64ObservableGauge{}}, nil + } + + i, err := m.Float64ObservableGauge( + "system.memory.utilization", + append([]metric.Float64ObservableGaugeOption{ + metric.WithDescription("TODO."), + metric.WithUnit("1"), + }, opt...)..., + ) + if err != nil { + return MemoryUtilization{noop.Float64ObservableGauge{}}, err + } + return MemoryUtilization{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m MemoryUtilization) Inst() metric.Float64ObservableGauge { + return m.Float64ObservableGauge +} + +// Name returns the semantic convention name of the instrument. +func (MemoryUtilization) Name() string { + return "system.memory.utilization" +} + +// Unit returns the semantic convention unit of the instrument +func (MemoryUtilization) Unit() string { + return "1" +} + +// Description returns the semantic convention description of the instrument +func (MemoryUtilization) Description() string { + return "TODO." +} + +// AttrMemoryState returns an optional attribute for the "system.memory.state" +// semantic convention. It represents the memory state. +func (MemoryUtilization) AttrMemoryState(val MemoryStateAttr) attribute.KeyValue { + return attribute.String("system.memory.state", string(val)) +} + +// NetworkConnectionCount is an instrument used to record metric values +// conforming to the "system.network.connection.count" semantic conventions. It +// represents the TODO. +type NetworkConnectionCount struct { + metric.Int64UpDownCounter +} + +// NewNetworkConnectionCount returns a new NetworkConnectionCount instrument. +func NewNetworkConnectionCount( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (NetworkConnectionCount, error) { + // Check if the meter is nil. 
+ if m == nil { + return NetworkConnectionCount{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "system.network.connection.count", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("TODO."), + metric.WithUnit("{connection}"), + }, opt...)..., + ) + if err != nil { + return NetworkConnectionCount{noop.Int64UpDownCounter{}}, err + } + return NetworkConnectionCount{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NetworkConnectionCount) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (NetworkConnectionCount) Name() string { + return "system.network.connection.count" +} + +// Unit returns the semantic convention unit of the instrument +func (NetworkConnectionCount) Unit() string { + return "{connection}" +} + +// Description returns the semantic convention description of the instrument +func (NetworkConnectionCount) Description() string { + return "TODO." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m NetworkConnectionCount) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m NetworkConnectionCount) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) 
+} + +// AttrNetworkConnectionState returns an optional attribute for the +// "network.connection.state" semantic convention. It represents the state of +// network connection. +func (NetworkConnectionCount) AttrNetworkConnectionState(val NetworkConnectionStateAttr) attribute.KeyValue { + return attribute.String("network.connection.state", string(val)) +} + +// AttrNetworkInterfaceName returns an optional attribute for the +// "network.interface.name" semantic convention. It represents the network +// interface name. +func (NetworkConnectionCount) AttrNetworkInterfaceName(val string) attribute.KeyValue { + return attribute.String("network.interface.name", val) +} + +// AttrNetworkTransport returns an optional attribute for the "network.transport" +// semantic convention. It represents the [OSI transport layer] or +// [inter-process communication method]. +// +// [OSI transport layer]: https://wikipedia.org/wiki/Transport_layer +// [inter-process communication method]: https://wikipedia.org/wiki/Inter-process_communication +func (NetworkConnectionCount) AttrNetworkTransport(val NetworkTransportAttr) attribute.KeyValue { + return attribute.String("network.transport", string(val)) +} + +// NetworkErrors is an instrument used to record metric values conforming to the +// "system.network.errors" semantic conventions. It represents the count of +// network errors detected. +type NetworkErrors struct { + metric.Int64Counter +} + +// NewNetworkErrors returns a new NetworkErrors instrument. +func NewNetworkErrors( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (NetworkErrors, error) { + // Check if the meter is nil. 
+ if m == nil { + return NetworkErrors{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "system.network.errors", + append([]metric.Int64CounterOption{ + metric.WithDescription("Count of network errors detected."), + metric.WithUnit("{error}"), + }, opt...)..., + ) + if err != nil { + return NetworkErrors{noop.Int64Counter{}}, err + } + return NetworkErrors{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NetworkErrors) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (NetworkErrors) Name() string { + return "system.network.errors" +} + +// Unit returns the semantic convention unit of the instrument +func (NetworkErrors) Unit() string { + return "{error}" +} + +// Description returns the semantic convention description of the instrument +func (NetworkErrors) Description() string { + return "Count of network errors detected." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// Measured as: +// +// - Linux: the `errs` column in `/proc/net/dev` ([source]). +// - Windows: [`InErrors`/`OutErrors`] +// from [`GetIfEntry2`]. +// +// +// [source]: https://web.archive.org/web/20180321091318/http://www.onlamp.com/pub/a/linux/2000/11/16/LinuxAdmin.html +// [`InErrors`/`OutErrors`]: https://docs.microsoft.com/windows/win32/api/netioapi/ns-netioapi-mib_if_row2 +// [`GetIfEntry2`]: https://docs.microsoft.com/windows/win32/api/netioapi/nf-netioapi-getifentry2 +func (m NetworkErrors) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) 
+} + +// AddSet adds incr to the existing count for set. +// +// Measured as: +// +// - Linux: the `errs` column in `/proc/net/dev` ([source]). +// - Windows: [`InErrors`/`OutErrors`] +// from [`GetIfEntry2`]. +// +// +// [source]: https://web.archive.org/web/20180321091318/http://www.onlamp.com/pub/a/linux/2000/11/16/LinuxAdmin.html +// [`InErrors`/`OutErrors`]: https://docs.microsoft.com/windows/win32/api/netioapi/ns-netioapi-mib_if_row2 +// [`GetIfEntry2`]: https://docs.microsoft.com/windows/win32/api/netioapi/nf-netioapi-getifentry2 +func (m NetworkErrors) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrNetworkInterfaceName returns an optional attribute for the +// "network.interface.name" semantic convention. It represents the network +// interface name. +func (NetworkErrors) AttrNetworkInterfaceName(val string) attribute.KeyValue { + return attribute.String("network.interface.name", val) +} + +// AttrNetworkIODirection returns an optional attribute for the +// "network.io.direction" semantic convention. It represents the network IO +// operation direction. +func (NetworkErrors) AttrNetworkIODirection(val NetworkIODirectionAttr) attribute.KeyValue { + return attribute.String("network.io.direction", string(val)) +} + +// NetworkIO is an instrument used to record metric values conforming to the +// "system.network.io" semantic conventions. It represents the TODO. +type NetworkIO struct { + metric.Int64ObservableCounter +} + +// NewNetworkIO returns a new NetworkIO instrument. +func NewNetworkIO( + m metric.Meter, + opt ...metric.Int64ObservableCounterOption, +) (NetworkIO, error) { + // Check if the meter is nil. 
+ if m == nil { + return NetworkIO{noop.Int64ObservableCounter{}}, nil + } + + i, err := m.Int64ObservableCounter( + "system.network.io", + append([]metric.Int64ObservableCounterOption{ + metric.WithDescription("TODO."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return NetworkIO{noop.Int64ObservableCounter{}}, err + } + return NetworkIO{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NetworkIO) Inst() metric.Int64ObservableCounter { + return m.Int64ObservableCounter +} + +// Name returns the semantic convention name of the instrument. +func (NetworkIO) Name() string { + return "system.network.io" +} + +// Unit returns the semantic convention unit of the instrument +func (NetworkIO) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (NetworkIO) Description() string { + return "TODO." +} + +// AttrNetworkInterfaceName returns an optional attribute for the +// "network.interface.name" semantic convention. It represents the network +// interface name. +func (NetworkIO) AttrNetworkInterfaceName(val string) attribute.KeyValue { + return attribute.String("network.interface.name", val) +} + +// AttrNetworkIODirection returns an optional attribute for the +// "network.io.direction" semantic convention. It represents the network IO +// operation direction. +func (NetworkIO) AttrNetworkIODirection(val NetworkIODirectionAttr) attribute.KeyValue { + return attribute.String("network.io.direction", string(val)) +} + +// NetworkPacketCount is an instrument used to record metric values conforming to +// the "system.network.packet.count" semantic conventions. It represents the +// TODO. +type NetworkPacketCount struct { + metric.Int64Counter +} + +// NewNetworkPacketCount returns a new NetworkPacketCount instrument. +func NewNetworkPacketCount( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (NetworkPacketCount, error) { + // Check if the meter is nil. 
+ if m == nil { + return NetworkPacketCount{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "system.network.packet.count", + append([]metric.Int64CounterOption{ + metric.WithDescription("TODO."), + metric.WithUnit("{packet}"), + }, opt...)..., + ) + if err != nil { + return NetworkPacketCount{noop.Int64Counter{}}, err + } + return NetworkPacketCount{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NetworkPacketCount) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (NetworkPacketCount) Name() string { + return "system.network.packet.count" +} + +// Unit returns the semantic convention unit of the instrument +func (NetworkPacketCount) Unit() string { + return "{packet}" +} + +// Description returns the semantic convention description of the instrument +func (NetworkPacketCount) Description() string { + return "TODO." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m NetworkPacketCount) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m NetworkPacketCount) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrNetworkIODirection returns an optional attribute for the +// "network.io.direction" semantic convention. 
It represents the network IO +// operation direction. +func (NetworkPacketCount) AttrNetworkIODirection(val NetworkIODirectionAttr) attribute.KeyValue { + return attribute.String("network.io.direction", string(val)) +} + +// AttrDevice returns an optional attribute for the "system.device" semantic +// convention. It represents the device identifier. +func (NetworkPacketCount) AttrDevice(val string) attribute.KeyValue { + return attribute.String("system.device", val) +} + +// NetworkPacketDropped is an instrument used to record metric values conforming +// to the "system.network.packet.dropped" semantic conventions. It represents the +// count of packets that are dropped or discarded even though there was no error. +type NetworkPacketDropped struct { + metric.Int64Counter +} + +// NewNetworkPacketDropped returns a new NetworkPacketDropped instrument. +func NewNetworkPacketDropped( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (NetworkPacketDropped, error) { + // Check if the meter is nil. + if m == nil { + return NetworkPacketDropped{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "system.network.packet.dropped", + append([]metric.Int64CounterOption{ + metric.WithDescription("Count of packets that are dropped or discarded even though there was no error."), + metric.WithUnit("{packet}"), + }, opt...)..., + ) + if err != nil { + return NetworkPacketDropped{noop.Int64Counter{}}, err + } + return NetworkPacketDropped{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m NetworkPacketDropped) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. 
+func (NetworkPacketDropped) Name() string { + return "system.network.packet.dropped" +} + +// Unit returns the semantic convention unit of the instrument +func (NetworkPacketDropped) Unit() string { + return "{packet}" +} + +// Description returns the semantic convention description of the instrument +func (NetworkPacketDropped) Description() string { + return "Count of packets that are dropped or discarded even though there was no error." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +// +// Measured as: +// +// - Linux: the `drop` column in `/proc/net/dev` ([source]) +// - Windows: [`InDiscards`/`OutDiscards`] +// from [`GetIfEntry2`] +// +// +// [source]: https://web.archive.org/web/20180321091318/http://www.onlamp.com/pub/a/linux/2000/11/16/LinuxAdmin.html +// [`InDiscards`/`OutDiscards`]: https://docs.microsoft.com/windows/win32/api/netioapi/ns-netioapi-mib_if_row2 +// [`GetIfEntry2`]: https://docs.microsoft.com/windows/win32/api/netioapi/nf-netioapi-getifentry2 +func (m NetworkPacketDropped) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+// +// Measured as: +// +// - Linux: the `drop` column in `/proc/net/dev` ([source]) +// - Windows: [`InDiscards`/`OutDiscards`] +// from [`GetIfEntry2`] +// +// +// [source]: https://web.archive.org/web/20180321091318/http://www.onlamp.com/pub/a/linux/2000/11/16/LinuxAdmin.html +// [`InDiscards`/`OutDiscards`]: https://docs.microsoft.com/windows/win32/api/netioapi/ns-netioapi-mib_if_row2 +// [`GetIfEntry2`]: https://docs.microsoft.com/windows/win32/api/netioapi/nf-netioapi-getifentry2 +func (m NetworkPacketDropped) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrNetworkInterfaceName returns an optional attribute for the +// "network.interface.name" semantic convention. It represents the network +// interface name. +func (NetworkPacketDropped) AttrNetworkInterfaceName(val string) attribute.KeyValue { + return attribute.String("network.interface.name", val) +} + +// AttrNetworkIODirection returns an optional attribute for the +// "network.io.direction" semantic convention. It represents the network IO +// operation direction. +func (NetworkPacketDropped) AttrNetworkIODirection(val NetworkIODirectionAttr) attribute.KeyValue { + return attribute.String("network.io.direction", string(val)) +} + +// PagingFaults is an instrument used to record metric values conforming to the +// "system.paging.faults" semantic conventions. It represents the TODO. +type PagingFaults struct { + metric.Int64Counter +} + +// NewPagingFaults returns a new PagingFaults instrument. +func NewPagingFaults( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (PagingFaults, error) { + // Check if the meter is nil. 
+ if m == nil { + return PagingFaults{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "system.paging.faults", + append([]metric.Int64CounterOption{ + metric.WithDescription("TODO."), + metric.WithUnit("{fault}"), + }, opt...)..., + ) + if err != nil { + return PagingFaults{noop.Int64Counter{}}, err + } + return PagingFaults{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m PagingFaults) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (PagingFaults) Name() string { + return "system.paging.faults" +} + +// Unit returns the semantic convention unit of the instrument +func (PagingFaults) Unit() string { + return "{fault}" +} + +// Description returns the semantic convention description of the instrument +func (PagingFaults) Description() string { + return "TODO." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m PagingFaults) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m PagingFaults) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrPagingType returns an optional attribute for the "system.paging.type" +// semantic convention. It represents the memory paging type. 
+func (PagingFaults) AttrPagingType(val PagingTypeAttr) attribute.KeyValue { + return attribute.String("system.paging.type", string(val)) +} + +// PagingOperations is an instrument used to record metric values conforming to +// the "system.paging.operations" semantic conventions. It represents the TODO. +type PagingOperations struct { + metric.Int64Counter +} + +// NewPagingOperations returns a new PagingOperations instrument. +func NewPagingOperations( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (PagingOperations, error) { + // Check if the meter is nil. + if m == nil { + return PagingOperations{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "system.paging.operations", + append([]metric.Int64CounterOption{ + metric.WithDescription("TODO."), + metric.WithUnit("{operation}"), + }, opt...)..., + ) + if err != nil { + return PagingOperations{noop.Int64Counter{}}, err + } + return PagingOperations{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m PagingOperations) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. +func (PagingOperations) Name() string { + return "system.paging.operations" +} + +// Unit returns the semantic convention unit of the instrument +func (PagingOperations) Unit() string { + return "{operation}" +} + +// Description returns the semantic convention description of the instrument +func (PagingOperations) Description() string { + return "TODO." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. 
+func (m PagingOperations) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m PagingOperations) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AttrPagingDirection returns an optional attribute for the +// "system.paging.direction" semantic convention. It represents the paging access +// direction. +func (PagingOperations) AttrPagingDirection(val PagingDirectionAttr) attribute.KeyValue { + return attribute.String("system.paging.direction", string(val)) +} + +// AttrPagingType returns an optional attribute for the "system.paging.type" +// semantic convention. It represents the memory paging type. +func (PagingOperations) AttrPagingType(val PagingTypeAttr) attribute.KeyValue { + return attribute.String("system.paging.type", string(val)) +} + +// PagingUsage is an instrument used to record metric values conforming to the +// "system.paging.usage" semantic conventions. It represents the unix swap or +// windows pagefile usage. +type PagingUsage struct { + metric.Int64UpDownCounter +} + +// NewPagingUsage returns a new PagingUsage instrument. +func NewPagingUsage( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (PagingUsage, error) { + // Check if the meter is nil. 
+ if m == nil { + return PagingUsage{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "system.paging.usage", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Unix swap or windows pagefile usage."), + metric.WithUnit("By"), + }, opt...)..., + ) + if err != nil { + return PagingUsage{noop.Int64UpDownCounter{}}, err + } + return PagingUsage{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m PagingUsage) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (PagingUsage) Name() string { + return "system.paging.usage" +} + +// Unit returns the semantic convention unit of the instrument +func (PagingUsage) Unit() string { + return "By" +} + +// Description returns the semantic convention description of the instrument +func (PagingUsage) Description() string { + return "Unix swap or windows pagefile usage." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m PagingUsage) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m PagingUsage) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) 
+} + +// AttrDevice returns an optional attribute for the "system.device" semantic +// convention. It represents the unique identifier for the device responsible for +// managing paging operations. +func (PagingUsage) AttrDevice(val string) attribute.KeyValue { + return attribute.String("system.device", val) +} + +// AttrPagingState returns an optional attribute for the "system.paging.state" +// semantic convention. It represents the memory paging state. +func (PagingUsage) AttrPagingState(val PagingStateAttr) attribute.KeyValue { + return attribute.String("system.paging.state", string(val)) +} + +// PagingUtilization is an instrument used to record metric values conforming to +// the "system.paging.utilization" semantic conventions. It represents the TODO. +type PagingUtilization struct { + metric.Int64Gauge +} + +// NewPagingUtilization returns a new PagingUtilization instrument. +func NewPagingUtilization( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (PagingUtilization, error) { + // Check if the meter is nil. + if m == nil { + return PagingUtilization{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "system.paging.utilization", + append([]metric.Int64GaugeOption{ + metric.WithDescription("TODO."), + metric.WithUnit("1"), + }, opt...)..., + ) + if err != nil { + return PagingUtilization{noop.Int64Gauge{}}, err + } + return PagingUtilization{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m PagingUtilization) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (PagingUtilization) Name() string { + return "system.paging.utilization" +} + +// Unit returns the semantic convention unit of the instrument +func (PagingUtilization) Unit() string { + return "1" +} + +// Description returns the semantic convention description of the instrument +func (PagingUtilization) Description() string { + return "TODO." 
+} + +// Record records val to the current distribution for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m PagingUtilization) Record( + ctx context.Context, + val int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +func (m PagingUtilization) RecordSet(ctx context.Context, val int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Gauge.Record(ctx, val, *o...) +} + +// AttrDevice returns an optional attribute for the "system.device" semantic +// convention. It represents the unique identifier for the device responsible for +// managing paging operations. +func (PagingUtilization) AttrDevice(val string) attribute.KeyValue { + return attribute.String("system.device", val) +} + +// AttrPagingState returns an optional attribute for the "system.paging.state" +// semantic convention. It represents the memory paging state. +func (PagingUtilization) AttrPagingState(val PagingStateAttr) attribute.KeyValue { + return attribute.String("system.paging.state", string(val)) +} + +// ProcessCount is an instrument used to record metric values conforming to the +// "system.process.count" semantic conventions. It represents the total number of +// processes in each state. +type ProcessCount struct { + metric.Int64UpDownCounter +} + +// NewProcessCount returns a new ProcessCount instrument. 
+func NewProcessCount( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ProcessCount, error) { + // Check if the meter is nil. + if m == nil { + return ProcessCount{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "system.process.count", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("Total number of processes in each state."), + metric.WithUnit("{process}"), + }, opt...)..., + ) + if err != nil { + return ProcessCount{noop.Int64UpDownCounter{}}, err + } + return ProcessCount{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ProcessCount) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ProcessCount) Name() string { + return "system.process.count" +} + +// Unit returns the semantic convention unit of the instrument +func (ProcessCount) Unit() string { + return "{process}" +} + +// Description returns the semantic convention description of the instrument +func (ProcessCount) Description() string { + return "Total number of processes in each state." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m ProcessCount) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. 
+func (m ProcessCount) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrProcessStatus returns an optional attribute for the +// "system.process.status" semantic convention. It represents the process state, +// e.g., [Linux Process State Codes]. +// +// [Linux Process State Codes]: https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES +func (ProcessCount) AttrProcessStatus(val ProcessStatusAttr) attribute.KeyValue { + return attribute.String("system.process.status", string(val)) +} + +// ProcessCreated is an instrument used to record metric values conforming to the +// "system.process.created" semantic conventions. It represents the total number +// of processes created over uptime of the host. +type ProcessCreated struct { + metric.Int64Counter +} + +// NewProcessCreated returns a new ProcessCreated instrument. +func NewProcessCreated( + m metric.Meter, + opt ...metric.Int64CounterOption, +) (ProcessCreated, error) { + // Check if the meter is nil. + if m == nil { + return ProcessCreated{noop.Int64Counter{}}, nil + } + + i, err := m.Int64Counter( + "system.process.created", + append([]metric.Int64CounterOption{ + metric.WithDescription("Total number of processes created over uptime of the host."), + metric.WithUnit("{process}"), + }, opt...)..., + ) + if err != nil { + return ProcessCreated{noop.Int64Counter{}}, err + } + return ProcessCreated{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ProcessCreated) Inst() metric.Int64Counter { + return m.Int64Counter +} + +// Name returns the semantic convention name of the instrument. 
+func (ProcessCreated) Name() string { + return "system.process.created" +} + +// Unit returns the semantic convention unit of the instrument +func (ProcessCreated) Unit() string { + return "{process}" +} + +// Description returns the semantic convention description of the instrument +func (ProcessCreated) Description() string { + return "Total number of processes created over uptime of the host." +} + +// Add adds incr to the existing count for attrs. +func (m ProcessCreated) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m ProcessCreated) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64Counter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64Counter.Add(ctx, incr, *o...) +} + +// Uptime is an instrument used to record metric values conforming to the +// "system.uptime" semantic conventions. It represents the time the system has +// been running. +type Uptime struct { + metric.Float64Gauge +} + +// NewUptime returns a new Uptime instrument. +func NewUptime( + m metric.Meter, + opt ...metric.Float64GaugeOption, +) (Uptime, error) { + // Check if the meter is nil. 
+ if m == nil { + return Uptime{noop.Float64Gauge{}}, nil + } + + i, err := m.Float64Gauge( + "system.uptime", + append([]metric.Float64GaugeOption{ + metric.WithDescription("The time the system has been running."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return Uptime{noop.Float64Gauge{}}, err + } + return Uptime{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m Uptime) Inst() metric.Float64Gauge { + return m.Float64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (Uptime) Name() string { + return "system.uptime" +} + +// Unit returns the semantic convention unit of the instrument +func (Uptime) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (Uptime) Description() string { + return "The time the system has been running." +} + +// Record records val to the current distribution for attrs. +// +// Instrumentations SHOULD use a gauge with type `double` and measure uptime in +// seconds as a floating point number with the highest precision available. +// The actual accuracy would depend on the instrumentation and operating system. +func (m Uptime) Record(ctx context.Context, val float64, attrs ...attribute.KeyValue) { + if len(attrs) == 0 { + m.Float64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributes(attrs...)) + m.Float64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// Instrumentations SHOULD use a gauge with type `double` and measure uptime in +// seconds as a floating point number with the highest precision available. +// The actual accuracy would depend on the instrumentation and operating system. 
+func (m Uptime) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Gauge.Record(ctx, val)
+		// return here so the empty-set fast path does not fall through and
+		// record the value a second time below.
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Gauge.Record(ctx, val, *o...)
+}
\ No newline at end of file
diff --git a/semconv/v1.37.0/vcsconv/metric.go b/semconv/v1.37.0/vcsconv/metric.go
new file mode 100644
index 00000000000..a8c99213da2
--- /dev/null
+++ b/semconv/v1.37.0/vcsconv/metric.go
@@ -0,0 +1,1592 @@
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package vcsconv provides types and functionality for OpenTelemetry semantic
+// conventions in the "vcs" namespace.
+package vcsconv
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/noop"
+)
+
+var (
+	addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }}
+	recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }}
+)
+
+// ChangeStateAttr is an attribute conforming to the vcs.change.state semantic
+// conventions. It represents the state of the change (pull request/merge
+// request/changelist).
+type ChangeStateAttr string
+
+var (
+	// ChangeStateOpen is the open means the change is currently active and under
+	// review. It hasn't been merged into the target branch yet, and it's still
+	// possible to make changes or add comments.
+	ChangeStateOpen ChangeStateAttr = "open"
+	// ChangeStateWip is the WIP (work-in-progress, draft) means the change is still
+	// in progress and not yet ready for a full review. It might still undergo
+	// significant changes.
+	ChangeStateWip ChangeStateAttr = "wip"
+	// ChangeStateClosed is the closed means the merge request has been closed
+	// without merging.
This can happen for various reasons, such as the changes + // being deemed unnecessary, the issue being resolved in another way, or the + // author deciding to withdraw the request. + ChangeStateClosed ChangeStateAttr = "closed" + // ChangeStateMerged is the merged indicates that the change has been + // successfully integrated into the target codebase. + ChangeStateMerged ChangeStateAttr = "merged" +) + +// LineChangeTypeAttr is an attribute conforming to the vcs.line_change.type +// semantic conventions. It represents the type of line change being measured on +// a branch or change. +type LineChangeTypeAttr string + +var ( + // LineChangeTypeAdded is the how many lines were added. + LineChangeTypeAdded LineChangeTypeAttr = "added" + // LineChangeTypeRemoved is the how many lines were removed. + LineChangeTypeRemoved LineChangeTypeAttr = "removed" +) + +// ProviderNameAttr is an attribute conforming to the vcs.provider.name semantic +// conventions. It represents the name of the version control system provider. +type ProviderNameAttr string + +var ( + // ProviderNameGithub is the [GitHub]. + // + // [GitHub]: https://github.com + ProviderNameGithub ProviderNameAttr = "github" + // ProviderNameGitlab is the [GitLab]. + // + // [GitLab]: https://gitlab.com + ProviderNameGitlab ProviderNameAttr = "gitlab" + // ProviderNameGitea is the [Gitea]. + // + // [Gitea]: https://gitea.io + ProviderNameGitea ProviderNameAttr = "gitea" + // ProviderNameBitbucket is the [Bitbucket]. + // + // [Bitbucket]: https://bitbucket.org + ProviderNameBitbucket ProviderNameAttr = "bitbucket" +) + +// RefBaseTypeAttr is an attribute conforming to the vcs.ref.base.type semantic +// conventions. It represents the type of the [reference] in the repository. +// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +type RefBaseTypeAttr string + +var ( + // RefBaseTypeBranch is the [branch]. 
+ // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + RefBaseTypeBranch RefBaseTypeAttr = "branch" + // RefBaseTypeTag is the [tag]. + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + RefBaseTypeTag RefBaseTypeAttr = "tag" +) + +// RefHeadTypeAttr is an attribute conforming to the vcs.ref.head.type semantic +// conventions. It represents the type of the [reference] in the repository. +// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +type RefHeadTypeAttr string + +var ( + // RefHeadTypeBranch is the [branch]. + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + RefHeadTypeBranch RefHeadTypeAttr = "branch" + // RefHeadTypeTag is the [tag]. + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + RefHeadTypeTag RefHeadTypeAttr = "tag" +) + +// RefTypeAttr is an attribute conforming to the vcs.ref.type semantic +// conventions. It represents the type of the [reference] in the repository. +// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +type RefTypeAttr string + +var ( + // RefTypeBranch is the [branch]. + // + // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch + RefTypeBranch RefTypeAttr = "branch" + // RefTypeTag is the [tag]. + // + // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag + RefTypeTag RefTypeAttr = "tag" +) + +// RevisionDeltaDirectionAttr is an attribute conforming to the +// vcs.revision_delta.direction semantic conventions. It represents the type of +// revision comparison. +type RevisionDeltaDirectionAttr string + +var ( + // RevisionDeltaDirectionBehind is the how many revisions the change is behind + // the target ref. 
+ RevisionDeltaDirectionBehind RevisionDeltaDirectionAttr = "behind" + // RevisionDeltaDirectionAhead is the how many revisions the change is ahead of + // the target ref. + RevisionDeltaDirectionAhead RevisionDeltaDirectionAttr = "ahead" +) + +// ChangeCount is an instrument used to record metric values conforming to the +// "vcs.change.count" semantic conventions. It represents the number of changes +// (pull requests/merge requests/changelists) in a repository, categorized by +// their state (e.g. open or merged). +type ChangeCount struct { + metric.Int64UpDownCounter +} + +// NewChangeCount returns a new ChangeCount instrument. +func NewChangeCount( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (ChangeCount, error) { + // Check if the meter is nil. + if m == nil { + return ChangeCount{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "vcs.change.count", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of changes (pull requests/merge requests/changelists) in a repository, categorized by their state (e.g. open or merged)."), + metric.WithUnit("{change}"), + }, opt...)..., + ) + if err != nil { + return ChangeCount{noop.Int64UpDownCounter{}}, err + } + return ChangeCount{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ChangeCount) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (ChangeCount) Name() string { + return "vcs.change.count" +} + +// Unit returns the semantic convention unit of the instrument +func (ChangeCount) Unit() string { + return "{change}" +} + +// Description returns the semantic convention description of the instrument +func (ChangeCount) Description() string { + return "The number of changes (pull requests/merge requests/changelists) in a repository, categorized by their state (e.g. open or merged)." +} + +// Add adds incr to the existing count for attrs. 
+//
+// The changeState is the state of the change (pull request/merge
+// request/changelist).
+//
+// The repositoryUrlFull is the [canonical URL] of the repository providing
+// the complete HTTP(S) address in order to locate and identify the repository
+// through a browser.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical.
+func (m ChangeCount) Add(
+	ctx context.Context,
+	incr int64,
+	changeState ChangeStateAttr,
+	repositoryUrlFull string,
+	attrs ...attribute.KeyValue,
+) {
+	// No len(attrs)==0 fast path here: the required attributes
+	// (vcs.change.state, vcs.repository.url.full) must be recorded even when
+	// the caller passes no additional attributes.
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("vcs.change.state", string(changeState)),
+				attribute.String("vcs.repository.url.full", repositoryUrlFull),
+			)...,
+		),
+	)
+
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+func (m ChangeCount) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64UpDownCounter.Add(ctx, incr)
+		return
+	}
+
+	o := addOptPool.Get().(*[]metric.AddOption)
+	defer func() {
+		*o = (*o)[:0]
+		addOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AttrOwnerName returns an optional attribute for the "vcs.owner.name" semantic
+// convention. It represents the group owner within the version control system.
+func (ChangeCount) AttrOwnerName(val string) attribute.KeyValue {
+	return attribute.String("vcs.owner.name", val)
+}
+
+// AttrRepositoryName returns an optional attribute for the "vcs.repository.name"
+// semantic convention. It represents the human readable name of the repository.
+// It SHOULD NOT include any additional identifier like Group/SubGroup in GitLab +// or organization in GitHub. +func (ChangeCount) AttrRepositoryName(val string) attribute.KeyValue { + return attribute.String("vcs.repository.name", val) +} + +// AttrProviderName returns an optional attribute for the "vcs.provider.name" +// semantic convention. It represents the name of the version control system +// provider. +func (ChangeCount) AttrProviderName(val ProviderNameAttr) attribute.KeyValue { + return attribute.String("vcs.provider.name", string(val)) +} + +// ChangeDuration is an instrument used to record metric values conforming to the +// "vcs.change.duration" semantic conventions. It represents the time duration a +// change (pull request/merge request/changelist) has been in a given state. +type ChangeDuration struct { + metric.Float64Gauge +} + +// NewChangeDuration returns a new ChangeDuration instrument. +func NewChangeDuration( + m metric.Meter, + opt ...metric.Float64GaugeOption, +) (ChangeDuration, error) { + // Check if the meter is nil. + if m == nil { + return ChangeDuration{noop.Float64Gauge{}}, nil + } + + i, err := m.Float64Gauge( + "vcs.change.duration", + append([]metric.Float64GaugeOption{ + metric.WithDescription("The time duration a change (pull request/merge request/changelist) has been in a given state."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ChangeDuration{noop.Float64Gauge{}}, err + } + return ChangeDuration{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ChangeDuration) Inst() metric.Float64Gauge { + return m.Float64Gauge +} + +// Name returns the semantic convention name of the instrument. 
+func (ChangeDuration) Name() string {
+	return "vcs.change.duration"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (ChangeDuration) Unit() string {
+	return "s"
+}
+
+// Description returns the semantic convention description of the instrument
+func (ChangeDuration) Description() string {
+	return "The time duration a change (pull request/merge request/changelist) has been in a given state."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// The changeState is the state of the change (pull request/merge
+// request/changelist).
+//
+// The refHeadName is the name of the [reference] such as **branch** or
+// **tag** in the repository.
+//
+// The repositoryUrlFull is the [canonical URL] of the repository providing
+// the complete HTTP(S) address in order to locate and identify the repository
+// through a browser.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical.
+func (m ChangeDuration) Record(
+	ctx context.Context,
+	val float64,
+	changeState ChangeStateAttr,
+	refHeadName string,
+	repositoryUrlFull string,
+	attrs ...attribute.KeyValue,
+) {
+	// No len(attrs)==0 fast path here: the required attributes must always be
+	// attached to the recorded value, even with no additional attributes.
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("vcs.change.state", string(changeState)),
+				attribute.String("vcs.ref.head.name", refHeadName),
+				attribute.String("vcs.repository.url.full", repositoryUrlFull),
+			)...,
+		),
+	)
+
+	m.Float64Gauge.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m ChangeDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) { + if set.Len() == 0 { + m.Float64Gauge.Record(ctx, val) + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Float64Gauge.Record(ctx, val, *o...) +} + +// AttrOwnerName returns an optional attribute for the "vcs.owner.name" semantic +// convention. It represents the group owner within the version control system. +func (ChangeDuration) AttrOwnerName(val string) attribute.KeyValue { + return attribute.String("vcs.owner.name", val) +} + +// AttrRepositoryName returns an optional attribute for the "vcs.repository.name" +// semantic convention. It represents the human readable name of the repository. +// It SHOULD NOT include any additional identifier like Group/SubGroup in GitLab +// or organization in GitHub. +func (ChangeDuration) AttrRepositoryName(val string) attribute.KeyValue { + return attribute.String("vcs.repository.name", val) +} + +// AttrProviderName returns an optional attribute for the "vcs.provider.name" +// semantic convention. It represents the name of the version control system +// provider. +func (ChangeDuration) AttrProviderName(val ProviderNameAttr) attribute.KeyValue { + return attribute.String("vcs.provider.name", string(val)) +} + +// ChangeTimeToApproval is an instrument used to record metric values conforming +// to the "vcs.change.time_to_approval" semantic conventions. It represents the +// amount of time since its creation it took a change (pull request/merge +// request/changelist) to get the first approval. +type ChangeTimeToApproval struct { + metric.Float64Gauge +} + +// NewChangeTimeToApproval returns a new ChangeTimeToApproval instrument. +func NewChangeTimeToApproval( + m metric.Meter, + opt ...metric.Float64GaugeOption, +) (ChangeTimeToApproval, error) { + // Check if the meter is nil. 
+ if m == nil { + return ChangeTimeToApproval{noop.Float64Gauge{}}, nil + } + + i, err := m.Float64Gauge( + "vcs.change.time_to_approval", + append([]metric.Float64GaugeOption{ + metric.WithDescription("The amount of time since its creation it took a change (pull request/merge request/changelist) to get the first approval."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ChangeTimeToApproval{noop.Float64Gauge{}}, err + } + return ChangeTimeToApproval{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ChangeTimeToApproval) Inst() metric.Float64Gauge { + return m.Float64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (ChangeTimeToApproval) Name() string { + return "vcs.change.time_to_approval" +} + +// Unit returns the semantic convention unit of the instrument +func (ChangeTimeToApproval) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (ChangeTimeToApproval) Description() string { + return "The amount of time since its creation it took a change (pull request/merge request/changelist) to get the first approval." +} + +// Record records val to the current distribution for attrs. +// +// The refHeadName is the the name of the [reference] such as **branch** or +// **tag** in the repository. +// +// The repositoryUrlFull is the the [canonical URL] of the repository providing +// the complete HTTP(S) address in order to locate and identify the repository +// through a browser. +// +// All additional attrs passed are included in the recorded value. +// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. 
+func (m ChangeTimeToApproval) Record(
+	ctx context.Context,
+	val float64,
+	refHeadName string,
+	repositoryUrlFull string,
+	attrs ...attribute.KeyValue,
+) {
+	// No len(attrs)==0 fast path here: the required attributes
+	// (vcs.ref.head.name, vcs.repository.url.full) must be recorded even when
+	// the caller passes no additional attributes.
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("vcs.ref.head.name", refHeadName),
+				attribute.String("vcs.repository.url.full", repositoryUrlFull),
+			)...,
+		),
+	)
+
+	m.Float64Gauge.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m ChangeTimeToApproval) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Gauge.Record(ctx, val)
+		// return here so the empty-set fast path does not fall through and
+		// record the value a second time below.
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Gauge.Record(ctx, val, *o...)
+}
+
+// AttrOwnerName returns an optional attribute for the "vcs.owner.name" semantic
+// convention. It represents the group owner within the version control system.
+func (ChangeTimeToApproval) AttrOwnerName(val string) attribute.KeyValue {
+	return attribute.String("vcs.owner.name", val)
+}
+
+// AttrRefBaseName returns an optional attribute for the "vcs.ref.base.name"
+// semantic convention. It represents the name of the [reference] such as
+// **branch** or **tag** in the repository.
+//
+// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+func (ChangeTimeToApproval) AttrRefBaseName(val string) attribute.KeyValue {
+	return attribute.String("vcs.ref.base.name", val)
+}
+
+// AttrRepositoryName returns an optional attribute for the "vcs.repository.name"
+// semantic convention. It represents the human readable name of the repository.
+// It SHOULD NOT include any additional identifier like Group/SubGroup in GitLab
+// or organization in GitHub.
+func (ChangeTimeToApproval) AttrRepositoryName(val string) attribute.KeyValue { + return attribute.String("vcs.repository.name", val) +} + +// AttrProviderName returns an optional attribute for the "vcs.provider.name" +// semantic convention. It represents the name of the version control system +// provider. +func (ChangeTimeToApproval) AttrProviderName(val ProviderNameAttr) attribute.KeyValue { + return attribute.String("vcs.provider.name", string(val)) +} + +// AttrRefBaseRevision returns an optional attribute for the +// "vcs.ref.base.revision" semantic convention. It represents the revision, +// literally [revised version], The revision most often refers to a commit object +// in Git, or a revision number in SVN. +// +// [revised version]: https://www.merriam-webster.com/dictionary/revision +func (ChangeTimeToApproval) AttrRefBaseRevision(val string) attribute.KeyValue { + return attribute.String("vcs.ref.base.revision", val) +} + +// AttrRefHeadRevision returns an optional attribute for the +// "vcs.ref.head.revision" semantic convention. It represents the revision, +// literally [revised version], The revision most often refers to a commit object +// in Git, or a revision number in SVN. +// +// [revised version]: https://www.merriam-webster.com/dictionary/revision +func (ChangeTimeToApproval) AttrRefHeadRevision(val string) attribute.KeyValue { + return attribute.String("vcs.ref.head.revision", val) +} + +// ChangeTimeToMerge is an instrument used to record metric values conforming to +// the "vcs.change.time_to_merge" semantic conventions. It represents the amount +// of time since its creation it took a change (pull request/merge +// request/changelist) to get merged into the target(base) ref. +type ChangeTimeToMerge struct { + metric.Float64Gauge +} + +// NewChangeTimeToMerge returns a new ChangeTimeToMerge instrument. 
+func NewChangeTimeToMerge( + m metric.Meter, + opt ...metric.Float64GaugeOption, +) (ChangeTimeToMerge, error) { + // Check if the meter is nil. + if m == nil { + return ChangeTimeToMerge{noop.Float64Gauge{}}, nil + } + + i, err := m.Float64Gauge( + "vcs.change.time_to_merge", + append([]metric.Float64GaugeOption{ + metric.WithDescription("The amount of time since its creation it took a change (pull request/merge request/changelist) to get merged into the target(base) ref."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return ChangeTimeToMerge{noop.Float64Gauge{}}, err + } + return ChangeTimeToMerge{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ChangeTimeToMerge) Inst() metric.Float64Gauge { + return m.Float64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (ChangeTimeToMerge) Name() string { + return "vcs.change.time_to_merge" +} + +// Unit returns the semantic convention unit of the instrument +func (ChangeTimeToMerge) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (ChangeTimeToMerge) Description() string { + return "The amount of time since its creation it took a change (pull request/merge request/changelist) to get merged into the target(base) ref." +} + +// Record records val to the current distribution for attrs. +// +// The refHeadName is the the name of the [reference] such as **branch** or +// **tag** in the repository. +// +// The repositoryUrlFull is the the [canonical URL] of the repository providing +// the complete HTTP(S) address in order to locate and identify the repository +// through a browser. +// +// All additional attrs passed are included in the recorded value. +// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. 
+func (m ChangeTimeToMerge) Record(
+	ctx context.Context,
+	val float64,
+	refHeadName string,
+	repositoryUrlFull string,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Float64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("vcs.ref.head.name", refHeadName),
+				attribute.String("vcs.repository.url.full", repositoryUrlFull),
+			)...,
+		),
+	)
+
+	m.Float64Gauge.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m ChangeTimeToMerge) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Gauge.Record(ctx, val, *o...)
+}
+
+// AttrOwnerName returns an optional attribute for the "vcs.owner.name" semantic
+// convention. It represents the group owner within the version control system.
+func (ChangeTimeToMerge) AttrOwnerName(val string) attribute.KeyValue {
+	return attribute.String("vcs.owner.name", val)
+}
+
+// AttrRefBaseName returns an optional attribute for the "vcs.ref.base.name"
+// semantic convention. It represents the name of the [reference] such as
+// **branch** or **tag** in the repository.
+//
+// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+func (ChangeTimeToMerge) AttrRefBaseName(val string) attribute.KeyValue {
+	return attribute.String("vcs.ref.base.name", val)
+}
+
+// AttrRepositoryName returns an optional attribute for the "vcs.repository.name"
+// semantic convention. It represents the human readable name of the repository.
+// It SHOULD NOT include any additional identifier like Group/SubGroup in GitLab
+// or organization in GitHub.
+func (ChangeTimeToMerge) AttrRepositoryName(val string) attribute.KeyValue { + return attribute.String("vcs.repository.name", val) +} + +// AttrProviderName returns an optional attribute for the "vcs.provider.name" +// semantic convention. It represents the name of the version control system +// provider. +func (ChangeTimeToMerge) AttrProviderName(val ProviderNameAttr) attribute.KeyValue { + return attribute.String("vcs.provider.name", string(val)) +} + +// AttrRefBaseRevision returns an optional attribute for the +// "vcs.ref.base.revision" semantic convention. It represents the revision, +// literally [revised version], The revision most often refers to a commit object +// in Git, or a revision number in SVN. +// +// [revised version]: https://www.merriam-webster.com/dictionary/revision +func (ChangeTimeToMerge) AttrRefBaseRevision(val string) attribute.KeyValue { + return attribute.String("vcs.ref.base.revision", val) +} + +// AttrRefHeadRevision returns an optional attribute for the +// "vcs.ref.head.revision" semantic convention. It represents the revision, +// literally [revised version], The revision most often refers to a commit object +// in Git, or a revision number in SVN. +// +// [revised version]: https://www.merriam-webster.com/dictionary/revision +func (ChangeTimeToMerge) AttrRefHeadRevision(val string) attribute.KeyValue { + return attribute.String("vcs.ref.head.revision", val) +} + +// ContributorCount is an instrument used to record metric values conforming to +// the "vcs.contributor.count" semantic conventions. It represents the number of +// unique contributors to a repository. +type ContributorCount struct { + metric.Int64Gauge +} + +// NewContributorCount returns a new ContributorCount instrument. +func NewContributorCount( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (ContributorCount, error) { + // Check if the meter is nil. 
+ if m == nil { + return ContributorCount{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "vcs.contributor.count", + append([]metric.Int64GaugeOption{ + metric.WithDescription("The number of unique contributors to a repository."), + metric.WithUnit("{contributor}"), + }, opt...)..., + ) + if err != nil { + return ContributorCount{noop.Int64Gauge{}}, err + } + return ContributorCount{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m ContributorCount) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (ContributorCount) Name() string { + return "vcs.contributor.count" +} + +// Unit returns the semantic convention unit of the instrument +func (ContributorCount) Unit() string { + return "{contributor}" +} + +// Description returns the semantic convention description of the instrument +func (ContributorCount) Description() string { + return "The number of unique contributors to a repository." +} + +// Record records val to the current distribution for attrs. +// +// The repositoryUrlFull is the the [canonical URL] of the repository providing +// the complete HTTP(S) address in order to locate and identify the repository +// through a browser. +// +// All additional attrs passed are included in the recorded value. +// +// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. 
+func (m ContributorCount) Record(
+	ctx context.Context,
+	val int64,
+	repositoryUrlFull string,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("vcs.repository.url.full", repositoryUrlFull),
+			)...,
+		),
+	)
+
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m ContributorCount) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// AttrOwnerName returns an optional attribute for the "vcs.owner.name" semantic
+// convention. It represents the group owner within the version control system.
+func (ContributorCount) AttrOwnerName(val string) attribute.KeyValue {
+	return attribute.String("vcs.owner.name", val)
+}
+
+// AttrRepositoryName returns an optional attribute for the "vcs.repository.name"
+// semantic convention. It represents the human readable name of the repository.
+// It SHOULD NOT include any additional identifier like Group/SubGroup in GitLab
+// or organization in GitHub.
+func (ContributorCount) AttrRepositoryName(val string) attribute.KeyValue {
+	return attribute.String("vcs.repository.name", val)
+}
+
+// AttrProviderName returns an optional attribute for the "vcs.provider.name"
+// semantic convention. It represents the name of the version control system
+// provider.
+func (ContributorCount) AttrProviderName(val ProviderNameAttr) attribute.KeyValue { + return attribute.String("vcs.provider.name", string(val)) +} + +// RefCount is an instrument used to record metric values conforming to the +// "vcs.ref.count" semantic conventions. It represents the number of refs of type +// branch or tag in a repository. +type RefCount struct { + metric.Int64UpDownCounter +} + +// NewRefCount returns a new RefCount instrument. +func NewRefCount( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (RefCount, error) { + // Check if the meter is nil. + if m == nil { + return RefCount{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "vcs.ref.count", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of refs of type branch or tag in a repository."), + metric.WithUnit("{ref}"), + }, opt...)..., + ) + if err != nil { + return RefCount{noop.Int64UpDownCounter{}}, err + } + return RefCount{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m RefCount) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. +func (RefCount) Name() string { + return "vcs.ref.count" +} + +// Unit returns the semantic convention unit of the instrument +func (RefCount) Unit() string { + return "{ref}" +} + +// Description returns the semantic convention description of the instrument +func (RefCount) Description() string { + return "The number of refs of type branch or tag in a repository." +} + +// Add adds incr to the existing count for attrs. +// +// The refType is the the type of the [reference] in the repository. +// +// The repositoryUrlFull is the the [canonical URL] of the repository providing +// the complete HTTP(S) address in order to locate and identify the repository +// through a browser. +// +// All additional attrs passed are included in the recorded value. 
+// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. +func (m RefCount) Add( + ctx context.Context, + incr int64, + refType RefTypeAttr, + repositoryUrlFull string, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("vcs.ref.type", string(refType)), + attribute.String("vcs.repository.url.full", repositoryUrlFull), + )..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m RefCount) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrOwnerName returns an optional attribute for the "vcs.owner.name" semantic +// convention. It represents the group owner within the version control system. +func (RefCount) AttrOwnerName(val string) attribute.KeyValue { + return attribute.String("vcs.owner.name", val) +} + +// AttrRepositoryName returns an optional attribute for the "vcs.repository.name" +// semantic convention. It represents the human readable name of the repository. +// It SHOULD NOT include any additional identifier like Group/SubGroup in GitLab +// or organization in GitHub. 
+func (RefCount) AttrRepositoryName(val string) attribute.KeyValue { + return attribute.String("vcs.repository.name", val) +} + +// AttrProviderName returns an optional attribute for the "vcs.provider.name" +// semantic convention. It represents the name of the version control system +// provider. +func (RefCount) AttrProviderName(val ProviderNameAttr) attribute.KeyValue { + return attribute.String("vcs.provider.name", string(val)) +} + +// RefLinesDelta is an instrument used to record metric values conforming to the +// "vcs.ref.lines_delta" semantic conventions. It represents the number of lines +// added/removed in a ref (branch) relative to the ref from the +// `vcs.ref.base.name` attribute. +type RefLinesDelta struct { + metric.Int64Gauge +} + +// NewRefLinesDelta returns a new RefLinesDelta instrument. +func NewRefLinesDelta( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (RefLinesDelta, error) { + // Check if the meter is nil. + if m == nil { + return RefLinesDelta{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "vcs.ref.lines_delta", + append([]metric.Int64GaugeOption{ + metric.WithDescription("The number of lines added/removed in a ref (branch) relative to the ref from the `vcs.ref.base.name` attribute."), + metric.WithUnit("{line}"), + }, opt...)..., + ) + if err != nil { + return RefLinesDelta{noop.Int64Gauge{}}, err + } + return RefLinesDelta{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m RefLinesDelta) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. 
+func (RefLinesDelta) Name() string { + return "vcs.ref.lines_delta" +} + +// Unit returns the semantic convention unit of the instrument +func (RefLinesDelta) Unit() string { + return "{line}" +} + +// Description returns the semantic convention description of the instrument +func (RefLinesDelta) Description() string { + return "The number of lines added/removed in a ref (branch) relative to the ref from the `vcs.ref.base.name` attribute." +} + +// Record records val to the current distribution for attrs. +// +// The lineChangeType is the the type of line change being measured on a branch +// or change. +// +// The refBaseName is the the name of the [reference] such as **branch** or +// **tag** in the repository. +// +// The refBaseType is the the type of the [reference] in the repository. +// +// The refHeadName is the the name of the [reference] such as **branch** or +// **tag** in the repository. +// +// The refHeadType is the the type of the [reference] in the repository. +// +// The repositoryUrlFull is the the [canonical URL] of the repository providing +// the complete HTTP(S) address in order to locate and identify the repository +// through a browser. +// +// All additional attrs passed are included in the recorded value. +// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. +// +// This metric should be reported for each `vcs.line_change.type` value. For +// example if a ref added 3 lines and removed 2 lines, +// instrumentation SHOULD report two measurements: 3 and 2 (both positive +// numbers). 
+// If number of lines added/removed should be calculated from the start of time,
+// then `vcs.ref.base.name` SHOULD be set to an empty string.
+func (m RefLinesDelta) Record(
+	ctx context.Context,
+	val int64,
+	lineChangeType LineChangeTypeAttr,
+	refBaseName string,
+	refBaseType RefBaseTypeAttr,
+	refHeadName string,
+	refHeadType RefHeadTypeAttr,
+	repositoryUrlFull string,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("vcs.line_change.type", string(lineChangeType)),
+				attribute.String("vcs.ref.base.name", refBaseName),
+				attribute.String("vcs.ref.base.type", string(refBaseType)),
+				attribute.String("vcs.ref.head.name", refHeadName),
+				attribute.String("vcs.ref.head.type", string(refHeadType)),
+				attribute.String("vcs.repository.url.full", repositoryUrlFull),
+			)...,
+		),
+	)
+
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// This metric should be reported for each `vcs.line_change.type` value. For
+// example if a ref added 3 lines and removed 2 lines,
+// instrumentation SHOULD report two measurements: 3 and 2 (both positive
+// numbers).
+// If number of lines added/removed should be calculated from the start of time,
+// then `vcs.ref.base.name` SHOULD be set to an empty string.
+func (m RefLinesDelta) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// AttrChangeID returns an optional attribute for the "vcs.change.id" semantic
+// convention. 
It represents the ID of the change (pull request/merge +// request/changelist) if applicable. This is usually a unique (within +// repository) identifier generated by the VCS system. +func (RefLinesDelta) AttrChangeID(val string) attribute.KeyValue { + return attribute.String("vcs.change.id", val) +} + +// AttrOwnerName returns an optional attribute for the "vcs.owner.name" semantic +// convention. It represents the group owner within the version control system. +func (RefLinesDelta) AttrOwnerName(val string) attribute.KeyValue { + return attribute.String("vcs.owner.name", val) +} + +// AttrRepositoryName returns an optional attribute for the "vcs.repository.name" +// semantic convention. It represents the human readable name of the repository. +// It SHOULD NOT include any additional identifier like Group/SubGroup in GitLab +// or organization in GitHub. +func (RefLinesDelta) AttrRepositoryName(val string) attribute.KeyValue { + return attribute.String("vcs.repository.name", val) +} + +// AttrProviderName returns an optional attribute for the "vcs.provider.name" +// semantic convention. It represents the name of the version control system +// provider. +func (RefLinesDelta) AttrProviderName(val ProviderNameAttr) attribute.KeyValue { + return attribute.String("vcs.provider.name", string(val)) +} + +// RefRevisionsDelta is an instrument used to record metric values conforming to +// the "vcs.ref.revisions_delta" semantic conventions. It represents the number +// of revisions (commits) a ref (branch) is ahead/behind the branch from the +// `vcs.ref.base.name` attribute. +type RefRevisionsDelta struct { + metric.Int64Gauge +} + +// NewRefRevisionsDelta returns a new RefRevisionsDelta instrument. +func NewRefRevisionsDelta( + m metric.Meter, + opt ...metric.Int64GaugeOption, +) (RefRevisionsDelta, error) { + // Check if the meter is nil. 
+ if m == nil { + return RefRevisionsDelta{noop.Int64Gauge{}}, nil + } + + i, err := m.Int64Gauge( + "vcs.ref.revisions_delta", + append([]metric.Int64GaugeOption{ + metric.WithDescription("The number of revisions (commits) a ref (branch) is ahead/behind the branch from the `vcs.ref.base.name` attribute."), + metric.WithUnit("{revision}"), + }, opt...)..., + ) + if err != nil { + return RefRevisionsDelta{noop.Int64Gauge{}}, err + } + return RefRevisionsDelta{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m RefRevisionsDelta) Inst() metric.Int64Gauge { + return m.Int64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (RefRevisionsDelta) Name() string { + return "vcs.ref.revisions_delta" +} + +// Unit returns the semantic convention unit of the instrument +func (RefRevisionsDelta) Unit() string { + return "{revision}" +} + +// Description returns the semantic convention description of the instrument +func (RefRevisionsDelta) Description() string { + return "The number of revisions (commits) a ref (branch) is ahead/behind the branch from the `vcs.ref.base.name` attribute." +} + +// Record records val to the current distribution for attrs. +// +// The refBaseName is the the name of the [reference] such as **branch** or +// **tag** in the repository. +// +// The refBaseType is the the type of the [reference] in the repository. +// +// The refHeadName is the the name of the [reference] such as **branch** or +// **tag** in the repository. +// +// The refHeadType is the the type of the [reference] in the repository. +// +// The repositoryUrlFull is the the [canonical URL] of the repository providing +// the complete HTTP(S) address in order to locate and identify the repository +// through a browser. +// +// The revisionDeltaDirection is the the type of revision comparison. +// +// All additional attrs passed are included in the recorded value. 
+// +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +// [reference]: https://git-scm.com/docs/gitglossary#def_ref +// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical. +// +// This metric should be reported for each `vcs.revision_delta.direction` value. +// For example if branch `a` is 3 commits behind and 2 commits ahead of `trunk`, +// instrumentation SHOULD report two measurements: 3 and 2 (both positive +// numbers) and `vcs.ref.base.name` is set to `trunk`. +func (m RefRevisionsDelta) Record( + ctx context.Context, + val int64, + refBaseName string, + refBaseType RefBaseTypeAttr, + refHeadName string, + refHeadType RefHeadTypeAttr, + repositoryUrlFull string, + revisionDeltaDirection RevisionDeltaDirectionAttr, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64Gauge.Record(ctx, val) + return + } + + o := recOptPool.Get().(*[]metric.RecordOption) + defer func() { + *o = (*o)[:0] + recOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + append( + attrs, + attribute.String("vcs.ref.base.name", refBaseName), + attribute.String("vcs.ref.base.type", string(refBaseType)), + attribute.String("vcs.ref.head.name", refHeadName), + attribute.String("vcs.ref.head.type", string(refHeadType)), + attribute.String("vcs.repository.url.full", repositoryUrlFull), + attribute.String("vcs.revision_delta.direction", string(revisionDeltaDirection)), + )..., + ), + ) + + m.Int64Gauge.Record(ctx, val, *o...) +} + +// RecordSet records val to the current distribution for set. +// +// This metric should be reported for each `vcs.revision_delta.direction` value. 
+// For example if branch `a` is 3 commits behind and 2 commits ahead of `trunk`,
+// instrumentation SHOULD report two measurements: 3 and 2 (both positive
+// numbers) and `vcs.ref.base.name` is set to `trunk`.
+func (m RefRevisionsDelta) RecordSet(ctx context.Context, val int64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Int64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Int64Gauge.Record(ctx, val, *o...)
+}
+
+// AttrChangeID returns an optional attribute for the "vcs.change.id" semantic
+// convention. It represents the ID of the change (pull request/merge
+// request/changelist) if applicable. This is usually a unique (within
+// repository) identifier generated by the VCS system.
+func (RefRevisionsDelta) AttrChangeID(val string) attribute.KeyValue {
+	return attribute.String("vcs.change.id", val)
+}
+
+// AttrOwnerName returns an optional attribute for the "vcs.owner.name" semantic
+// convention. It represents the group owner within the version control system.
+func (RefRevisionsDelta) AttrOwnerName(val string) attribute.KeyValue {
+	return attribute.String("vcs.owner.name", val)
+}
+
+// AttrRepositoryName returns an optional attribute for the "vcs.repository.name"
+// semantic convention. It represents the human readable name of the repository.
+// It SHOULD NOT include any additional identifier like Group/SubGroup in GitLab
+// or organization in GitHub.
+func (RefRevisionsDelta) AttrRepositoryName(val string) attribute.KeyValue {
+	return attribute.String("vcs.repository.name", val)
+}
+
+// AttrProviderName returns an optional attribute for the "vcs.provider.name"
+// semantic convention. It represents the name of the version control system
+// provider.
+func (RefRevisionsDelta) AttrProviderName(val ProviderNameAttr) attribute.KeyValue { + return attribute.String("vcs.provider.name", string(val)) +} + +// RefTime is an instrument used to record metric values conforming to the +// "vcs.ref.time" semantic conventions. It represents the time a ref (branch) +// created from the default branch (trunk) has existed. The `ref.type` attribute +// will always be `branch`. +type RefTime struct { + metric.Float64Gauge +} + +// NewRefTime returns a new RefTime instrument. +func NewRefTime( + m metric.Meter, + opt ...metric.Float64GaugeOption, +) (RefTime, error) { + // Check if the meter is nil. + if m == nil { + return RefTime{noop.Float64Gauge{}}, nil + } + + i, err := m.Float64Gauge( + "vcs.ref.time", + append([]metric.Float64GaugeOption{ + metric.WithDescription("Time a ref (branch) created from the default branch (trunk) has existed. The `ref.type` attribute will always be `branch`."), + metric.WithUnit("s"), + }, opt...)..., + ) + if err != nil { + return RefTime{noop.Float64Gauge{}}, err + } + return RefTime{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m RefTime) Inst() metric.Float64Gauge { + return m.Float64Gauge +} + +// Name returns the semantic convention name of the instrument. +func (RefTime) Name() string { + return "vcs.ref.time" +} + +// Unit returns the semantic convention unit of the instrument +func (RefTime) Unit() string { + return "s" +} + +// Description returns the semantic convention description of the instrument +func (RefTime) Description() string { + return "Time a ref (branch) created from the default branch (trunk) has existed. The `ref.type` attribute will always be `branch`." +} + +// Record records val to the current distribution for attrs. +// +// The refHeadName is the the name of the [reference] such as **branch** or +// **tag** in the repository. +// +// The refHeadType is the the type of the [reference] in the repository. 
+//
+// The repositoryUrlFull is the the [canonical URL] of the repository providing
+// the complete HTTP(S) address in order to locate and identify the repository
+// through a browser.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical.
+func (m RefTime) Record(
+	ctx context.Context,
+	val float64,
+	refHeadName string,
+	refHeadType RefHeadTypeAttr,
+	repositoryUrlFull string,
+	attrs ...attribute.KeyValue,
+) {
+	if len(attrs) == 0 {
+		m.Float64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(
+		*o,
+		metric.WithAttributes(
+			append(
+				attrs,
+				attribute.String("vcs.ref.head.name", refHeadName),
+				attribute.String("vcs.ref.head.type", string(refHeadType)),
+				attribute.String("vcs.repository.url.full", repositoryUrlFull),
+			)...,
+		),
+	)
+
+	m.Float64Gauge.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+func (m RefTime) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Gauge.Record(ctx, val)
+		return
+	}
+
+	o := recOptPool.Get().(*[]metric.RecordOption)
+	defer func() {
+		*o = (*o)[:0]
+		recOptPool.Put(o)
+	}()
+
+	*o = append(*o, metric.WithAttributeSet(set))
+	m.Float64Gauge.Record(ctx, val, *o...)
+}
+
+// AttrOwnerName returns an optional attribute for the "vcs.owner.name" semantic
+// convention. It represents the group owner within the version control system.
+func (RefTime) AttrOwnerName(val string) attribute.KeyValue { + return attribute.String("vcs.owner.name", val) +} + +// AttrRepositoryName returns an optional attribute for the "vcs.repository.name" +// semantic convention. It represents the human readable name of the repository. +// It SHOULD NOT include any additional identifier like Group/SubGroup in GitLab +// or organization in GitHub. +func (RefTime) AttrRepositoryName(val string) attribute.KeyValue { + return attribute.String("vcs.repository.name", val) +} + +// AttrProviderName returns an optional attribute for the "vcs.provider.name" +// semantic convention. It represents the name of the version control system +// provider. +func (RefTime) AttrProviderName(val ProviderNameAttr) attribute.KeyValue { + return attribute.String("vcs.provider.name", string(val)) +} + +// RepositoryCount is an instrument used to record metric values conforming to +// the "vcs.repository.count" semantic conventions. It represents the number of +// repositories in an organization. +type RepositoryCount struct { + metric.Int64UpDownCounter +} + +// NewRepositoryCount returns a new RepositoryCount instrument. +func NewRepositoryCount( + m metric.Meter, + opt ...metric.Int64UpDownCounterOption, +) (RepositoryCount, error) { + // Check if the meter is nil. + if m == nil { + return RepositoryCount{noop.Int64UpDownCounter{}}, nil + } + + i, err := m.Int64UpDownCounter( + "vcs.repository.count", + append([]metric.Int64UpDownCounterOption{ + metric.WithDescription("The number of repositories in an organization."), + metric.WithUnit("{repository}"), + }, opt...)..., + ) + if err != nil { + return RepositoryCount{noop.Int64UpDownCounter{}}, err + } + return RepositoryCount{i}, nil +} + +// Inst returns the underlying metric instrument. +func (m RepositoryCount) Inst() metric.Int64UpDownCounter { + return m.Int64UpDownCounter +} + +// Name returns the semantic convention name of the instrument. 
+func (RepositoryCount) Name() string { + return "vcs.repository.count" +} + +// Unit returns the semantic convention unit of the instrument +func (RepositoryCount) Unit() string { + return "{repository}" +} + +// Description returns the semantic convention description of the instrument +func (RepositoryCount) Description() string { + return "The number of repositories in an organization." +} + +// Add adds incr to the existing count for attrs. +// +// All additional attrs passed are included in the recorded value. +func (m RepositoryCount) Add( + ctx context.Context, + incr int64, + attrs ...attribute.KeyValue, +) { + if len(attrs) == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append( + *o, + metric.WithAttributes( + attrs..., + ), + ) + + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AddSet adds incr to the existing count for set. +func (m RepositoryCount) AddSet(ctx context.Context, incr int64, set attribute.Set) { + if set.Len() == 0 { + m.Int64UpDownCounter.Add(ctx, incr) + return + } + + o := addOptPool.Get().(*[]metric.AddOption) + defer func() { + *o = (*o)[:0] + addOptPool.Put(o) + }() + + *o = append(*o, metric.WithAttributeSet(set)) + m.Int64UpDownCounter.Add(ctx, incr, *o...) +} + +// AttrOwnerName returns an optional attribute for the "vcs.owner.name" semantic +// convention. It represents the group owner within the version control system. +func (RepositoryCount) AttrOwnerName(val string) attribute.KeyValue { + return attribute.String("vcs.owner.name", val) +} + +// AttrProviderName returns an optional attribute for the "vcs.provider.name" +// semantic convention. It represents the name of the version control system +// provider. 
+func (RepositoryCount) AttrProviderName(val ProviderNameAttr) attribute.KeyValue { + return attribute.String("vcs.provider.name", string(val)) +} \ No newline at end of file diff --git a/trace/auto.go b/trace/auto.go index f3aa398138e..8763936a846 100644 --- a/trace/auto.go +++ b/trace/auto.go @@ -20,7 +20,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" "go.opentelemetry.io/otel/trace/embedded" "go.opentelemetry.io/otel/trace/internal/telemetry" ) @@ -39,7 +39,7 @@ type autoTracerProvider struct{ embedded.TracerProvider } var _ TracerProvider = autoTracerProvider{} -func (p autoTracerProvider) Tracer(name string, opts ...TracerOption) Tracer { +func (autoTracerProvider) Tracer(name string, opts ...TracerOption) Tracer { cfg := NewTracerConfig(opts...) return autoTracer{ name: name, @@ -81,7 +81,7 @@ func (t autoTracer) Start(ctx context.Context, name string, opts ...SpanStartOpt // Expected to be implemented in eBPF. 
// //go:noinline -func (t *autoTracer) start( +func (*autoTracer) start( ctx context.Context, spanPtr *autoSpan, psc *SpanContext, diff --git a/trace/auto_test.go b/trace/auto_test.go index 28a59cc7b5f..be7e7ca5b56 100644 --- a/trace/auto_test.go +++ b/trace/auto_test.go @@ -18,7 +18,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.34.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" "go.opentelemetry.io/otel/trace/internal/telemetry" ) @@ -125,7 +125,7 @@ func TestTracerProviderConcurrentSafe(t *testing.T) { defer close(done) var wg sync.WaitGroup - for i := 0; i < goroutines; i++ { + for i := range goroutines { wg.Add(1) go func(name, version string) { defer wg.Done() @@ -207,7 +207,7 @@ func TestTracerConcurrentSafe(t *testing.T) { defer close(done) var wg sync.WaitGroup - for i := 0; i < goroutines; i++ { + for i := range goroutines { wg.Add(1) go func(name string) { defer wg.Done() @@ -1041,7 +1041,7 @@ func TestSpanConcurrentSafe(t *testing.T) { defer close(done) var wg sync.WaitGroup - for i := 0; i < nGoroutine; i++ { + for i := range nGoroutine { wg.Add(1) go func(n int) { defer wg.Done() @@ -1074,7 +1074,7 @@ func TestSpanConcurrentSafe(t *testing.T) { ctx := context.Background() var wg sync.WaitGroup - for i := 0; i < nSpans; i++ { + for i := range nSpans { wg.Add(1) go func(n int) { defer wg.Done() @@ -1094,7 +1094,7 @@ func TestSpanConcurrentSafe(t *testing.T) { defer close(done) var wg sync.WaitGroup - for i := 0; i < nTracers; i++ { + for i := range nTracers { wg.Add(1) go func(n int) { defer wg.Done() diff --git a/trace/config.go b/trace/config.go index 9c0b720a4d6..aea11a2b52c 100644 --- a/trace/config.go +++ b/trace/config.go @@ -73,7 +73,7 @@ func (cfg *SpanConfig) Timestamp() time.Time { return cfg.timestamp } -// StackTrace checks whether stack trace capturing is enabled. +// StackTrace reports whether stack trace capturing is enabled. 
func (cfg *SpanConfig) StackTrace() bool { return cfg.stackTrace } @@ -154,7 +154,7 @@ func (cfg *EventConfig) Timestamp() time.Time { return cfg.timestamp } -// StackTrace checks whether stack trace capturing is enabled. +// StackTrace reports whether stack trace capturing is enabled. func (cfg *EventConfig) StackTrace() bool { return cfg.stackTrace } diff --git a/trace/go.mod b/trace/go.mod index 58fb4e5e02b..31e50801e72 100644 --- a/trace/go.mod +++ b/trace/go.mod @@ -6,8 +6,8 @@ replace go.opentelemetry.io/otel => ../ require ( github.com/google/go-cmp v0.7.0 - github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/otel v1.37.0 + github.com/stretchr/testify v1.11.1 + go.opentelemetry.io/otel v1.38.0 ) require ( diff --git a/trace/go.sum b/trace/go.sum index 0e926776e98..e1f9146e420 100644 --- a/trace/go.sum +++ b/trace/go.sum @@ -4,8 +4,8 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/trace/hex.go b/trace/hex.go new file mode 100644 index 00000000000..1cbef1d4b94 --- /dev/null +++ b/trace/hex.go @@ -0,0 +1,38 @@ +// Copyright 
The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +const ( + // hexLU is a hex lookup table of the 16 lowercase hex digits. + // The character values of the string are indexed at the equivalent + // hexadecimal value they represent. This table efficiently encodes byte data + // into a string representation of hexadecimal. + hexLU = "0123456789abcdef" + + // hexRev is a reverse hex lookup table for lowercase hex digits. + // The table is efficiently decodes a hexadecimal string into bytes. + // Valid hexadecimal characters are indexed at their respective values. All + // other invalid ASCII characters are represented with '\xff'. + // + // The '\xff' character is used as invalid because no valid character has + // the upper 4 bits set. Meaning, an efficient validation can be performed + // over multiple character parsing by checking these bits remain zero. + hexRev = "" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\x0a\x0b\x0c\x0d\x0e\x0f\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + 
"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" + + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +) diff --git a/trace/internal/telemetry/attr.go b/trace/internal/telemetry/attr.go index f663547b4ee..ff0f6eac62e 100644 --- a/trace/internal/telemetry/attr.go +++ b/trace/internal/telemetry/attr.go @@ -52,7 +52,7 @@ func Map(key string, value ...Attr) Attr { return Attr{key, MapValue(value...)} } -// Equal returns if a is equal to b. +// Equal reports whether a is equal to b. func (a Attr) Equal(b Attr) bool { return a.Key == b.Key && a.Value.Equal(b.Value) } diff --git a/trace/internal/telemetry/id.go b/trace/internal/telemetry/id.go index 7b1ae3c4ea8..bea56f2e7d3 100644 --- a/trace/internal/telemetry/id.go +++ b/trace/internal/telemetry/id.go @@ -22,7 +22,7 @@ func (tid TraceID) String() string { return hex.EncodeToString(tid[:]) } -// IsEmpty returns false if id contains at least one non-zero byte. +// IsEmpty reports whether the TraceID contains only zero bytes. func (tid TraceID) IsEmpty() bool { return tid == [traceIDSize]byte{} } @@ -50,7 +50,7 @@ func (sid SpanID) String() string { return hex.EncodeToString(sid[:]) } -// IsEmpty returns true if the span ID contains at least one non-zero byte. +// IsEmpty reports whether the SpanID contains only zero bytes. func (sid SpanID) IsEmpty() bool { return sid == [spanIDSize]byte{} } @@ -82,7 +82,7 @@ func marshalJSON(id []byte) ([]byte, error) { } // unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes. 
-func unmarshalJSON(dst []byte, src []byte) error { +func unmarshalJSON(dst, src []byte) error { if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' { src = src[1 : l-1] } diff --git a/trace/internal/telemetry/test/conversion_test.go b/trace/internal/telemetry/test/conversion_test.go index feabc78eb80..3d41ac04790 100644 --- a/trace/internal/telemetry/test/conversion_test.go +++ b/trace/internal/telemetry/test/conversion_test.go @@ -11,7 +11,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/ptrace" diff --git a/trace/internal/telemetry/test/go.mod b/trace/internal/telemetry/test/go.mod index 9e0811d51cb..5d29ac80156 100644 --- a/trace/internal/telemetry/test/go.mod +++ b/trace/internal/telemetry/test/go.mod @@ -3,9 +3,9 @@ module go.opentelemetry.io/otel/trace/internal/telemetry/test go 1.23.0 require ( - github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/collector/pdata v1.34.0 - go.opentelemetry.io/otel/trace v1.37.0 + github.com/stretchr/testify v1.11.1 + go.opentelemetry.io/collector/pdata v1.38.0 + go.opentelemetry.io/otel/trace v1.38.0 ) require ( @@ -14,16 +14,16 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/kr/text v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.13.1 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/net v0.41.0 // indirect - golang.org/x/sys v0.33.0 // indirect - golang.org/x/text v0.26.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect - google.golang.org/grpc v1.73.0 // indirect - google.golang.org/protobuf v1.36.6 // indirect + 
golang.org/x/net v0.43.0 // indirect + golang.org/x/sys v0.35.0 // indirect + golang.org/x/text v0.28.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/grpc v1.75.0 // indirect + google.golang.org/protobuf v1.36.8 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/trace/internal/telemetry/test/go.sum b/trace/internal/telemetry/test/go.sum index 4e8e06b796f..3861cbf5a26 100644 --- a/trace/internal/telemetry/test/go.sum +++ b/trace/internal/telemetry/test/go.sum @@ -2,8 +2,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= @@ -26,26 +26,27 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 
h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/collector/pdata v1.34.0 h1:2vwYftckXe7pWxI9mfSo+tw3wqdGNrYpMbDx/5q6rw8= -go.opentelemetry.io/collector/pdata v1.34.0/go.mod h1:StPHMFkhLBellRWrULq0DNjv4znCDJZP6La4UuC+JHI= -go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= -go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= 
-go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= -go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= +go.opentelemetry.io/collector/pdata v1.38.0 h1:94LzVKMQM8R7RFJ8Z1+sL51IkI90TDfTc/ipH3mPUro= +go.opentelemetry.io/collector/pdata v1.38.0/go.mod h1:DSvnwj37IKyQj2hpB97cGITyauR8tvAauJ6/gsxg8mg= +go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= +go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= +go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -59,20 +60,20 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= -golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= -golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -81,12 +82,14 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 
h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= -google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= -google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc= +google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= +google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/trace/internal/telemetry/value.go b/trace/internal/telemetry/value.go index ae9ce102a9a..cb7927b8167 100644 --- a/trace/internal/telemetry/value.go +++ b/trace/internal/telemetry/value.go @@ -257,10 +257,10 @@ func (v Value) Kind() ValueKind { } } -// Empty returns if v does not hold any value. +// Empty reports whether v does not hold any value. 
func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty } -// Equal returns if v is equal to w. +// Equal reports whether v is equal to w. func (v Value) Equal(w Value) bool { k1 := v.Kind() k2 := w.Kind() diff --git a/trace/noop.go b/trace/noop.go index 0f56e4dbb34..400fab12387 100644 --- a/trace/noop.go +++ b/trace/noop.go @@ -26,7 +26,7 @@ type noopTracerProvider struct{ embedded.TracerProvider } var _ TracerProvider = noopTracerProvider{} // Tracer returns noop implementation of Tracer. -func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer { +func (noopTracerProvider) Tracer(string, ...TracerOption) Tracer { return noopTracer{} } @@ -37,7 +37,7 @@ var _ Tracer = noopTracer{} // Start carries forward a non-recording Span, if one is present in the context, otherwise it // creates a no-op Span. -func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption) (context.Context, Span) { +func (noopTracer) Start(ctx context.Context, _ string, _ ...SpanStartOption) (context.Context, Span) { span := SpanFromContext(ctx) if _, ok := span.(nonRecordingSpan); !ok { // span is likely already a noopSpan, but let's be sure diff --git a/trace/noop/noop.go b/trace/noop/noop.go index 64a4f1b362f..689d220df7d 100644 --- a/trace/noop/noop.go +++ b/trace/noop/noop.go @@ -51,7 +51,7 @@ type Tracer struct{ embedded.Tracer } // If ctx contains a span context, the returned span will also contain that // span context. If the span context in ctx is for a non-recording span, that // span instance will be returned directly. 
-func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) { +func (Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) { span := trace.SpanFromContext(ctx) // If the parent context contains a non-zero span context, that span diff --git a/trace/trace.go b/trace/trace.go index d49adf671b9..ee6f4bcb2aa 100644 --- a/trace/trace.go +++ b/trace/trace.go @@ -4,8 +4,6 @@ package trace // import "go.opentelemetry.io/otel/trace" import ( - "bytes" - "encoding/hex" "encoding/json" ) @@ -38,21 +36,47 @@ var ( _ json.Marshaler = nilTraceID ) -// IsValid checks whether the trace TraceID is valid. A valid trace ID does +// IsValid reports whether the trace TraceID is valid. A valid trace ID does // not consist of zeros only. func (t TraceID) IsValid() bool { - return !bytes.Equal(t[:], nilTraceID[:]) + return t != nilTraceID } // MarshalJSON implements a custom marshal function to encode TraceID // as a hex string. func (t TraceID) MarshalJSON() ([]byte, error) { - return json.Marshal(t.String()) + b := [32 + 2]byte{0: '"', 33: '"'} + h := t.hexBytes() + copy(b[1:], h[:]) + return b[:], nil } // String returns the hex string representation form of a TraceID. func (t TraceID) String() string { - return hex.EncodeToString(t[:]) + h := t.hexBytes() + return string(h[:]) +} + +// hexBytes returns the hex string representation form of a TraceID. 
+func (t TraceID) hexBytes() [32]byte { + return [32]byte{ + hexLU[t[0x0]>>4], hexLU[t[0x0]&0xf], + hexLU[t[0x1]>>4], hexLU[t[0x1]&0xf], + hexLU[t[0x2]>>4], hexLU[t[0x2]&0xf], + hexLU[t[0x3]>>4], hexLU[t[0x3]&0xf], + hexLU[t[0x4]>>4], hexLU[t[0x4]&0xf], + hexLU[t[0x5]>>4], hexLU[t[0x5]&0xf], + hexLU[t[0x6]>>4], hexLU[t[0x6]&0xf], + hexLU[t[0x7]>>4], hexLU[t[0x7]&0xf], + hexLU[t[0x8]>>4], hexLU[t[0x8]&0xf], + hexLU[t[0x9]>>4], hexLU[t[0x9]&0xf], + hexLU[t[0xa]>>4], hexLU[t[0xa]&0xf], + hexLU[t[0xb]>>4], hexLU[t[0xb]&0xf], + hexLU[t[0xc]>>4], hexLU[t[0xc]&0xf], + hexLU[t[0xd]>>4], hexLU[t[0xd]&0xf], + hexLU[t[0xe]>>4], hexLU[t[0xe]&0xf], + hexLU[t[0xf]>>4], hexLU[t[0xf]&0xf], + } } // SpanID is a unique identity of a span in a trace. @@ -63,21 +87,38 @@ var ( _ json.Marshaler = nilSpanID ) -// IsValid checks whether the SpanID is valid. A valid SpanID does not consist +// IsValid reports whether the SpanID is valid. A valid SpanID does not consist // of zeros only. func (s SpanID) IsValid() bool { - return !bytes.Equal(s[:], nilSpanID[:]) + return s != nilSpanID } // MarshalJSON implements a custom marshal function to encode SpanID // as a hex string. func (s SpanID) MarshalJSON() ([]byte, error) { - return json.Marshal(s.String()) + b := [16 + 2]byte{0: '"', 17: '"'} + h := s.hexBytes() + copy(b[1:], h[:]) + return b[:], nil } // String returns the hex string representation form of a SpanID. 
func (s SpanID) String() string { - return hex.EncodeToString(s[:]) + b := s.hexBytes() + return string(b[:]) +} + +func (s SpanID) hexBytes() [16]byte { + return [16]byte{ + hexLU[s[0]>>4], hexLU[s[0]&0xf], + hexLU[s[1]>>4], hexLU[s[1]&0xf], + hexLU[s[2]>>4], hexLU[s[2]&0xf], + hexLU[s[3]>>4], hexLU[s[3]&0xf], + hexLU[s[4]>>4], hexLU[s[4]&0xf], + hexLU[s[5]>>4], hexLU[s[5]&0xf], + hexLU[s[6]>>4], hexLU[s[6]&0xf], + hexLU[s[7]>>4], hexLU[s[7]&0xf], + } } // TraceIDFromHex returns a TraceID from a hex string if it is compliant with @@ -85,65 +126,58 @@ func (s SpanID) String() string { // https://www.w3.org/TR/trace-context/#trace-id // nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`. func TraceIDFromHex(h string) (TraceID, error) { - t := TraceID{} if len(h) != 32 { - return t, errInvalidTraceIDLength + return [16]byte{}, errInvalidTraceIDLength } - - if err := decodeHex(h, t[:]); err != nil { - return t, err + var b [16]byte + invalidMark := byte(0) + for i := 0; i < len(h); i += 4 { + b[i/2] = (hexRev[h[i]] << 4) | hexRev[h[i+1]] + b[i/2+1] = (hexRev[h[i+2]] << 4) | hexRev[h[i+3]] + invalidMark |= hexRev[h[i]] | hexRev[h[i+1]] | hexRev[h[i+2]] | hexRev[h[i+3]] } - - if !t.IsValid() { - return t, errNilTraceID + // If the upper 4 bits of any byte are not zero, there was an invalid hex + // character since invalid hex characters are 0xff in hexRev. + if invalidMark&0xf0 != 0 { + return [16]byte{}, errInvalidHexID + } + // If we didn't set any bits, then h was all zeros. + if invalidMark == 0 { + return [16]byte{}, errNilTraceID } - return t, nil + return b, nil } // SpanIDFromHex returns a SpanID from a hex string if it is compliant // with the w3c trace-context specification. 
// See more at https://www.w3.org/TR/trace-context/#parent-id func SpanIDFromHex(h string) (SpanID, error) { - s := SpanID{} if len(h) != 16 { - return s, errInvalidSpanIDLength - } - - if err := decodeHex(h, s[:]); err != nil { - return s, err + return [8]byte{}, errInvalidSpanIDLength } - - if !s.IsValid() { - return s, errNilSpanID + var b [8]byte + invalidMark := byte(0) + for i := 0; i < len(h); i += 4 { + b[i/2] = (hexRev[h[i]] << 4) | hexRev[h[i+1]] + b[i/2+1] = (hexRev[h[i+2]] << 4) | hexRev[h[i+3]] + invalidMark |= hexRev[h[i]] | hexRev[h[i+1]] | hexRev[h[i+2]] | hexRev[h[i+3]] } - return s, nil -} - -func decodeHex(h string, b []byte) error { - for _, r := range h { - switch { - case 'a' <= r && r <= 'f': - continue - case '0' <= r && r <= '9': - continue - default: - return errInvalidHexID - } + // If the upper 4 bits of any byte are not zero, there was an invalid hex + // character since invalid hex characters are 0xff in hexRev. + if invalidMark&0xf0 != 0 { + return [8]byte{}, errInvalidHexID } - - decoded, err := hex.DecodeString(h) - if err != nil { - return err + // If we didn't set any bits, then h was all zeros. + if invalidMark == 0 { + return [8]byte{}, errNilSpanID } - - copy(b, decoded) - return nil + return b, nil } // TraceFlags contains flags that can be set on a SpanContext. type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`. -// IsSampled returns if the sampling bit is set in the TraceFlags. +// IsSampled reports whether the sampling bit is set in the TraceFlags. func (tf TraceFlags) IsSampled() bool { return tf&FlagsSampled == FlagsSampled } @@ -160,12 +194,20 @@ func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive // // MarshalJSON implements a custom marshal function to encode TraceFlags // as a hex string. 
func (tf TraceFlags) MarshalJSON() ([]byte, error) { - return json.Marshal(tf.String()) + b := [2 + 2]byte{0: '"', 3: '"'} + h := tf.hexBytes() + copy(b[1:], h[:]) + return b[:], nil } // String returns the hex string representation form of TraceFlags. func (tf TraceFlags) String() string { - return hex.EncodeToString([]byte{byte(tf)}[:]) + h := tf.hexBytes() + return string(h[:]) +} + +func (tf TraceFlags) hexBytes() [2]byte { + return [2]byte{hexLU[tf>>4], hexLU[tf&0xf]} } // SpanContextConfig contains mutable fields usable for constructing @@ -201,13 +243,13 @@ type SpanContext struct { var _ json.Marshaler = SpanContext{} -// IsValid returns if the SpanContext is valid. A valid span context has a +// IsValid reports whether the SpanContext is valid. A valid span context has a // valid TraceID and SpanID. func (sc SpanContext) IsValid() bool { return sc.HasTraceID() && sc.HasSpanID() } -// IsRemote indicates whether the SpanContext represents a remotely-created Span. +// IsRemote reports whether the SpanContext represents a remotely-created Span. func (sc SpanContext) IsRemote() bool { return sc.remote } @@ -228,7 +270,7 @@ func (sc SpanContext) TraceID() TraceID { return sc.traceID } -// HasTraceID checks if the SpanContext has a valid TraceID. +// HasTraceID reports whether the SpanContext has a valid TraceID. func (sc SpanContext) HasTraceID() bool { return sc.traceID.IsValid() } @@ -249,7 +291,7 @@ func (sc SpanContext) SpanID() SpanID { return sc.spanID } -// HasSpanID checks if the SpanContext has a valid SpanID. +// HasSpanID reports whether the SpanContext has a valid SpanID. func (sc SpanContext) HasSpanID() bool { return sc.spanID.IsValid() } @@ -270,7 +312,7 @@ func (sc SpanContext) TraceFlags() TraceFlags { return sc.traceFlags } -// IsSampled returns if the sampling bit is set in the SpanContext's TraceFlags. +// IsSampled reports whether the sampling bit is set in the SpanContext's TraceFlags. 
func (sc SpanContext) IsSampled() bool { return sc.traceFlags.IsSampled() } @@ -302,7 +344,7 @@ func (sc SpanContext) WithTraceState(state TraceState) SpanContext { } } -// Equal is a predicate that determines whether two SpanContext values are equal. +// Equal reports whether two SpanContext values are equal. func (sc SpanContext) Equal(other SpanContext) bool { return sc.traceID == other.traceID && sc.spanID == other.spanID && diff --git a/trace/trace_test.go b/trace/trace_test.go index 9abea43a036..e11401e71dc 100644 --- a/trace/trace_test.go +++ b/trace/trace_test.go @@ -457,7 +457,7 @@ func TestStringSpanID(t *testing.T) { } } -func assertSpanContextEqual(got SpanContext, want SpanContext) bool { +func assertSpanContextEqual(got, want SpanContext) bool { return got.spanID == want.spanID && got.traceID == want.traceID && got.traceFlags == want.traceFlags && diff --git a/trace/tracestate.go b/trace/tracestate.go index dc5e34cad0d..073adae2faa 100644 --- a/trace/tracestate.go +++ b/trace/tracestate.go @@ -80,7 +80,7 @@ func checkKeyRemain(key string) bool { // // param n is remain part length, should be 255 in simple-key or 13 in system-id. func checkKeyPart(key string, n int) bool { - if len(key) == 0 { + if key == "" { return false } first := key[0] // key's first char @@ -102,7 +102,7 @@ func isAlphaNum(c byte) bool { // // param n is remain part length, should be 240 exactly. 
func checkKeyTenant(key string, n int) bool { - if len(key) == 0 { + if key == "" { return false } return isAlphaNum(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:]) @@ -191,7 +191,7 @@ func ParseTraceState(ts string) (TraceState, error) { for ts != "" { var memberStr string memberStr, ts, _ = strings.Cut(ts, listDelimiters) - if len(memberStr) == 0 { + if memberStr == "" { continue } diff --git a/trace/tracestate_test.go b/trace/tracestate_test.go index aff07d46c6f..68955c5883f 100644 --- a/trace/tracestate_test.go +++ b/trace/tracestate_test.go @@ -278,7 +278,7 @@ var testcases = []struct { var maxMembers = func() TraceState { members := make([]member, maxListMembers) - for i := 0; i < maxListMembers; i++ { + for i := range maxListMembers { members[i] = member{ Key: fmt.Sprintf("key%d", i+1), Value: fmt.Sprintf("value%d", i+1), diff --git a/trace_test.go b/trace_test.go index 9aa1ba428f1..21d73421def 100644 --- a/trace_test.go +++ b/trace_test.go @@ -17,7 +17,7 @@ type testTracerProvider struct{ embedded.TracerProvider } var _ trace.TracerProvider = &testTracerProvider{} -func (*testTracerProvider) Tracer(_ string, _ ...trace.TracerOption) trace.Tracer { +func (*testTracerProvider) Tracer(string, ...trace.TracerOption) trace.Tracer { return noop.NewTracerProvider().Tracer("") } diff --git a/version.go b/version.go index 7afe92b5981..bcaa5aa5378 100644 --- a/version.go +++ b/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. 
func Version() string { - return "1.37.0" + return "1.38.0" } diff --git a/versions.yaml b/versions.yaml index 9d4742a1764..07145e254b5 100644 --- a/versions.yaml +++ b/versions.yaml @@ -3,7 +3,7 @@ module-sets: stable-v1: - version: v1.37.0 + version: v1.38.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -22,11 +22,11 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.59.0 + version: v0.60.0 modules: - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.13.0 + version: v0.14.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/log/logtest @@ -36,7 +36,7 @@ module-sets: - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.12 + version: v0.0.13 modules: - go.opentelemetry.io/otel/schema excluded-modules: